memcg: memory swap controller: fix limit check
1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19
20 #include <linux/res_counter.h>
21 #include <linux/memcontrol.h>
22 #include <linux/cgroup.h>
23 #include <linux/mm.h>
24 #include <linux/pagemap.h>
25 #include <linux/smp.h>
26 #include <linux/page-flags.h>
27 #include <linux/backing-dev.h>
28 #include <linux/bit_spinlock.h>
29 #include <linux/rcupdate.h>
30 #include <linux/mutex.h>
31 #include <linux/slab.h>
32 #include <linux/swap.h>
33 #include <linux/spinlock.h>
34 #include <linux/fs.h>
35 #include <linux/seq_file.h>
36 #include <linux/vmalloc.h>
37 #include <linux/mm_inline.h>
38 #include <linux/page_cgroup.h>
39 #include "internal.h"
40
41 #include <asm/uaccess.h>
42
43 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
44 #define MEM_CGROUP_RECLAIM_RETRIES      5
45
46 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
47 /* Turned on only when memory cgroup is enabled && really_do_swap_account != 0 */
48 int do_swap_account __read_mostly;
49 static int really_do_swap_account __initdata = 1; /* remembers the boot option */
50 #else
51 #define do_swap_account         (0)
52 #endif
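/*
 * Note: when CONFIG_CGROUP_MEM_RES_CTLR_SWAP is not set, do_swap_account is a
 * compile-time constant 0, so every "if (do_swap_account)" branch below is
 * optimized away and the mem+swap (memsw) counter is never touched.
 */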
53
54
55 /*
56  * Statistics for memory cgroup.
57  */
58 enum mem_cgroup_stat_index {
59         /*
60          * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
61          */
62         MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
63         MEM_CGROUP_STAT_RSS,       /* # of pages charged as rss */
64         MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
65         MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
66
67         MEM_CGROUP_STAT_NSTATS,
68 };
69
70 struct mem_cgroup_stat_cpu {
71         s64 count[MEM_CGROUP_STAT_NSTATS];
72 } ____cacheline_aligned_in_smp;
73
74 struct mem_cgroup_stat {
75         struct mem_cgroup_stat_cpu cpustat[0];
76 };
77
78 /*
79  * For accounting under irq-disabled context, there is no need to increment the preempt count.
80  */
81 static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
82                 enum mem_cgroup_stat_index idx, int val)
83 {
84         stat->count[idx] += val;
85 }
86
87 static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
88                 enum mem_cgroup_stat_index idx)
89 {
90         int cpu;
91         s64 ret = 0;
92         for_each_possible_cpu(cpu)
93                 ret += stat->cpustat[cpu].count[idx];
94         return ret;
95 }
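/*
 * Note: reading a statistic sums the per-CPU counters over all possible CPUs
 * without locking, so the result is only an approximate snapshot while
 * charges and uncharges are in flight.
 */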
96
97 /*
98  * per-zone information in memory controller.
99  */
100 struct mem_cgroup_per_zone {
101         /*
102          * spin_lock to protect the per cgroup LRU
103          */
104         struct list_head        lists[NR_LRU_LISTS];
105         unsigned long           count[NR_LRU_LISTS];
106 };
107 /* Macro for accessing counter */
108 #define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])
109
110 struct mem_cgroup_per_node {
111         struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
112 };
113
114 struct mem_cgroup_lru_info {
115         struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
116 };
117
118 /*
119  * The memory controller data structure. The memory controller controls both
120  * page cache and RSS per cgroup. We would eventually like to provide
121  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
122  * to help the administrator determine what knobs to tune.
123  *
124  * TODO: Add a watermark for the memory controller. Reclaim will begin when
125  * we hit the watermark. Maybe even add a low watermark, such that
126  * no reclaim occurs from a cgroup at its low watermark; this is
127  * a feature that will be implemented much later.
128  */
129 struct mem_cgroup {
130         struct cgroup_subsys_state css;
131         /*
132          * the counter to account for memory usage
133          */
134         struct res_counter res;
135         /*
136          * the counter to account for mem+swap usage.
137          */
138         struct res_counter memsw;
139         /*
140          * Per cgroup active and inactive list, similar to the
141          * per zone LRU lists.
142          */
143         struct mem_cgroup_lru_info info;
144
145         int     prev_priority;  /* for recording reclaim priority */
146
147         /*
148  * While reclaiming in a hierarchy, we cache the last child we
149          * reclaimed from. Protected by cgroup_lock()
150          */
151         struct mem_cgroup *last_scanned_child;
152         /*
153          * Should the accounting and control be hierarchical, per subtree?
154          */
155         bool use_hierarchy;
156         unsigned long   last_oom_jiffies;
157         int             obsolete;
158         atomic_t        refcnt;
159         /*
160          * statistics. This must be placed at the end of memcg.
161          */
162         struct mem_cgroup_stat stat;
163 };
164
165 enum charge_type {
166         MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
167         MEM_CGROUP_CHARGE_TYPE_MAPPED,
168         MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
169         MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
170         MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
171         NR_CHARGE_TYPE,
172 };
173
174 /* only for here (for easy reading.) */
175 #define PCGF_CACHE      (1UL << PCG_CACHE)
176 #define PCGF_USED       (1UL << PCG_USED)
177 #define PCGF_LOCK       (1UL << PCG_LOCK)
178 static const unsigned long
179 pcg_default_flags[NR_CHARGE_TYPE] = {
180         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
181         PCGF_USED | PCGF_LOCK, /* Anon */
182         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
183         0, /* FORCE */
184 };
185
186
187 /* for encoding cft->private value on file */
188 #define _MEM                    (0)
189 #define _MEMSWAP                (1)
190 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
191 #define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
192 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
193
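/*
 * Example: a cftype with .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT)
 * packs the counter type into the upper 16 bits and the res_counter member
 * into the lower 16 bits; MEMFILE_TYPE() then yields _MEMSWAP and
 * MEMFILE_ATTR() yields RES_LIMIT (see mem_cgroup_read()/mem_cgroup_write()).
 */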
194 static void mem_cgroup_get(struct mem_cgroup *mem);
195 static void mem_cgroup_put(struct mem_cgroup *mem);
196
197 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
198                                          struct page_cgroup *pc,
199                                          bool charge)
200 {
201         int val = (charge)? 1 : -1;
202         struct mem_cgroup_stat *stat = &mem->stat;
203         struct mem_cgroup_stat_cpu *cpustat;
204         int cpu = get_cpu();
205
206         cpustat = &stat->cpustat[cpu];
207         if (PageCgroupCache(pc))
208                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
209         else
210                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
211
212         if (charge)
213                 __mem_cgroup_stat_add_safe(cpustat,
214                                 MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
215         else
216                 __mem_cgroup_stat_add_safe(cpustat,
217                                 MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
218         put_cpu();
219 }
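/*
 * Note: get_cpu()/put_cpu() disable preemption around the per-CPU update, so
 * __mem_cgroup_stat_add_safe() needs no further protection here. Each
 * charge/uncharge adjusts either the CACHE or the RSS counter and records a
 * PGPGIN or PGPGOUT event.
 */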
220
221 static struct mem_cgroup_per_zone *
222 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
223 {
224         return &mem->info.nodeinfo[nid]->zoneinfo[zid];
225 }
226
227 static struct mem_cgroup_per_zone *
228 page_cgroup_zoneinfo(struct page_cgroup *pc)
229 {
230         struct mem_cgroup *mem = pc->mem_cgroup;
231         int nid = page_cgroup_nid(pc);
232         int zid = page_cgroup_zid(pc);
233
234         return mem_cgroup_zoneinfo(mem, nid, zid);
235 }
236
237 static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
238                                         enum lru_list idx)
239 {
240         int nid, zid;
241         struct mem_cgroup_per_zone *mz;
242         u64 total = 0;
243
244         for_each_online_node(nid)
245                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
246                         mz = mem_cgroup_zoneinfo(mem, nid, zid);
247                         total += MEM_CGROUP_ZSTAT(mz, idx);
248                 }
249         return total;
250 }
251
252 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
253 {
254         return container_of(cgroup_subsys_state(cont,
255                                 mem_cgroup_subsys_id), struct mem_cgroup,
256                                 css);
257 }
258
259 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
260 {
261         /*
262          * mm_update_next_owner() may clear mm->owner to NULL
263          * if it races with swapoff, page migration, etc.
264          * So this can be called with p == NULL.
265          */
266         if (unlikely(!p))
267                 return NULL;
268
269         return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
270                                 struct mem_cgroup, css);
271 }
272
273 /*
274  * Following LRU functions are allowed to be used without PCG_LOCK.
275  * Operations are called by routine of global LRU independently from memcg.
276  * What we have to take care of here is validness of pc->mem_cgroup.
277  *
278  * Changes to pc->mem_cgroup happens when
279  * 1. charge
280  * 2. moving account
281  * In the typical case, "charge" is done before add-to-lru. The exception is
282  * SwapCache, which is added to the LRU before being charged.
283  * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
284  * When moving account, the page is not on LRU. It's isolated.
285  */
286
287 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
288 {
289         struct page_cgroup *pc;
290         struct mem_cgroup *mem;
291         struct mem_cgroup_per_zone *mz;
292
293         if (mem_cgroup_disabled())
294                 return;
295         pc = lookup_page_cgroup(page);
296         /* can happen while we handle swapcache. */
297         if (list_empty(&pc->lru))
298                 return;
299         mz = page_cgroup_zoneinfo(pc);
300         mem = pc->mem_cgroup;
301         MEM_CGROUP_ZSTAT(mz, lru) -= 1;
302         list_del_init(&pc->lru);
303         return;
304 }
305
306 void mem_cgroup_del_lru(struct page *page)
307 {
308         mem_cgroup_del_lru_list(page, page_lru(page));
309 }
310
311 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
312 {
313         struct mem_cgroup_per_zone *mz;
314         struct page_cgroup *pc;
315
316         if (mem_cgroup_disabled())
317                 return;
318
319         pc = lookup_page_cgroup(page);
320         smp_rmb();
321         /* unused page is not rotated. */
322         if (!PageCgroupUsed(pc))
323                 return;
324         mz = page_cgroup_zoneinfo(pc);
325         list_move(&pc->lru, &mz->lists[lru]);
326 }
327
328 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
329 {
330         struct page_cgroup *pc;
331         struct mem_cgroup_per_zone *mz;
332
333         if (mem_cgroup_disabled())
334                 return;
335         pc = lookup_page_cgroup(page);
336         /* barrier to sync with "charge" */
337         smp_rmb();
338         if (!PageCgroupUsed(pc))
339                 return;
340
341         mz = page_cgroup_zoneinfo(pc);
342         MEM_CGROUP_ZSTAT(mz, lru) += 1;
343         list_add(&pc->lru, &mz->lists[lru]);
344 }
345 /*
346  * To add swapcache to the LRU. Be careful when calling this function:
347  * zone->lru_lock must not be held and irqs must not be disabled.
348  */
349 static void mem_cgroup_lru_fixup(struct page *page)
350 {
351         if (!isolate_lru_page(page))
352                 putback_lru_page(page);
353 }
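/*
 * Note: isolate_lru_page() + putback_lru_page() takes the page off the
 * global LRU and re-adds it, which funnels it through
 * mem_cgroup_add_lru_list() so a freshly charged swapcache page ends up on
 * the correct per-memcg LRU list.
 */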
354
355 void mem_cgroup_move_lists(struct page *page,
356                            enum lru_list from, enum lru_list to)
357 {
358         if (mem_cgroup_disabled())
359                 return;
360         mem_cgroup_del_lru_list(page, from);
361         mem_cgroup_add_lru_list(page, to);
362 }
363
364 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
365 {
366         int ret;
367
368         task_lock(task);
369         ret = task->mm && mm_match_cgroup(task->mm, mem);
370         task_unlock(task);
371         return ret;
372 }
373
374 /*
375  * Calculate mapped_ratio under memory controller. This will be used in
376  * vmscan.c for determining whether we have to reclaim mapped pages.
377  */
378 int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
379 {
380         long total, rss;
381
382         /*
383          * usage is recorded in bytes. But, here, we assume the number of
384          * physical pages can be represented by "long" on any arch.
385          */
386         total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
387         rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
388         return (int)((rss * 100L) / total);
389 }
390
391 /*
392  * prev_priority control...this will be used in memory reclaim path.
393  */
394 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
395 {
396         return mem->prev_priority;
397 }
398
399 void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
400 {
401         if (priority < mem->prev_priority)
402                 mem->prev_priority = priority;
403 }
404
405 void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
406 {
407         mem->prev_priority = priority;
408 }
409
410 /*
411  * Calculate # of pages to be scanned in this priority/zone.
412  * See also vmscan.c
413  *
414  * priority starts from "DEF_PRIORITY" and decremented in each loop.
415  * (see include/linux/mmzone.h)
416  */
417
418 long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
419                                         int priority, enum lru_list lru)
420 {
421         long nr_pages;
422         int nid = zone->zone_pgdat->node_id;
423         int zid = zone_idx(zone);
424         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
425
426         nr_pages = MEM_CGROUP_ZSTAT(mz, lru);
427
428         return (nr_pages >> priority);
429 }
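/*
 * Worked example: with DEF_PRIORITY (12 in this kernel), the first reclaim
 * pass scans MEM_CGROUP_ZSTAT(mz, lru) >> 12 pages of this zone's per-memcg
 * LRU; as the priority drops toward 0, the scan target grows until the whole
 * list is eligible.
 */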
430
431 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
432                                         struct list_head *dst,
433                                         unsigned long *scanned, int order,
434                                         int mode, struct zone *z,
435                                         struct mem_cgroup *mem_cont,
436                                         int active, int file)
437 {
438         unsigned long nr_taken = 0;
439         struct page *page;
440         unsigned long scan;
441         LIST_HEAD(pc_list);
442         struct list_head *src;
443         struct page_cgroup *pc, *tmp;
444         int nid = z->zone_pgdat->node_id;
445         int zid = zone_idx(z);
446         struct mem_cgroup_per_zone *mz;
447         int lru = LRU_FILE * !!file + !!active;
448
449         BUG_ON(!mem_cont);
450         mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
451         src = &mz->lists[lru];
452
453         scan = 0;
454         list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
455                 if (scan >= nr_to_scan)
456                         break;
457
458                 page = pc->page;
459                 if (unlikely(!PageCgroupUsed(pc)))
460                         continue;
461                 if (unlikely(!PageLRU(page)))
462                         continue;
463
464                 scan++;
465                 if (__isolate_lru_page(page, mode, file) == 0) {
466                         list_move(&page->lru, dst);
467                         nr_taken++;
468                 }
469         }
470
471         *scanned = scan;
472         return nr_taken;
473 }
474
475 #define mem_cgroup_from_res_counter(counter, member)    \
476         container_of(counter, struct mem_cgroup, member)
477
478 /*
479  * This routine finds the DFS walk successor. This routine should be
480  * called with cgroup_mutex held
481  */
482 static struct mem_cgroup *
483 mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
484 {
485         struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
486
487         curr_cgroup = curr->css.cgroup;
488         root_cgroup = root_mem->css.cgroup;
489
490         if (!list_empty(&curr_cgroup->children)) {
491                 /*
492                  * Walk down to children
493                  */
494                 mem_cgroup_put(curr);
495                 cgroup = list_entry(curr_cgroup->children.next,
496                                                 struct cgroup, sibling);
497                 curr = mem_cgroup_from_cont(cgroup);
498                 mem_cgroup_get(curr);
499                 goto done;
500         }
501
502 visit_parent:
503         if (curr_cgroup == root_cgroup) {
504                 mem_cgroup_put(curr);
505                 curr = root_mem;
506                 mem_cgroup_get(curr);
507                 goto done;
508         }
509
510         /*
511          * Goto next sibling
512          */
513         if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
514                 mem_cgroup_put(curr);
515                 cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
516                                                 sibling);
517                 curr = mem_cgroup_from_cont(cgroup);
518                 mem_cgroup_get(curr);
519                 goto done;
520         }
521
522         /*
523          * Go up to next parent and next parent's sibling if need be
524          */
525         curr_cgroup = curr_cgroup->parent;
526         goto visit_parent;
527
528 done:
529         root_mem->last_scanned_child = curr;
530         return curr;
531 }
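/*
 * Note: the walk above visits the hierarchy in DFS order: descend to the
 * first child if one exists, otherwise move to the next sibling, otherwise
 * climb toward root_cgroup looking for a parent's next sibling; once the walk
 * returns to root_cgroup, root_mem itself is returned. The result is also
 * cached in root_mem->last_scanned_child.
 */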
532
533 /*
534  * Visit the first child (need not be the first child as per the ordering
535  * of the cgroup list, since we track last_scanned_child) of @root_mem and use
536  * that to reclaim free pages from.
537  */
538 static struct mem_cgroup *
539 mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
540 {
541         struct cgroup *cgroup;
542         struct mem_cgroup *ret;
543         bool obsolete = (root_mem->last_scanned_child &&
544                                 root_mem->last_scanned_child->obsolete);
545
546         /*
547          * Scan all children under the mem_cgroup mem
548          */
549         cgroup_lock();
550         if (list_empty(&root_mem->css.cgroup->children)) {
551                 ret = root_mem;
552                 goto done;
553         }
554
555         if (!root_mem->last_scanned_child || obsolete) {
556
557                 if (obsolete)
558                         mem_cgroup_put(root_mem->last_scanned_child);
559
560                 cgroup = list_first_entry(&root_mem->css.cgroup->children,
561                                 struct cgroup, sibling);
562                 ret = mem_cgroup_from_cont(cgroup);
563                 mem_cgroup_get(ret);
564         } else
565                 ret = mem_cgroup_get_next_node(root_mem->last_scanned_child,
566                                                 root_mem);
567
568 done:
569         root_mem->last_scanned_child = ret;
570         cgroup_unlock();
571         return ret;
572 }
573
574 static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
575 {
576         if (do_swap_account) {
577                 if (res_counter_check_under_limit(&mem->res) &&
578                         res_counter_check_under_limit(&mem->memsw))
579                         return true;
580         } else
581                 if (res_counter_check_under_limit(&mem->res))
582                         return true;
583         return false;
584 }
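/*
 * Note: with swap accounting enabled, "under limit" requires *both* the
 * memory counter (res) and the mem+swap counter (memsw) to be below their
 * limits; without it, only res is checked. This is the limit check used to
 * decide whether reclaim has freed enough.
 */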
585
586 /*
587  * Dance down the hierarchy if needed to reclaim memory. We remember the
588  * last child we reclaimed from, so that we don't end up penalizing
589  * one child extensively based on its position in the children list.
590  *
591  * root_mem is the original ancestor that we've been reclaiming from.
592  */
593 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
594                                                 gfp_t gfp_mask, bool noswap)
595 {
596         struct mem_cgroup *next_mem;
597         int ret = 0;
598
599         /*
600          * Reclaim unconditionally and don't check for return value.
601          * We need to reclaim in the current group and down the tree.
602          * One might think about checking for children before reclaiming,
603          * but there might be left over accounting, even after children
604          * have left.
605          */
606         ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap);
607         if (mem_cgroup_check_under_limit(root_mem))
608                 return 0;
609
610         next_mem = mem_cgroup_get_first_node(root_mem);
611
612         while (next_mem != root_mem) {
613                 if (next_mem->obsolete) {
614                         mem_cgroup_put(next_mem);
615                         cgroup_lock();
616                         next_mem = mem_cgroup_get_first_node(root_mem);
617                         cgroup_unlock();
618                         continue;
619                 }
620                 ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap);
621                 if (mem_cgroup_check_under_limit(root_mem))
622                         return 0;
623                 cgroup_lock();
624                 next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
625                 cgroup_unlock();
626         }
627         return ret;
628 }
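/*
 * Note: mem_cgroup_hierarchical_reclaim() returns 0 as soon as root_mem drops
 * back under its limit; otherwise it keeps walking the children via
 * mem_cgroup_get_first_node()/mem_cgroup_get_next_node() and finally returns
 * the result of the last try_to_free_mem_cgroup_pages() call.
 */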
629
630 bool mem_cgroup_oom_called(struct task_struct *task)
631 {
632         bool ret = false;
633         struct mem_cgroup *mem;
634         struct mm_struct *mm;
635
636         rcu_read_lock();
637         mm = task->mm;
638         if (!mm)
639                 mm = &init_mm;
640         mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
641         if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
642                 ret = true;
643         rcu_read_unlock();
644         return ret;
645 }
646 /*
647  * Unlike the exported interface, an "oom" parameter is added. If oom == true,
648  * the OOM killer can be invoked.
649  */
650 static int __mem_cgroup_try_charge(struct mm_struct *mm,
651                         gfp_t gfp_mask, struct mem_cgroup **memcg,
652                         bool oom)
653 {
654         struct mem_cgroup *mem, *mem_over_limit;
655         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
656         struct res_counter *fail_res;
657
658         if (unlikely(test_thread_flag(TIF_MEMDIE))) {
659                 /* Don't account this! */
660                 *memcg = NULL;
661                 return 0;
662         }
663
664         /*
665          * We always charge the cgroup the mm_struct belongs to.
666          * The mm_struct's mem_cgroup changes on task migration if the
667          * thread group leader migrates. It's possible that mm is not
668          * set, if so charge the init_mm (happens for pagecache usage).
669          */
670         if (likely(!*memcg)) {
671                 rcu_read_lock();
672                 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
673                 if (unlikely(!mem)) {
674                         rcu_read_unlock();
675                         return 0;
676                 }
677                 /*
678                  * For every charge from the cgroup, increment reference count
679                  */
680                 css_get(&mem->css);
681                 *memcg = mem;
682                 rcu_read_unlock();
683         } else {
684                 mem = *memcg;
685                 css_get(&mem->css);
686         }
687
688         while (1) {
689                 int ret;
690                 bool noswap = false;
691
692                 ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
693                 if (likely(!ret)) {
694                         if (!do_swap_account)
695                                 break;
696                         ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
697                                                         &fail_res);
698                         if (likely(!ret))
699                                 break;
700                         /* mem+swap counter fails */
701                         res_counter_uncharge(&mem->res, PAGE_SIZE);
702                         noswap = true;
703                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
704                                                                         memsw);
705                 } else
706                         /* mem counter fails */
707                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
708                                                                         res);
709
710                 if (!(gfp_mask & __GFP_WAIT))
711                         goto nomem;
712
713                 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
714                                                         noswap);
715
716                 /*
717                  * try_to_free_mem_cgroup_pages() might not give us a full
718                  * picture of reclaim. Some pages are reclaimed and might be
719                  * moved to swap cache or just unmapped from the cgroup.
720                  * Check the limit again to see if the reclaim reduced the
721                  * current usage of the cgroup before giving up
722                  *
723                  */
724                 if (mem_cgroup_check_under_limit(mem_over_limit))
725                         continue;
726
727                 if (!nr_retries--) {
728                         if (oom) {
729                                 mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
730                                 mem_over_limit->last_oom_jiffies = jiffies;
731                         }
732                         goto nomem;
733                 }
734         }
735         return 0;
736 nomem:
737         css_put(&mem->css);
738         return -ENOMEM;
739 }
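/*
 * Note on the charge ordering above: the plain memory counter (res) is
 * charged first; only if that succeeds and swap accounting is enabled is the
 * mem+swap counter (memsw) charged. If memsw fails, the res charge is rolled
 * back and reclaim is retried with noswap == true, because swapping pages
 * out would not reduce mem+swap usage.
 */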
740
741 /**
742  * mem_cgroup_try_charge - get charge of PAGE_SIZE.
743  * @mm: the mm_struct to charge against (used when *memcg is NULL).
744  * @gfp_mask: gfp_mask for reclaim.
745  * @memcg: a pointer to memory cgroup which is charged against.
746  *
747  * Charge against the memory cgroup pointed to by *memcg. If *memcg == NULL,
748  * the memory cgroup is looked up from @mm and stored in *memcg.
749  *
750  * Returns 0 on success, -ENOMEM on failure.
751  * This call can invoke OOM-Killer.
752  */
753
754 int mem_cgroup_try_charge(struct mm_struct *mm,
755                           gfp_t mask, struct mem_cgroup **memcg)
756 {
757         return __mem_cgroup_try_charge(mm, mask, memcg, true);
758 }
759
760 /*
761  * Commit a charge obtained by mem_cgroup_try_charge() and mark the page_cgroup
762  * as USED. If it is already USED, uncharge and return.
763  */
764
765 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
766                                      struct page_cgroup *pc,
767                                      enum charge_type ctype)
768 {
769         /* try_charge() may store NULL in *memcg; handle that case here. */
770         if (!mem)
771                 return;
772
773         lock_page_cgroup(pc);
774         if (unlikely(PageCgroupUsed(pc))) {
775                 unlock_page_cgroup(pc);
776                 res_counter_uncharge(&mem->res, PAGE_SIZE);
777                 if (do_swap_account)
778                         res_counter_uncharge(&mem->memsw, PAGE_SIZE);
779                 css_put(&mem->css);
780                 return;
781         }
782         pc->mem_cgroup = mem;
783         smp_wmb();
784         pc->flags = pcg_default_flags[ctype];
785
786         mem_cgroup_charge_statistics(mem, pc, true);
787
788         unlock_page_cgroup(pc);
789 }
790
791 /**
792  * mem_cgroup_move_account - move account of the page
793  * @pc: page_cgroup of the page.
794  * @from: mem_cgroup which the page is moved from.
795  * @to: mem_cgroup which the page is moved to. @from != @to.
796  *
797  * The caller must ensure the following:
798  * - the page is not on the LRU (isolate_lru_page() is useful here).
799  *
800  * returns 0 at success,
801  * returns -EBUSY when lock is busy or "pc" is unstable.
802  *
803  * This function does the "uncharge" from the old cgroup but doesn't do the
804  * "charge" to the new cgroup. That should be done by the caller.
805  */
806
807 static int mem_cgroup_move_account(struct page_cgroup *pc,
808         struct mem_cgroup *from, struct mem_cgroup *to)
809 {
810         struct mem_cgroup_per_zone *from_mz, *to_mz;
811         int nid, zid;
812         int ret = -EBUSY;
813
814         VM_BUG_ON(from == to);
815         VM_BUG_ON(PageLRU(pc->page));
816
817         nid = page_cgroup_nid(pc);
818         zid = page_cgroup_zid(pc);
819         from_mz =  mem_cgroup_zoneinfo(from, nid, zid);
820         to_mz =  mem_cgroup_zoneinfo(to, nid, zid);
821
822         if (!trylock_page_cgroup(pc))
823                 return ret;
824
825         if (!PageCgroupUsed(pc))
826                 goto out;
827
828         if (pc->mem_cgroup != from)
829                 goto out;
830
831         css_put(&from->css);
832         res_counter_uncharge(&from->res, PAGE_SIZE);
833         mem_cgroup_charge_statistics(from, pc, false);
834         if (do_swap_account)
835                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
836         pc->mem_cgroup = to;
837         mem_cgroup_charge_statistics(to, pc, true);
838         css_get(&to->css);
839         ret = 0;
840 out:
841         unlock_page_cgroup(pc);
842         return ret;
843 }
844
845 /*
846  * move charges to its parent.
847  */
848
849 static int mem_cgroup_move_parent(struct page_cgroup *pc,
850                                   struct mem_cgroup *child,
851                                   gfp_t gfp_mask)
852 {
853         struct page *page = pc->page;
854         struct cgroup *cg = child->css.cgroup;
855         struct cgroup *pcg = cg->parent;
856         struct mem_cgroup *parent;
857         int ret;
858
859         /* Is ROOT ? */
860         if (!pcg)
861                 return -EINVAL;
862
863
864         parent = mem_cgroup_from_cont(pcg);
865
866
867         ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
868         if (ret || !parent)
869                 return ret;
870
871         if (!get_page_unless_zero(page))
872                 return -EBUSY;
873
874         ret = isolate_lru_page(page);
875
876         if (ret)
877                 goto cancel;
878
879         ret = mem_cgroup_move_account(pc, child, parent);
880
881         /* drop the extra refcount taken by try_charge() (move_account takes one) */
882         css_put(&parent->css);
883         putback_lru_page(page);
884         if (!ret) {
885                 put_page(page);
886                 return 0;
887         }
888         /* uncharge if move fails */
889 cancel:
890         res_counter_uncharge(&parent->res, PAGE_SIZE);
891         if (do_swap_account)
892                 res_counter_uncharge(&parent->memsw, PAGE_SIZE);
893         put_page(page);
894         return ret;
895 }
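/*
 * Note: the sequence above is: pre-charge the parent (try_charge), pin the
 * page and isolate it from the LRU, then mem_cgroup_move_account() from
 * child to parent. If isolation or the move fails, the parent's pre-charge
 * is cancelled (res and, if enabled, memsw are uncharged) and the page
 * reference is dropped.
 */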
896
897 /*
898  * Charge the memory controller for page usage.
899  * Return
900  * 0 if the charge was successful
901  * < 0 if the cgroup is over its limit
902  */
903 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
904                                 gfp_t gfp_mask, enum charge_type ctype,
905                                 struct mem_cgroup *memcg)
906 {
907         struct mem_cgroup *mem;
908         struct page_cgroup *pc;
909         int ret;
910
911         pc = lookup_page_cgroup(page);
912         /* can happen at boot */
913         if (unlikely(!pc))
914                 return 0;
915         prefetchw(pc);
916
917         mem = memcg;
918         ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
919         if (ret || !mem)
920                 return ret;
921
922         __mem_cgroup_commit_charge(mem, pc, ctype);
923         return 0;
924 }
925
926 int mem_cgroup_newpage_charge(struct page *page,
927                               struct mm_struct *mm, gfp_t gfp_mask)
928 {
929         if (mem_cgroup_disabled())
930                 return 0;
931         if (PageCompound(page))
932                 return 0;
933         /*
934          * If already mapped, we don't have to account.
935          * If it is page cache, page->mapping points to an address_space.
936          * But page->mapping may hold a stale anon_vma pointer;
937          * detect that with a PageAnon() check. A newly mapped anon page's
938          * page->mapping is NULL.
939          */
940         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
941                 return 0;
942         if (unlikely(!mm))
943                 mm = &init_mm;
944         return mem_cgroup_charge_common(page, mm, gfp_mask,
945                                 MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
946 }
947
948 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
949                                 gfp_t gfp_mask)
950 {
951         if (mem_cgroup_disabled())
952                 return 0;
953         if (PageCompound(page))
954                 return 0;
955         /*
956          * Corner case handling. This is usually called from add_to_page_cache(),
957          * but some filesystems (shmem) precharge this page before calling it
958          * and then call add_to_page_cache() with GFP_NOWAIT.
959          *
960          * In the GFP_NOWAIT case, the page may have been pre-charged before
961          * add_to_page_cache() (see shmem.c); check here and avoid charging
962          * twice. (It works, but at a slightly higher cost.)
963          */
964         if (!(gfp_mask & __GFP_WAIT)) {
965                 struct page_cgroup *pc;
966
967
968                 pc = lookup_page_cgroup(page);
969                 if (!pc)
970                         return 0;
971                 lock_page_cgroup(pc);
972                 if (PageCgroupUsed(pc)) {
973                         unlock_page_cgroup(pc);
974                         return 0;
975                 }
976                 unlock_page_cgroup(pc);
977         }
978
979         if (unlikely(!mm))
980                 mm = &init_mm;
981
982         if (page_is_file_cache(page))
983                 return mem_cgroup_charge_common(page, mm, gfp_mask,
984                                 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
985         else
986                 return mem_cgroup_charge_common(page, mm, gfp_mask,
987                                 MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
988 }
989
990 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
991                                  struct page *page,
992                                  gfp_t mask, struct mem_cgroup **ptr)
993 {
994         struct mem_cgroup *mem;
995         swp_entry_t     ent;
996
997         if (mem_cgroup_disabled())
998                 return 0;
999
1000         if (!do_swap_account)
1001                 goto charge_cur_mm;
1002
1003         /*
1004          * A racing thread's fault, or swapoff, may have already updated
1005          * the pte, and even removed page from swap cache: return success
1006          * to go on to do_swap_page()'s pte_same() test, which should fail.
1007          */
1008         if (!PageSwapCache(page))
1009                 return 0;
1010
1011         ent.val = page_private(page);
1012
1013         mem = lookup_swap_cgroup(ent);
1014         if (!mem || mem->obsolete)
1015                 goto charge_cur_mm;
1016         *ptr = mem;
1017         return __mem_cgroup_try_charge(NULL, mask, ptr, true);
1018 charge_cur_mm:
1019         if (unlikely(!mm))
1020                 mm = &init_mm;
1021         return __mem_cgroup_try_charge(mm, mask, ptr, true);
1022 }
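/*
 * Note: with swap accounting, the charge for a swapped-in page is made
 * against the mem_cgroup recorded in the swap_cgroup at swap-out time (via
 * lookup_swap_cgroup()), and only falls back to the faulting mm's cgroup if
 * no valid record exists or swap accounting is disabled.
 */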
1023
1024 #ifdef CONFIG_SWAP
1025
1026 int mem_cgroup_cache_charge_swapin(struct page *page,
1027                         struct mm_struct *mm, gfp_t mask, bool locked)
1028 {
1029         int ret = 0;
1030
1031         if (mem_cgroup_disabled())
1032                 return 0;
1033         if (unlikely(!mm))
1034                 mm = &init_mm;
1035         if (!locked)
1036                 lock_page(page);
1037         /*
1038          * If not locked, the page can be dropped from the swap cache before
1039          * we reach here.
1040          */
1041         if (PageSwapCache(page)) {
1042                 struct mem_cgroup *mem = NULL;
1043                 swp_entry_t ent;
1044
1045                 ent.val = page_private(page);
1046                 if (do_swap_account) {
1047                         mem = lookup_swap_cgroup(ent);
1048                         if (mem && mem->obsolete)
1049                                 mem = NULL;
1050                         if (mem)
1051                                 mm = NULL;
1052                 }
1053                 ret = mem_cgroup_charge_common(page, mm, mask,
1054                                 MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
1055
1056                 if (!ret && do_swap_account) {
1057                         /* avoid double counting */
1058                         mem = swap_cgroup_record(ent, NULL);
1059                         if (mem) {
1060                                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1061                                 mem_cgroup_put(mem);
1062                         }
1063                 }
1064         }
1065         if (!locked)
1066                 unlock_page(page);
1067         /* add this page(page_cgroup) to the LRU we want. */
1068         mem_cgroup_lru_fixup(page);
1069
1070         return ret;
1071 }
1072 #endif
1073
1074 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
1075 {
1076         struct page_cgroup *pc;
1077
1078         if (mem_cgroup_disabled())
1079                 return;
1080         if (!ptr)
1081                 return;
1082         pc = lookup_page_cgroup(page);
1083         __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1084         /*
1085          * Now the swap entry is back in memory. This means this page may be
1086          * counted both as mem and swap, i.e. double-counted.
1087          * Fix it by uncharging from memsw. This SwapCache page is stable
1088          * because we're still under lock_page().
1089          */
1090         if (do_swap_account) {
1091                 swp_entry_t ent = {.val = page_private(page)};
1092                 struct mem_cgroup *memcg;
1093                 memcg = swap_cgroup_record(ent, NULL);
1094                 if (memcg) {
1095                         /* If memcg is obsolete, memcg can be != ptr */
1096                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1097                         mem_cgroup_put(memcg);
1098                 }
1099
1100         }
1101         /* add this page(page_cgroup) to the LRU we want. */
1102         mem_cgroup_lru_fixup(page);
1103 }
1104
1105 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
1106 {
1107         if (mem_cgroup_disabled())
1108                 return;
1109         if (!mem)
1110                 return;
1111         res_counter_uncharge(&mem->res, PAGE_SIZE);
1112         if (do_swap_account)
1113                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1114         css_put(&mem->css);
1115 }
1116
1117
1118 /*
1119  * uncharge if !page_mapped(page)
1120  */
1121 static struct mem_cgroup *
1122 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1123 {
1124         struct page_cgroup *pc;
1125         struct mem_cgroup *mem = NULL;
1126         struct mem_cgroup_per_zone *mz;
1127
1128         if (mem_cgroup_disabled())
1129                 return NULL;
1130
1131         if (PageSwapCache(page))
1132                 return NULL;
1133
1134         /*
1135          * Check if our page_cgroup is valid
1136          */
1137         pc = lookup_page_cgroup(page);
1138         if (unlikely(!pc || !PageCgroupUsed(pc)))
1139                 return NULL;
1140
1141         lock_page_cgroup(pc);
1142
1143         mem = pc->mem_cgroup;
1144
1145         if (!PageCgroupUsed(pc))
1146                 goto unlock_out;
1147
1148         switch (ctype) {
1149         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1150                 if (page_mapped(page))
1151                         goto unlock_out;
1152                 break;
1153         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
1154                 if (!PageAnon(page)) {  /* Shared memory */
1155                         if (page->mapping && !page_is_file_cache(page))
1156                                 goto unlock_out;
1157                 } else if (page_mapped(page)) /* Anon */
1158                                 goto unlock_out;
1159                 break;
1160         default:
1161                 break;
1162         }
1163
1164         res_counter_uncharge(&mem->res, PAGE_SIZE);
1165         if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
1166                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1167
1168         mem_cgroup_charge_statistics(mem, pc, false);
1169         ClearPageCgroupUsed(pc);
1170
1171         mz = page_cgroup_zoneinfo(pc);
1172         unlock_page_cgroup(pc);
1173
1174         css_put(&mem->css);
1175
1176         return mem;
1177
1178 unlock_out:
1179         unlock_page_cgroup(pc);
1180         return NULL;
1181 }
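/*
 * Note: for MEM_CGROUP_CHARGE_TYPE_SWAPOUT the memsw charge is deliberately
 * *not* dropped here; the page is leaving memory for swap, so the mem+swap
 * usage is unchanged. mem_cgroup_uncharge_swapcache() records the owning
 * memcg in the swap_cgroup so the memsw charge can be released later by
 * mem_cgroup_uncharge_swap().
 */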
1182
1183 void mem_cgroup_uncharge_page(struct page *page)
1184 {
1185         /* early check. */
1186         if (page_mapped(page))
1187                 return;
1188         if (page->mapping && !PageAnon(page))
1189                 return;
1190         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1191 }
1192
1193 void mem_cgroup_uncharge_cache_page(struct page *page)
1194 {
1195         VM_BUG_ON(page_mapped(page));
1196         VM_BUG_ON(page->mapping);
1197         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
1198 }
1199
1200 /*
1201  * Called from __delete_from_swap_cache(); drops the page's memory account.
1202  * The memcg information is recorded in the swap_cgroup of "ent".
1203  */
1204 void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
1205 {
1206         struct mem_cgroup *memcg;
1207
1208         memcg = __mem_cgroup_uncharge_common(page,
1209                                         MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
1210         /* record memcg information */
1211         if (do_swap_account && memcg) {
1212                 swap_cgroup_record(ent, memcg);
1213                 mem_cgroup_get(memcg);
1214         }
1215 }
1216
1217 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1218 /*
1219  * Called from swap_entry_free(). Removes the record in swap_cgroup and
1220  * uncharges the "memsw" account.
1221  */
1222 void mem_cgroup_uncharge_swap(swp_entry_t ent)
1223 {
1224         struct mem_cgroup *memcg;
1225
1226         if (!do_swap_account)
1227                 return;
1228
1229         memcg = swap_cgroup_record(ent, NULL);
1230         if (memcg) {
1231                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1232                 mem_cgroup_put(memcg);
1233         }
1234 }
1235 #endif
1236
1237 /*
1238  * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
1239  * page belongs to.
1240  */
1241 int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
1242 {
1243         struct page_cgroup *pc;
1244         struct mem_cgroup *mem = NULL;
1245         int ret = 0;
1246
1247         if (mem_cgroup_disabled())
1248                 return 0;
1249
1250         pc = lookup_page_cgroup(page);
1251         lock_page_cgroup(pc);
1252         if (PageCgroupUsed(pc)) {
1253                 mem = pc->mem_cgroup;
1254                 css_get(&mem->css);
1255         }
1256         unlock_page_cgroup(pc);
1257
1258         if (mem) {
1259                 ret = mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem);
1260                 css_put(&mem->css);
1261         }
1262         *ptr = mem;
1263         return ret;
1264 }
1265
1266 /* remove redundant charge if migration failed*/
1267 void mem_cgroup_end_migration(struct mem_cgroup *mem,
1268                 struct page *oldpage, struct page *newpage)
1269 {
1270         struct page *target, *unused;
1271         struct page_cgroup *pc;
1272         enum charge_type ctype;
1273
1274         if (!mem)
1275                 return;
1276
1277         /* at migration success, oldpage->mapping is NULL. */
1278         if (oldpage->mapping) {
1279                 target = oldpage;
1280                 unused = NULL;
1281         } else {
1282                 target = newpage;
1283                 unused = oldpage;
1284         }
1285
1286         if (PageAnon(target))
1287                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
1288         else if (page_is_file_cache(target))
1289                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
1290         else
1291                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
1292
1293         /* unused page is not on radix-tree now. */
1294         if (unused)
1295                 __mem_cgroup_uncharge_common(unused, ctype);
1296
1297         pc = lookup_page_cgroup(target);
1298         /*
1299          * __mem_cgroup_commit_charge() checks the PCG_USED bit of the page_cgroup.
1300          * So, double-counting is effectively avoided.
1301          */
1302         __mem_cgroup_commit_charge(mem, pc, ctype);
1303
1304         /*
1305          * Both oldpage and newpage are still under lock_page(),
1306          * so we don't have to worry about races in the radix-tree.
1307          * But we do have to check whether this page is mapped or not.
1308          *
1309          * !page_mapped() can happen: at the start of migration
1310          * oldpage was mapped, but by now it may have been zapped.
1311          * We know the *target* page is not freed/reused under us;
1312          * mem_cgroup_uncharge_page() does all the necessary checks.
1313          */
1314         if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
1315                 mem_cgroup_uncharge_page(target);
1316 }
1317
1318 /*
1319  * Try to shrink memory usage under the specified memory controller.
1320  * This is typically used for reclaiming shmem pages, to reduce the side
1321  * effects of page allocation from shmem, which is used by some mem_cgroup.
1322  */
1323 int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
1324 {
1325         struct mem_cgroup *mem;
1326         int progress = 0;
1327         int retry = MEM_CGROUP_RECLAIM_RETRIES;
1328
1329         if (mem_cgroup_disabled())
1330                 return 0;
1331         if (!mm)
1332                 return 0;
1333
1334         rcu_read_lock();
1335         mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
1336         if (unlikely(!mem)) {
1337                 rcu_read_unlock();
1338                 return 0;
1339         }
1340         css_get(&mem->css);
1341         rcu_read_unlock();
1342
1343         do {
1344                 progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true);
1345                 progress += mem_cgroup_check_under_limit(mem);
1346         } while (!progress && --retry);
1347
1348         css_put(&mem->css);
1349         if (!retry)
1350                 return -ENOMEM;
1351         return 0;
1352 }
1353
1354 static DEFINE_MUTEX(set_limit_mutex);
1355
1356 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
1357                                 unsigned long long val)
1358 {
1359
1360         int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
1361         int progress;
1362         u64 memswlimit;
1363         int ret = 0;
1364
1365         while (retry_count) {
1366                 if (signal_pending(current)) {
1367                         ret = -EINTR;
1368                         break;
1369                 }
1370                 /*
1371                  * Rather than hiding all this in some function, do it
1372                  * open-coded so that what this really does is visible.
1373                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
1374                  */
1375                 mutex_lock(&set_limit_mutex);
1376                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1377                 if (memswlimit < val) {
1378                         ret = -EINVAL;
1379                         mutex_unlock(&set_limit_mutex);
1380                         break;
1381                 }
1382                 ret = res_counter_set_limit(&memcg->res, val);
1383                 mutex_unlock(&set_limit_mutex);
1384
1385                 if (!ret)
1386                         break;
1387
1388                 progress = try_to_free_mem_cgroup_pages(memcg,
1389                                 GFP_KERNEL, false);
1390                 if (!progress)
                             retry_count--;
1391         }
1392         return ret;
1393 }
1394
1395 int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
1396                                 unsigned long long val)
1397 {
1398         int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
1399         u64 memlimit, oldusage, curusage;
1400         int ret;
1401
1402         if (!do_swap_account)
1403                 return -EINVAL;
1404
1405         while (retry_count) {
1406                 if (signal_pending(current)) {
1407                         ret = -EINTR;
1408                         break;
1409                 }
1410                 /*
1411                  * Rather than hiding all this in some function, do it
1412                  * open-coded so that what this really does is visible.
1413                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
1414                  */
1415                 mutex_lock(&set_limit_mutex);
1416                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1417                 if (memlimit > val) {
1418                         ret = -EINVAL;
1419                         mutex_unlock(&set_limit_mutex);
1420                         break;
1421                 }
1422                 ret = res_counter_set_limit(&memcg->memsw, val);
1423                 mutex_unlock(&set_limit_mutex);
1424
1425                 if (!ret)
1426                         break;
1427
1428                 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1429                 try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true);
1430                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1431                 if (curusage >= oldusage)
1432                         retry_count--;
1433         }
1434         return ret;
1435 }
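/*
 * Note: together, the two resize functions above maintain the invariant
 * res.limit <= memsw.limit: lowering memsw.limit below res.limit, or raising
 * res.limit above memsw.limit, is rejected with -EINVAL, and both checks are
 * serialized by set_limit_mutex.
 */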
1436
1437 /*
1438  * This routine traverses the page_cgroups in the given list and drops them all.
1439  * It does not reclaim the pages themselves, only removes the page_cgroups.
1440  */
1441 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
1442                                 int node, int zid, enum lru_list lru)
1443 {
1444         struct zone *zone;
1445         struct mem_cgroup_per_zone *mz;
1446         struct page_cgroup *pc, *busy;
1447         unsigned long flags, loop;
1448         struct list_head *list;
1449         int ret = 0;
1450
1451         zone = &NODE_DATA(node)->node_zones[zid];
1452         mz = mem_cgroup_zoneinfo(mem, node, zid);
1453         list = &mz->lists[lru];
1454
1455         loop = MEM_CGROUP_ZSTAT(mz, lru);
1456         /* give some margin against EBUSY etc...*/
1457         loop += 256;
1458         busy = NULL;
1459         while (loop--) {
1460                 ret = 0;
1461                 spin_lock_irqsave(&zone->lru_lock, flags);
1462                 if (list_empty(list)) {
1463                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1464                         break;
1465                 }
1466                 pc = list_entry(list->prev, struct page_cgroup, lru);
1467                 if (busy == pc) {
1468                         list_move(&pc->lru, list);
1469                         busy = NULL;
1470                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1471                         continue;
1472                 }
1473                 spin_unlock_irqrestore(&zone->lru_lock, flags);
1474
1475                 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
1476                 if (ret == -ENOMEM)
1477                         break;
1478
1479                 if (ret == -EBUSY || ret == -EINVAL) {
1480                         /* found lock contention or "pc" is obsolete. */
1481                         busy = pc;
1482                         cond_resched();
1483                 } else
1484                         busy = NULL;
1485         }
1486
1487         if (!ret && !list_empty(list))
1488                 return -EBUSY;
1489         return ret;
1490 }
1491
1492 /*
1493  * Make the mem_cgroup's charge 0 if there are no tasks.
1494  * This makes it possible to delete this mem_cgroup.
1495  */
1496 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
1497 {
1498         int ret;
1499         int node, zid, shrink;
1500         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1501         struct cgroup *cgrp = mem->css.cgroup;
1502
1503         css_get(&mem->css);
1504
1505         shrink = 0;
1506         /* should free all ? */
1507         if (free_all)
1508                 goto try_to_free;
1509 move_account:
1510         while (mem->res.usage > 0) {
1511                 ret = -EBUSY;
1512                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
1513                         goto out;
1514                 ret = -EINTR;
1515                 if (signal_pending(current))
1516                         goto out;
1517                 /* This is for making sure all *used* pages are on an LRU. */
1518                 lru_add_drain_all();
1519                 ret = 0;
1520                 for_each_node_state(node, N_POSSIBLE) {
1521                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
1522                                 enum lru_list l;
1523                                 for_each_lru(l) {
1524                                         ret = mem_cgroup_force_empty_list(mem,
1525                                                         node, zid, l);
1526                                         if (ret)
1527                                                 break;
1528                                 }
1529                         }
1530                         if (ret)
1531                                 break;
1532                 }
1533                 /* it seems parent cgroup doesn't have enough mem */
1534                 if (ret == -ENOMEM)
1535                         goto try_to_free;
1536                 cond_resched();
1537         }
1538         ret = 0;
1539 out:
1540         css_put(&mem->css);
1541         return ret;
1542
1543 try_to_free:
1544         /* returns EBUSY if there is a task or if we come here twice. */
1545         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
1546                 ret = -EBUSY;
1547                 goto out;
1548         }
1549         /* we call try_to_free pages to make this cgroup empty */
1550         lru_add_drain_all();
1551         /* try to free all pages in this cgroup */
1552         shrink = 1;
1553         while (nr_retries && mem->res.usage > 0) {
1554                 int progress;
1555
1556                 if (signal_pending(current)) {
1557                         ret = -EINTR;
1558                         goto out;
1559                 }
1560                 progress = try_to_free_mem_cgroup_pages(mem,
1561                                                   GFP_KERNEL, false);
1562                 if (!progress) {
1563                         nr_retries--;
1564                         /* maybe some writeback is necessary */
1565                         congestion_wait(WRITE, HZ/10);
1566                 }
1567
1568         }
1569         lru_add_drain();
1570         /* try move_account...there may be some *locked* pages. */
1571         if (mem->res.usage)
1572                 goto move_account;
1573         ret = 0;
1574         goto out;
1575 }
1576
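/*
 * Trigger handler for the memory.force_empty control file (see
 * mem_cgroup_files[] below).  Illustrative usage from userspace, assuming
 * the memory cgroup filesystem is mounted at /cgroups/memory:
 *
 *   echo 0 > /cgroups/memory/<group>/memory.force_empty
 *
 * Writing to the file triggers the handler; the written value is not used.
 */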
1577 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
1578 {
1579         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
1580 }
1581
1582
1583 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
1584 {
1585         return mem_cgroup_from_cont(cont)->use_hierarchy;
1586 }
1587
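/*
 * Write handler for memory.use_hierarchy.  The value must be 0 or 1 and
 * can only be changed while this cgroup has no children and the parent
 * does not itself have use_hierarchy set; e.g. (illustrative path):
 *
 *   echo 1 > /cgroups/memory/<group>/memory.use_hierarchy
 */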
1588 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
1589                                         u64 val)
1590 {
1591         int retval = 0;
1592         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1593         struct cgroup *parent = cont->parent;
1594         struct mem_cgroup *parent_mem = NULL;
1595
1596         if (parent)
1597                 parent_mem = mem_cgroup_from_cont(parent);
1598
1599         cgroup_lock();
1600         /*
1601          * If the parent's use_hierarchy is set, we can't make any modifications
1602          * in the child subtrees. If it is unset, then the change can
1603          * occur, provided the current cgroup has no children.
1604          *
1605          * For the root cgroup, parent_mem is NULL; we allow the value to be
1606          * set if there are no children.
1607          */
1608         if ((!parent_mem || !parent_mem->use_hierarchy) &&
1609                                 (val == 1 || val == 0)) {
1610                 if (list_empty(&cont->children))
1611                         mem->use_hierarchy = val;
1612                 else
1613                         retval = -EBUSY;
1614         } else
1615                 retval = -EINVAL;
1616         cgroup_unlock();
1617
1618         return retval;
1619 }
1620
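/*
 * For the memory.* and memory.memsw.* files below, cft->private packs a
 * counter type (_MEM or _MEMSWAP) together with an attribute (RES_USAGE,
 * RES_LIMIT, RES_MAX_USAGE or RES_FAILCNT) via MEMFILE_PRIVATE(); the
 * handlers unpack it again with MEMFILE_TYPE()/MEMFILE_ATTR().  For
 * example, memory.memsw.limit_in_bytes uses
 * MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), so mem_cgroup_write() sees
 * type == _MEMSWAP and name == RES_LIMIT.
 */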
1621 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
1622 {
1623         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1624         u64 val = 0;
1625         int type, name;
1626
1627         type = MEMFILE_TYPE(cft->private);
1628         name = MEMFILE_ATTR(cft->private);
1629         switch (type) {
1630         case _MEM:
1631                 val = res_counter_read_u64(&mem->res, name);
1632                 break;
1633         case _MEMSWAP:
1634                 if (do_swap_account)
1635                         val = res_counter_read_u64(&mem->memsw, name);
1636                 break;
1637         default:
1638                 BUG();
1639                 break;
1640         }
1641         return val;
1642 }
1643 /*
1644  * The only user of this function is the
1645  * RES_LIMIT file.
1646  */
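/*
 * Illustrative usage, assuming the memory cgroup filesystem is mounted at
 * /cgroups/memory:
 *
 *   echo 64M > /cgroups/memory/<group>/memory.limit_in_bytes
 *   echo 96M > /cgroups/memory/<group>/memory.memsw.limit_in_bytes
 *
 * The buffer is parsed by res_counter_memparse_write_strategy(), so
 * memparse() suffixes such as K, M and G are accepted.
 */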
1647 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
1648                             const char *buffer)
1649 {
1650         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
1651         int type, name;
1652         unsigned long long val;
1653         int ret;
1654
1655         type = MEMFILE_TYPE(cft->private);
1656         name = MEMFILE_ATTR(cft->private);
1657         switch (name) {
1658         case RES_LIMIT:
1659                 /* This function does all the necessary parsing...reuse it */
1660                 ret = res_counter_memparse_write_strategy(buffer, &val);
1661                 if (ret)
1662                         break;
1663                 if (type == _MEM)
1664                         ret = mem_cgroup_resize_limit(memcg, val);
1665                 else
1666                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
1667                 break;
1668         default:
1669                 ret = -EINVAL; /* should be BUG() ? */
1670                 break;
1671         }
1672         return ret;
1673 }
1674
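/*
 * Trigger handler shared by the *.max_usage_in_bytes and *.failcnt files
 * (see the cftype tables below): writing to one of them resets the
 * corresponding counter, e.g. (illustrative path)
 *
 *   echo 0 > /cgroups/memory/<group>/memory.max_usage_in_bytes
 */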
1675 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
1676 {
1677         struct mem_cgroup *mem;
1678         int type, name;
1679
1680         mem = mem_cgroup_from_cont(cont);
1681         type = MEMFILE_TYPE(event);
1682         name = MEMFILE_ATTR(event);
1683         switch (name) {
1684         case RES_MAX_USAGE:
1685                 if (type == _MEM)
1686                         res_counter_reset_max(&mem->res);
1687                 else
1688                         res_counter_reset_max(&mem->memsw);
1689                 break;
1690         case RES_FAILCNT:
1691                 if (type == _MEM)
1692                         res_counter_reset_failcnt(&mem->res);
1693                 else
1694                         res_counter_reset_failcnt(&mem->memsw);
1695                 break;
1696         }
1697         return 0;
1698 }
1699
1700 static const struct mem_cgroup_stat_desc {
1701         const char *msg;
1702         u64 unit;
1703 } mem_cgroup_stat_desc[] = {
1704         [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
1705         [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
1706         [MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
1707         [MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
1708 };
1709
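/*
 * Read handler for memory.stat.  Roughly, it emits one "name value" pair
 * per line: the per-cpu counters from mem_cgroup_stat_desc[] above (cache
 * and rss in bytes, pgpgin/pgpgout as event counts) followed by the LRU
 * sizes (active_anon, inactive_anon, active_file, inactive_file,
 * unevictable), also in bytes.
 */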
1710 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
1711                                  struct cgroup_map_cb *cb)
1712 {
1713         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
1714         struct mem_cgroup_stat *stat = &mem_cont->stat;
1715         int i;
1716
1717         for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
1718                 s64 val;
1719
1720                 val = mem_cgroup_read_stat(stat, i);
1721                 val *= mem_cgroup_stat_desc[i].unit;
1722                 cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
1723         }
1724         /* showing # of active pages */
1725         {
1726                 unsigned long active_anon, inactive_anon;
1727                 unsigned long active_file, inactive_file;
1728                 unsigned long unevictable;
1729
1730                 inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
1731                                                 LRU_INACTIVE_ANON);
1732                 active_anon = mem_cgroup_get_all_zonestat(mem_cont,
1733                                                 LRU_ACTIVE_ANON);
1734                 inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
1735                                                 LRU_INACTIVE_FILE);
1736                 active_file = mem_cgroup_get_all_zonestat(mem_cont,
1737                                                 LRU_ACTIVE_FILE);
1738                 unevictable = mem_cgroup_get_all_zonestat(mem_cont,
1739                                                         LRU_UNEVICTABLE);
1740
1741                 cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
1742                 cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
1743                 cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
1744                 cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
1745                 cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
1746
1747         }
1748         return 0;
1749 }
1750
1751
1752 static struct cftype mem_cgroup_files[] = {
1753         {
1754                 .name = "usage_in_bytes",
1755                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
1756                 .read_u64 = mem_cgroup_read,
1757         },
1758         {
1759                 .name = "max_usage_in_bytes",
1760                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
1761                 .trigger = mem_cgroup_reset,
1762                 .read_u64 = mem_cgroup_read,
1763         },
1764         {
1765                 .name = "limit_in_bytes",
1766                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
1767                 .write_string = mem_cgroup_write,
1768                 .read_u64 = mem_cgroup_read,
1769         },
1770         {
1771                 .name = "failcnt",
1772                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
1773                 .trigger = mem_cgroup_reset,
1774                 .read_u64 = mem_cgroup_read,
1775         },
1776         {
1777                 .name = "stat",
1778                 .read_map = mem_control_stat_show,
1779         },
1780         {
1781                 .name = "force_empty",
1782                 .trigger = mem_cgroup_force_empty_write,
1783         },
1784         {
1785                 .name = "use_hierarchy",
1786                 .write_u64 = mem_cgroup_hierarchy_write,
1787                 .read_u64 = mem_cgroup_hierarchy_read,
1788         },
1789 };
1790
1791 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1792 static struct cftype memsw_cgroup_files[] = {
1793         {
1794                 .name = "memsw.usage_in_bytes",
1795                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
1796                 .read_u64 = mem_cgroup_read,
1797         },
1798         {
1799                 .name = "memsw.max_usage_in_bytes",
1800                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
1801                 .trigger = mem_cgroup_reset,
1802                 .read_u64 = mem_cgroup_read,
1803         },
1804         {
1805                 .name = "memsw.limit_in_bytes",
1806                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
1807                 .write_string = mem_cgroup_write,
1808                 .read_u64 = mem_cgroup_read,
1809         },
1810         {
1811                 .name = "memsw.failcnt",
1812                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
1813                 .trigger = mem_cgroup_reset,
1814                 .read_u64 = mem_cgroup_read,
1815         },
1816 };
1817
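/*
 * The memsw.* files are only registered when swap accounting is active
 * (do_swap_account); the !CONFIG_CGROUP_MEM_RES_CTLR_SWAP stub below does
 * nothing.
 */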
1818 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
1819 {
1820         if (!do_swap_account)
1821                 return 0;
1822         return cgroup_add_files(cont, ss, memsw_cgroup_files,
1823                                 ARRAY_SIZE(memsw_cgroup_files));
1824 }
1825 #else
1826 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
1827 {
1828         return 0;
1829 }
1830 #endif
1831
1832 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1833 {
1834         struct mem_cgroup_per_node *pn;
1835         struct mem_cgroup_per_zone *mz;
1836         enum lru_list l;
1837         int zone, tmp = node;
1838         /*
1839          * This routine is called against each possible node.
1840          * But it's a BUG to call kmalloc() against an offline node.
1841          *
1842          * TODO: this routine can waste a lot of memory for nodes which will
1843          *       never be onlined. It's better to use a memory hotplug callback
1844          *       function.
1845          */
1846         if (!node_state(node, N_NORMAL_MEMORY))
1847                 tmp = -1;
1848         pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
1849         if (!pn)
1850                 return 1;
1851
1852         mem->info.nodeinfo[node] = pn;
1853         memset(pn, 0, sizeof(*pn));
1854
1855         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
1856                 mz = &pn->zoneinfo[zone];
1857                 for_each_lru(l)
1858                         INIT_LIST_HEAD(&mz->lists[l]);
1859         }
1860         return 0;
1861 }
1862
1863 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1864 {
1865         kfree(mem->info.nodeinfo[node]);
1866 }
1867
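/*
 * struct mem_cgroup is allocated together with its per-cpu statistics
 * array (one mem_cgroup_stat_cpu per possible cpu), so its total size
 * scales with nr_cpu_ids and may exceed PAGE_SIZE on large machines;
 * mem_cgroup_alloc() therefore falls back to vmalloc() in that case.
 */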
1868 static int mem_cgroup_size(void)
1869 {
1870         int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
1871         return sizeof(struct mem_cgroup) + cpustat_size;
1872 }
1873
1874 static struct mem_cgroup *mem_cgroup_alloc(void)
1875 {
1876         struct mem_cgroup *mem;
1877         int size = mem_cgroup_size();
1878
1879         if (size < PAGE_SIZE)
1880                 mem = kmalloc(size, GFP_KERNEL);
1881         else
1882                 mem = vmalloc(size);
1883
1884         if (mem)
1885                 memset(mem, 0, size);
1886         return mem;
1887 }
1888
1889 /*
1890  * At destroying mem_cgroup, references from swap_cgroup can remain.
1891  * (scanning all at force_empty is too costly...)
1892  *
1893  * Instead of clearing all references at force_empty, we remember
1894  * the number of references from swap_cgroup and free the mem_cgroup when
1895  * it goes down to 0.
1896  *
1897  * When the mem_cgroup is destroyed, mem->obsolete will be set to 1 and any
1898  * swap entry which points to this memcg will be ignored at swapin.
1899  *
1900  * Removal of cgroup itself succeeds regardless of refs from swap.
1901  */
1902
1903 static void mem_cgroup_free(struct mem_cgroup *mem)
1904 {
1905         int node;
1906
1907         if (atomic_read(&mem->refcnt) > 0)
1908                 return;
1909
1910
1911         for_each_node_state(node, N_POSSIBLE)
1912                 free_mem_cgroup_per_zone_info(mem, node);
1913
1914         if (mem_cgroup_size() < PAGE_SIZE)
1915                 kfree(mem);
1916         else
1917                 vfree(mem);
1918 }
1919
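/*
 * mem_cgroup_get()/mem_cgroup_put() manage the swap references described
 * above.  The memcg is freed either at cgroup destruction, if no swap
 * references remain, or later from mem_cgroup_put(), once the last
 * reference is dropped on an already-obsolete memcg.
 */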
1920 static void mem_cgroup_get(struct mem_cgroup *mem)
1921 {
1922         atomic_inc(&mem->refcnt);
1923 }
1924
1925 static void mem_cgroup_put(struct mem_cgroup *mem)
1926 {
1927         if (atomic_dec_and_test(&mem->refcnt)) {
1928                 if (!mem->obsolete)
1929                         return;
1930                 mem_cgroup_free(mem);
1931         }
1932 }
1933
1934
1935 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1936 static void __init enable_swap_cgroup(void)
1937 {
1938         if (!mem_cgroup_disabled() && really_do_swap_account)
1939                 do_swap_account = 1;
1940 }
1941 #else
1942 static void __init enable_swap_cgroup(void)
1943 {
1944 }
1945 #endif
1946
1947 static struct cgroup_subsys_state *
1948 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
1949 {
1950         struct mem_cgroup *mem, *parent;
1951         int node;
1952
1953         mem = mem_cgroup_alloc();
1954         if (!mem)
1955                 return ERR_PTR(-ENOMEM);
1956
1957         for_each_node_state(node, N_POSSIBLE)
1958                 if (alloc_mem_cgroup_per_zone_info(mem, node))
1959                         goto free_out;
1960         /* root ? */
1961         if (cont->parent == NULL) {
1962                 enable_swap_cgroup();
1963                 parent = NULL;
1964         } else {
1965                 parent = mem_cgroup_from_cont(cont->parent);
1966                 mem->use_hierarchy = parent->use_hierarchy;
1967         }
1968
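        /*
         * In hierarchical mode the child's counters are parented to the
         * parent's counters, so a charge against the child is also charged
         * against (and limited by) every ancestor.
         */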
1969         if (parent && parent->use_hierarchy) {
1970                 res_counter_init(&mem->res, &parent->res);
1971                 res_counter_init(&mem->memsw, &parent->memsw);
1972         } else {
1973                 res_counter_init(&mem->res, NULL);
1974                 res_counter_init(&mem->memsw, NULL);
1975         }
1976
1977         mem->last_scanned_child = NULL;
1978
1979         return &mem->css;
1980 free_out:
1981         for_each_node_state(node, N_POSSIBLE)
1982                 free_mem_cgroup_per_zone_info(mem, node);
1983         mem_cgroup_free(mem);
1984         return ERR_PTR(-ENOMEM);
1985 }
1986
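/*
 * ->pre_destroy marks the memcg obsolete and tries to drop or reparent all
 * of its charges before the cgroup is removed; ->destroy then frees the
 * mem_cgroup unless swap references are still holding it (see
 * mem_cgroup_put() above).
 */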
1987 static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
1988                                         struct cgroup *cont)
1989 {
1990         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1991         mem->obsolete = 1;
1992         mem_cgroup_force_empty(mem, false);
1993 }
1994
1995 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
1996                                 struct cgroup *cont)
1997 {
1998         mem_cgroup_free(mem_cgroup_from_cont(cont));
1999 }
2000
2001 static int mem_cgroup_populate(struct cgroup_subsys *ss,
2002                                 struct cgroup *cont)
2003 {
2004         int ret;
2005
2006         ret = cgroup_add_files(cont, ss, mem_cgroup_files,
2007                                 ARRAY_SIZE(mem_cgroup_files));
2008
2009         if (!ret)
2010                 ret = register_memsw_files(cont, ss);
2011         return ret;
2012 }
2013
2014 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
2015                                 struct cgroup *cont,
2016                                 struct cgroup *old_cont,
2017                                 struct task_struct *p)
2018 {
2019         /*
2020          * FIXME: It would be better to move this process's charges from the
2021          * old memcg to the new one, but that is still on the TODO list.
2022          */
2023 }
2024
2025 struct cgroup_subsys mem_cgroup_subsys = {
2026         .name = "memory",
2027         .subsys_id = mem_cgroup_subsys_id,
2028         .create = mem_cgroup_create,
2029         .pre_destroy = mem_cgroup_pre_destroy,
2030         .destroy = mem_cgroup_destroy,
2031         .populate = mem_cgroup_populate,
2032         .attach = mem_cgroup_move_task,
2033         .early_init = 0,
2034 };
2035
2036 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2037
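/*
 * Boot-time switch: passing "noswapaccount" on the kernel command line
 * keeps memory+swap accounting disabled even when
 * CONFIG_CGROUP_MEM_RES_CTLR_SWAP is built in.
 */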
2038 static int __init disable_swap_account(char *s)
2039 {
2040         really_do_swap_account = 0;
2041         return 1;
2042 }
2043 __setup("noswapaccount", disable_swap_account);
2044 #endif