memcg: protect prev_priority
1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19
20 #include <linux/res_counter.h>
21 #include <linux/memcontrol.h>
22 #include <linux/cgroup.h>
23 #include <linux/mm.h>
24 #include <linux/pagemap.h>
25 #include <linux/smp.h>
26 #include <linux/page-flags.h>
27 #include <linux/backing-dev.h>
28 #include <linux/bit_spinlock.h>
29 #include <linux/rcupdate.h>
30 #include <linux/mutex.h>
31 #include <linux/slab.h>
32 #include <linux/swap.h>
33 #include <linux/spinlock.h>
34 #include <linux/fs.h>
35 #include <linux/seq_file.h>
36 #include <linux/vmalloc.h>
37 #include <linux/mm_inline.h>
38 #include <linux/page_cgroup.h>
39 #include "internal.h"
40
41 #include <asm/uaccess.h>
42
43 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
44 #define MEM_CGROUP_RECLAIM_RETRIES      5
45
46 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
47 /* Turned on only when memory cgroup is enabled && really_do_swap_account == 1 */
48 int do_swap_account __read_mostly;
49 static int really_do_swap_account __initdata = 1; /* remembers the boot option */
50 #else
51 #define do_swap_account         (0)
52 #endif
53
54
55 /*
56  * Statistics for memory cgroup.
57  */
58 enum mem_cgroup_stat_index {
59         /*
60          * For the memory controller as a whole, usage = pagecache + rss.
61          */
62         MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
63         MEM_CGROUP_STAT_RSS,       /* # of pages charged as rss */
64         MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
65         MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
66
67         MEM_CGROUP_STAT_NSTATS,
68 };
69
70 struct mem_cgroup_stat_cpu {
71         s64 count[MEM_CGROUP_STAT_NSTATS];
72 } ____cacheline_aligned_in_smp;
73
74 struct mem_cgroup_stat {
75         struct mem_cgroup_stat_cpu cpustat[0];
76 };
77
78 /*
79  * For accounting with irqs disabled; no need to bump the preempt count.
80  */
81 static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
82                 enum mem_cgroup_stat_index idx, int val)
83 {
84         stat->count[idx] += val;
85 }
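/*
 * Usage note (illustrative, not part of the original file):
 * mem_cgroup_charge_statistics() below brackets these adds with
 * get_cpu()/put_cpu(), so the plain "+=" on the per-cpu slot is safe
 * without atomics:
 *
 *	int cpu = get_cpu();
 *	cpustat = &stat->cpustat[cpu];
 *	__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, 1);
 *	put_cpu();
 */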
86
87 static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
88                 enum mem_cgroup_stat_index idx)
89 {
90         int cpu;
91         s64 ret = 0;
92         for_each_possible_cpu(cpu)
93                 ret += stat->cpustat[cpu].count[idx];
94         return ret;
95 }
96
97 /*
98  * per-zone information in memory controller.
99  */
100 struct mem_cgroup_per_zone {
101         /*
102          * The per-cgroup LRU, protected by zone->lru_lock
103          */
104         struct list_head        lists[NR_LRU_LISTS];
105         unsigned long           count[NR_LRU_LISTS];
106
107         struct zone_reclaim_stat reclaim_stat;
108 };
109 /* Macro for accessing counter */
110 #define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])
111
112 struct mem_cgroup_per_node {
113         struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
114 };
115
116 struct mem_cgroup_lru_info {
117         struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
118 };
119
120 /*
121  * The memory controller data structure. The memory controller controls both
122  * page cache and RSS per cgroup. We would eventually like to provide
123  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
124  * to help the administrator determine what knobs to tune.
125  *
126  * TODO: Add a watermark for the memory controller. Reclaim will begin when
127  * we hit the watermark. Maybe even add a low watermark, such that
128  * no reclaim occurs from a cgroup at its low watermark; this is
129  * a feature to be implemented much later.
130  */
131 struct mem_cgroup {
132         struct cgroup_subsys_state css;
133         /*
134          * the counter to account for memory usage
135          */
136         struct res_counter res;
137         /*
138          * the counter to account for mem+swap usage.
139          */
140         struct res_counter memsw;
141         /*
142          * Per cgroup active and inactive list, similar to the
143          * per zone LRU lists.
144          */
145         struct mem_cgroup_lru_info info;
146
147         /*
148          * Protects reclaim-related members, e.g. prev_priority.
149          */
150         spinlock_t reclaim_param_lock;
151
152         int     prev_priority;  /* for recording reclaim priority */
153
154         /*
155          * While reclaiming in a hierarchy, we cache the last child we
156          * reclaimed from. Protected by cgroup_lock()
157          */
158         struct mem_cgroup *last_scanned_child;
159         /*
160          * Should the accounting and control be hierarchical, per subtree?
161          */
162         bool use_hierarchy;
163         unsigned long   last_oom_jiffies;
164         int             obsolete;
165         atomic_t        refcnt;
166
167         unsigned int inactive_ratio;
168
169         /*
170          * statistics. This must be placed at the end of memcg.
171          */
172         struct mem_cgroup_stat stat;
173 };
174
175 enum charge_type {
176         MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
177         MEM_CGROUP_CHARGE_TYPE_MAPPED,
178         MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
179         MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
180         MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
181         NR_CHARGE_TYPE,
182 };
183
184 /* local to this file, for readability */
185 #define PCGF_CACHE      (1UL << PCG_CACHE)
186 #define PCGF_USED       (1UL << PCG_USED)
187 #define PCGF_LOCK       (1UL << PCG_LOCK)
188 static const unsigned long
189 pcg_default_flags[NR_CHARGE_TYPE] = {
190         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
191         PCGF_USED | PCGF_LOCK, /* Anon */
192         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
193         0, /* FORCE */
194 };
195
196 /* for encoding cft->private value on file */
197 #define _MEM                    (0)
198 #define _MEMSWAP                (1)
199 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
200 #define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
201 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
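/*
 * Illustrative sketch (not from this excerpt): a cftype entry packs the
 * counter type and the res_counter member into ->private, e.g.
 *
 *	{
 *		.name = "memsw.limit_in_bytes",
 *		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
 *	},
 *
 * and the read/write handlers decode it again with MEMFILE_TYPE() and
 * MEMFILE_ATTR().
 */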
202
203 static void mem_cgroup_get(struct mem_cgroup *mem);
204 static void mem_cgroup_put(struct mem_cgroup *mem);
205
206 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
207                                          struct page_cgroup *pc,
208                                          bool charge)
209 {
210         int val = (charge)? 1 : -1;
211         struct mem_cgroup_stat *stat = &mem->stat;
212         struct mem_cgroup_stat_cpu *cpustat;
213         int cpu = get_cpu();
214
215         cpustat = &stat->cpustat[cpu];
216         if (PageCgroupCache(pc))
217                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
218         else
219                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
220
221         if (charge)
222                 __mem_cgroup_stat_add_safe(cpustat,
223                                 MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
224         else
225                 __mem_cgroup_stat_add_safe(cpustat,
226                                 MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
227         put_cpu();
228 }
229
230 static struct mem_cgroup_per_zone *
231 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
232 {
233         return &mem->info.nodeinfo[nid]->zoneinfo[zid];
234 }
235
236 static struct mem_cgroup_per_zone *
237 page_cgroup_zoneinfo(struct page_cgroup *pc)
238 {
239         struct mem_cgroup *mem = pc->mem_cgroup;
240         int nid = page_cgroup_nid(pc);
241         int zid = page_cgroup_zid(pc);
242
243         if (!mem)
244                 return NULL;
245
246         return mem_cgroup_zoneinfo(mem, nid, zid);
247 }
248
249 static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
250                                         enum lru_list idx)
251 {
252         int nid, zid;
253         struct mem_cgroup_per_zone *mz;
254         u64 total = 0;
255
256         for_each_online_node(nid)
257                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
258                         mz = mem_cgroup_zoneinfo(mem, nid, zid);
259                         total += MEM_CGROUP_ZSTAT(mz, idx);
260                 }
261         return total;
262 }
263
264 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
265 {
266         return container_of(cgroup_subsys_state(cont,
267                                 mem_cgroup_subsys_id), struct mem_cgroup,
268                                 css);
269 }
270
271 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
272 {
273         /*
274          * mm_update_next_owner() may clear mm->owner to NULL
275          * if it races with swapoff, page migration, etc.
276          * So this can be called with p == NULL.
277          */
278         if (unlikely(!p))
279                 return NULL;
280
281         return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
282                                 struct mem_cgroup, css);
283 }
284
285 /*
286  * The following LRU functions may be used without holding PCG_LOCK.
287  * They are called by the global LRU code, independently of memcg.
288  * What we must take care of here is the validity of pc->mem_cgroup.
289  *
290  * pc->mem_cgroup changes on
291  * 1. charge
292  * 2. moving of the account
293  * Typically, "charge" is done before add-to-lru; the exception is SwapCache,
294  * which is added to the LRU before being charged.
295  * If the PCG_USED bit is not set, the page_cgroup is not on this private LRU.
296  * When moving the account, the page is not on the LRU; it has been isolated.
297  */
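/*
 * Concretely: __mem_cgroup_commit_charge() stores pc->mem_cgroup,
 * issues smp_wmb(), and only then sets PCG_USED; the helpers below
 * pair with that via smp_rmb() around their PageCgroupUsed() check,
 * so a pc seen as USED has a valid pc->mem_cgroup.
 */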
298
299 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
300 {
301         struct page_cgroup *pc;
302         struct mem_cgroup *mem;
303         struct mem_cgroup_per_zone *mz;
304
305         if (mem_cgroup_disabled())
306                 return;
307         pc = lookup_page_cgroup(page);
308         /* can happen while we handle swapcache. */
309         if (list_empty(&pc->lru))
310                 return;
311         mz = page_cgroup_zoneinfo(pc);
312         mem = pc->mem_cgroup;
313         MEM_CGROUP_ZSTAT(mz, lru) -= 1;
314         list_del_init(&pc->lru);
315         return;
316 }
317
318 void mem_cgroup_del_lru(struct page *page)
319 {
320         mem_cgroup_del_lru_list(page, page_lru(page));
321 }
322
323 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
324 {
325         struct mem_cgroup_per_zone *mz;
326         struct page_cgroup *pc;
327
328         if (mem_cgroup_disabled())
329                 return;
330
331         pc = lookup_page_cgroup(page);
332         smp_rmb();
333         /* unused page is not rotated. */
334         if (!PageCgroupUsed(pc))
335                 return;
336         mz = page_cgroup_zoneinfo(pc);
337         list_move(&pc->lru, &mz->lists[lru]);
338 }
339
340 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
341 {
342         struct page_cgroup *pc;
343         struct mem_cgroup_per_zone *mz;
344
345         if (mem_cgroup_disabled())
346                 return;
347         pc = lookup_page_cgroup(page);
348         /* barrier to sync with "charge" */
349         smp_rmb();
350         if (!PageCgroupUsed(pc))
351                 return;
352
353         mz = page_cgroup_zoneinfo(pc);
354         MEM_CGROUP_ZSTAT(mz, lru) += 1;
355         list_add(&pc->lru, &mz->lists[lru]);
356 }
357 /*
358  * Adds swap cache pages to the LRU. Be careful when calling this function:
359  * zone->lru_lock must not be held and irqs must not be disabled.
360  */
361 static void mem_cgroup_lru_fixup(struct page *page)
362 {
363         if (!isolate_lru_page(page))
364                 putback_lru_page(page);
365 }
366
367 void mem_cgroup_move_lists(struct page *page,
368                            enum lru_list from, enum lru_list to)
369 {
370         if (mem_cgroup_disabled())
371                 return;
372         mem_cgroup_del_lru_list(page, from);
373         mem_cgroup_add_lru_list(page, to);
374 }
375
376 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
377 {
378         int ret;
379
380         task_lock(task);
381         ret = task->mm && mm_match_cgroup(task->mm, mem);
382         task_unlock(task);
383         return ret;
384 }
385
386 /*
387  * Calculate the mapped ratio under the memory controller. This is used by
388  * vmscan.c to determine whether we have to reclaim mapped pages.
389  */
390 int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
391 {
392         long total, rss;
393
394         /*
395          * usage is recorded in bytes. But, here, we assume the number of
396          * physical pages can be represented by "long" on any arch.
397          */
398         total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
399         rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
400         return (int)((rss * 100L) / total);
401 }
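/*
 * Worked example (illustrative): with usage = 400MB and RSS = 100MB on
 * a 4KB-page box, total = 102401 pages (the +1 avoids division by zero),
 * rss = 25600 pages, and the function returns 24, i.e. roughly 25%.
 */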
402
403 /*
404  * prev_priority control; this is used by the memory reclaim path.
405  */
406 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
407 {
408         int prev_priority;
409
410         spin_lock(&mem->reclaim_param_lock);
411         prev_priority = mem->prev_priority;
412         spin_unlock(&mem->reclaim_param_lock);
413
414         return prev_priority;
415 }
416
417 void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
418 {
419         spin_lock(&mem->reclaim_param_lock);
420         if (priority < mem->prev_priority)
421                 mem->prev_priority = priority;
422         spin_unlock(&mem->reclaim_param_lock);
423 }
424
425 void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
426 {
427         spin_lock(&mem->reclaim_param_lock);
428         mem->prev_priority = priority;
429         spin_unlock(&mem->reclaim_param_lock);
430 }
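/*
 * Sketch of the intended callers (assumption: vmscan.c of this era,
 * where struct scan_control carries a mem_cgroup pointer). Per-memcg
 * reclaim uses these helpers instead of zone->prev_priority:
 *
 *	priority = mem_cgroup_get_reclaim_priority(sc->mem_cgroup);
 *	...
 *	mem_cgroup_note_reclaim_priority(sc->mem_cgroup, priority);
 *	...
 *	mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
 *
 * Each helper takes reclaim_param_lock, so concurrent reclaimers
 * cannot observe a torn or stale prev_priority.
 */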
431
432 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
433 {
434         unsigned long active;
435         unsigned long inactive;
436
437         inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON);
438         active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON);
439
440         if (inactive * memcg->inactive_ratio < active)
441                 return 1;
442
443         return 0;
444 }
445
446 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
447                                        struct zone *zone,
448                                        enum lru_list lru)
449 {
450         int nid = zone->zone_pgdat->node_id;
451         int zid = zone_idx(zone);
452         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
453
454         return MEM_CGROUP_ZSTAT(mz, lru);
455 }
456
457 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
458                                                       struct zone *zone)
459 {
460         int nid = zone->zone_pgdat->node_id;
461         int zid = zone_idx(zone);
462         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
463
464         return &mz->reclaim_stat;
465 }
466
467 struct zone_reclaim_stat *
468 mem_cgroup_get_reclaim_stat_from_page(struct page *page)
469 {
470         struct page_cgroup *pc;
471         struct mem_cgroup_per_zone *mz;
472
473         if (mem_cgroup_disabled())
474                 return NULL;
475
476         pc = lookup_page_cgroup(page);
477         mz = page_cgroup_zoneinfo(pc);
478         if (!mz)
479                 return NULL;
480
481         return &mz->reclaim_stat;
482 }
483
484 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
485                                         struct list_head *dst,
486                                         unsigned long *scanned, int order,
487                                         int mode, struct zone *z,
488                                         struct mem_cgroup *mem_cont,
489                                         int active, int file)
490 {
491         unsigned long nr_taken = 0;
492         struct page *page;
493         unsigned long scan;
494         LIST_HEAD(pc_list);
495         struct list_head *src;
496         struct page_cgroup *pc, *tmp;
497         int nid = z->zone_pgdat->node_id;
498         int zid = zone_idx(z);
499         struct mem_cgroup_per_zone *mz;
500         int lru = LRU_FILE * !!file + !!active;
501
502         BUG_ON(!mem_cont);
503         mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
504         src = &mz->lists[lru];
505
506         scan = 0;
507         list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
508                 if (scan >= nr_to_scan)
509                         break;
510
511                 page = pc->page;
512                 if (unlikely(!PageCgroupUsed(pc)))
513                         continue;
514                 if (unlikely(!PageLRU(page)))
515                         continue;
516
517                 scan++;
518                 if (__isolate_lru_page(page, mode, file) == 0) {
519                         list_move(&page->lru, dst);
520                         nr_taken++;
521                 }
522         }
523
524         *scanned = scan;
525         return nr_taken;
526 }
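/*
 * Note on the lru index above (illustrative): with LRU_FILE == 2 and
 * the anon lists first, "LRU_FILE * !!file + !!active" maps
 * (file=0, active=0) -> LRU_INACTIVE_ANON, (0,1) -> LRU_ACTIVE_ANON,
 * (1,0) -> LRU_INACTIVE_FILE and (1,1) -> LRU_ACTIVE_FILE.
 */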
527
528 #define mem_cgroup_from_res_counter(counter, member)    \
529         container_of(counter, struct mem_cgroup, member)
530
531 /*
532  * This routine finds the successor of a DFS walk. It must be
533  * called with cgroup_mutex held.
534  */
535 static struct mem_cgroup *
536 mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
537 {
538         struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
539
540         curr_cgroup = curr->css.cgroup;
541         root_cgroup = root_mem->css.cgroup;
542
543         if (!list_empty(&curr_cgroup->children)) {
544                 /*
545                  * Walk down to children
546                  */
547                 mem_cgroup_put(curr);
548                 cgroup = list_entry(curr_cgroup->children.next,
549                                                 struct cgroup, sibling);
550                 curr = mem_cgroup_from_cont(cgroup);
551                 mem_cgroup_get(curr);
552                 goto done;
553         }
554
555 visit_parent:
556         if (curr_cgroup == root_cgroup) {
557                 mem_cgroup_put(curr);
558                 curr = root_mem;
559                 mem_cgroup_get(curr);
560                 goto done;
561         }
562
563         /*
564          * Goto next sibling
565          */
566         if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
567                 mem_cgroup_put(curr);
568                 cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
569                                                 sibling);
570                 curr = mem_cgroup_from_cont(cgroup);
571                 mem_cgroup_get(curr);
572                 goto done;
573         }
574
575         /*
576          * Go up to next parent and next parent's sibling if need be
577          */
578         curr_cgroup = curr_cgroup->parent;
579         goto visit_parent;
580
581 done:
582         root_mem->last_scanned_child = curr;
583         return curr;
584 }
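/*
 * Example walk (illustrative): for a hierarchy root -> {A, B} with
 * A -> {A1}, successive calls starting at A yield A1 (down to the
 * child), then B (up to A, over to its sibling), then root_mem,
 * i.e. a preorder DFS over the subtree.
 */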
585
586 /*
587  * Visit the first child (need not be the first child as per the ordering
588  * of the cgroup list, since we track last_scanned_child) of @root_mem and
589  * use that to reclaim free pages from.
590  */
591 static struct mem_cgroup *
592 mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
593 {
594         struct cgroup *cgroup;
595         struct mem_cgroup *ret;
596         bool obsolete = (root_mem->last_scanned_child &&
597                                 root_mem->last_scanned_child->obsolete);
598
599         /*
600          * Scan all children under the mem_cgroup mem
601          */
602         cgroup_lock();
603         if (list_empty(&root_mem->css.cgroup->children)) {
604                 ret = root_mem;
605                 goto done;
606         }
607
608         if (!root_mem->last_scanned_child || obsolete) {
609
610                 if (obsolete)
611                         mem_cgroup_put(root_mem->last_scanned_child);
612
613                 cgroup = list_first_entry(&root_mem->css.cgroup->children,
614                                 struct cgroup, sibling);
615                 ret = mem_cgroup_from_cont(cgroup);
616                 mem_cgroup_get(ret);
617         } else
618                 ret = mem_cgroup_get_next_node(root_mem->last_scanned_child,
619                                                 root_mem);
620
621 done:
622         root_mem->last_scanned_child = ret;
623         cgroup_unlock();
624         return ret;
625 }
626
627 static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
628 {
629         if (do_swap_account) {
630                 if (res_counter_check_under_limit(&mem->res) &&
631                         res_counter_check_under_limit(&mem->memsw))
632                         return true;
633         } else
634                 if (res_counter_check_under_limit(&mem->res))
635                         return true;
636         return false;
637 }
638
639 /*
640  * Dance down the hierarchy if needed to reclaim memory. We remember the
641  * last child we reclaimed from, so that we don't end up penalizing
642  * one child extensively based on its position in the children list.
643  *
644  * root_mem is the original ancestor that we've been reclaiming from.
645  */
646 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
647                                                 gfp_t gfp_mask, bool noswap)
648 {
649         struct mem_cgroup *next_mem;
650         int ret = 0;
651
652         /*
653          * Reclaim unconditionally and don't check for return value.
654          * We need to reclaim in the current group and down the tree.
655          * One might think about checking for children before reclaiming,
656          * but there might be left over accounting, even after children
657          * have left.
658          */
659         ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap);
660         if (mem_cgroup_check_under_limit(root_mem))
661                 return 0;
662         if (!root_mem->use_hierarchy)
663                 return ret;
664
665         next_mem = mem_cgroup_get_first_node(root_mem);
666
667         while (next_mem != root_mem) {
668                 if (next_mem->obsolete) {
669                         mem_cgroup_put(next_mem);
670                         cgroup_lock();
671                         next_mem = mem_cgroup_get_first_node(root_mem);
672                         cgroup_unlock();
673                         continue;
674                 }
675                 ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap);
676                 if (mem_cgroup_check_under_limit(root_mem))
677                         return 0;
678                 cgroup_lock();
679                 next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
680                 cgroup_unlock();
681         }
682         return ret;
683 }
684
685 bool mem_cgroup_oom_called(struct task_struct *task)
686 {
687         bool ret = false;
688         struct mem_cgroup *mem;
689         struct mm_struct *mm;
690
691         rcu_read_lock();
692         mm = task->mm;
693         if (!mm)
694                 mm = &init_mm;
695         mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
696         if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
697                 ret = true;
698         rcu_read_unlock();
699         return ret;
700 }
701 /*
702  * Unlike the exported interface, this takes an extra "oom" parameter;
703  * if oom == true, the OOM killer can be invoked.
704  */
705 static int __mem_cgroup_try_charge(struct mm_struct *mm,
706                         gfp_t gfp_mask, struct mem_cgroup **memcg,
707                         bool oom)
708 {
709         struct mem_cgroup *mem, *mem_over_limit;
710         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
711         struct res_counter *fail_res;
712
713         if (unlikely(test_thread_flag(TIF_MEMDIE))) {
714                 /* Don't account this! */
715                 *memcg = NULL;
716                 return 0;
717         }
718
719         /*
720          * We always charge the cgroup the mm_struct belongs to.
721          * The mm_struct's mem_cgroup changes on task migration if the
722          * thread group leader migrates. It's possible that mm is not
723          * set, if so charge the init_mm (happens for pagecache usage).
724          */
725         if (likely(!*memcg)) {
726                 rcu_read_lock();
727                 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
728                 if (unlikely(!mem)) {
729                         rcu_read_unlock();
730                         return 0;
731                 }
732                 /*
733                  * For every charge from the cgroup, increment reference count
734                  */
735                 css_get(&mem->css);
736                 *memcg = mem;
737                 rcu_read_unlock();
738         } else {
739                 mem = *memcg;
740                 css_get(&mem->css);
741         }
742
743         while (1) {
744                 int ret;
745                 bool noswap = false;
746
747                 ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
748                 if (likely(!ret)) {
749                         if (!do_swap_account)
750                                 break;
751                         ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
752                                                         &fail_res);
753                         if (likely(!ret))
754                                 break;
755                         /* mem+swap counter fails */
756                         res_counter_uncharge(&mem->res, PAGE_SIZE);
757                         noswap = true;
758                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
759                                                                         memsw);
760                 } else
761                         /* mem counter fails */
762                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
763                                                                         res);
764
765                 if (!(gfp_mask & __GFP_WAIT))
766                         goto nomem;
767
768                 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
769                                                         noswap);
770
771                 /*
772                  * try_to_free_mem_cgroup_pages() might not give us a full
773                  * picture of reclaim. Some pages are reclaimed and might be
774                  * moved to swap cache or just unmapped from the cgroup.
775                  * Check the limit again to see if the reclaim reduced the
776                  * current usage of the cgroup before giving up
777                  *
778                  */
779                 if (mem_cgroup_check_under_limit(mem_over_limit))
780                         continue;
781
782                 if (!nr_retries--) {
783                         if (oom) {
784                                 mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
785                                 mem_over_limit->last_oom_jiffies = jiffies;
786                         }
787                         goto nomem;
788                 }
789         }
790         return 0;
791 nomem:
792         css_put(&mem->css);
793         return -ENOMEM;
794 }
795
796 /**
797  * mem_cgroup_try_charge - get charge of PAGE_SIZE.
798  * @mm: an mm_struct which is charged against. (when *memcg is NULL)
799  * @gfp_mask: gfp_mask for reclaim.
800  * @memcg: a pointer to memory cgroup which is charged against.
801  *
802  * Charge against the memory cgroup pointed to by *memcg. If *memcg == NULL,
803  * the memory cgroup is estimated from @mm and stored in *memcg.
804  *
805  * Returns 0 on success, -ENOMEM on failure.
806  * This call can invoke the OOM killer.
807  */
808
809 int mem_cgroup_try_charge(struct mm_struct *mm,
810                           gfp_t mask, struct mem_cgroup **memcg)
811 {
812         return __mem_cgroup_try_charge(mm, mask, memcg, true);
813 }
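/*
 * Usage note (illustrative): mem_cgroup_prepare_migration() below uses
 * this with mm == NULL but *memcg preset, pre-charging the cgroup the
 * old page belongs to:
 *
 *	ret = mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem);
 */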
814
815 /*
816  * Commit a charge obtained by mem_cgroup_try_charge() and move the
817  * page_cgroup to the USED state. If already USED, uncharge and return.
818  */
819
820 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
821                                      struct page_cgroup *pc,
822                                      enum charge_type ctype)
823 {
824         /* try_charge() may have stored NULL in *memcg; handle that case. */
825         if (!mem)
826                 return;
827
828         lock_page_cgroup(pc);
829         if (unlikely(PageCgroupUsed(pc))) {
830                 unlock_page_cgroup(pc);
831                 res_counter_uncharge(&mem->res, PAGE_SIZE);
832                 if (do_swap_account)
833                         res_counter_uncharge(&mem->memsw, PAGE_SIZE);
834                 css_put(&mem->css);
835                 return;
836         }
837         pc->mem_cgroup = mem;
838         smp_wmb();
839         pc->flags = pcg_default_flags[ctype];
840
841         mem_cgroup_charge_statistics(mem, pc, true);
842
843         unlock_page_cgroup(pc);
844 }
845
846 /**
847  * mem_cgroup_move_account - move account of the page
848  * @pc: page_cgroup of the page.
849  * @from: mem_cgroup which the page is moved from.
850  * @to: mem_cgroup which the page is moved to. @from != @to.
851  *
852  * The caller must ensure the following:
853  * - the page is not on the LRU (isolate_lru_page() is useful.)
854  *
855  * Returns 0 on success,
856  * returns -EBUSY when the lock is busy or "pc" is unstable.
857  *
858  * This function "uncharges" the old cgroup but doesn't "charge" the
859  * new one. That is up to the caller.
860  */
861
862 static int mem_cgroup_move_account(struct page_cgroup *pc,
863         struct mem_cgroup *from, struct mem_cgroup *to)
864 {
865         struct mem_cgroup_per_zone *from_mz, *to_mz;
866         int nid, zid;
867         int ret = -EBUSY;
868
869         VM_BUG_ON(from == to);
870         VM_BUG_ON(PageLRU(pc->page));
871
872         nid = page_cgroup_nid(pc);
873         zid = page_cgroup_zid(pc);
874         from_mz =  mem_cgroup_zoneinfo(from, nid, zid);
875         to_mz =  mem_cgroup_zoneinfo(to, nid, zid);
876
877         if (!trylock_page_cgroup(pc))
878                 return ret;
879
880         if (!PageCgroupUsed(pc))
881                 goto out;
882
883         if (pc->mem_cgroup != from)
884                 goto out;
885
886         css_put(&from->css);
887         res_counter_uncharge(&from->res, PAGE_SIZE);
888         mem_cgroup_charge_statistics(from, pc, false);
889         if (do_swap_account)
890                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
891         pc->mem_cgroup = to;
892         mem_cgroup_charge_statistics(to, pc, true);
893         css_get(&to->css);
894         ret = 0;
895 out:
896         unlock_page_cgroup(pc);
897         return ret;
898 }
899
900 /*
901  * move charges to its parent.
902  */
903
904 static int mem_cgroup_move_parent(struct page_cgroup *pc,
905                                   struct mem_cgroup *child,
906                                   gfp_t gfp_mask)
907 {
908         struct page *page = pc->page;
909         struct cgroup *cg = child->css.cgroup;
910         struct cgroup *pcg = cg->parent;
911         struct mem_cgroup *parent;
912         int ret;
913
914         /* Is ROOT ? */
915         if (!pcg)
916                 return -EINVAL;
917
918
919         parent = mem_cgroup_from_cont(pcg);
920
921
922         ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
923         if (ret || !parent)
924                 return ret;
925
926         if (!get_page_unless_zero(page))
927                 return -EBUSY;
928
929         ret = isolate_lru_page(page);
930
931         if (ret)
932                 goto cancel;
933
934         ret = mem_cgroup_move_account(pc, child, parent);
935
936         /* drop the extra refcount from try_charge(); move_account takes its own */
937         css_put(&parent->css);
938         putback_lru_page(page);
939         if (!ret) {
940                 put_page(page);
941                 return 0;
942         }
943         /* uncharge if move fails */
944 cancel:
945         res_counter_uncharge(&parent->res, PAGE_SIZE);
946         if (do_swap_account)
947                 res_counter_uncharge(&parent->memsw, PAGE_SIZE);
948         put_page(page);
949         return ret;
950 }
951
952 /*
953  * Charge the memory controller for page usage.
954  * Return
955  * 0 if the charge was successful
956  * < 0 if the cgroup is over its limit
957  */
958 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
959                                 gfp_t gfp_mask, enum charge_type ctype,
960                                 struct mem_cgroup *memcg)
961 {
962         struct mem_cgroup *mem;
963         struct page_cgroup *pc;
964         int ret;
965
966         pc = lookup_page_cgroup(page);
967         /* can happen at boot */
968         if (unlikely(!pc))
969                 return 0;
970         prefetchw(pc);
971
972         mem = memcg;
973         ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
974         if (ret || !mem)
975                 return ret;
976
977         __mem_cgroup_commit_charge(mem, pc, ctype);
978         return 0;
979 }
980
981 int mem_cgroup_newpage_charge(struct page *page,
982                               struct mm_struct *mm, gfp_t gfp_mask)
983 {
984         if (mem_cgroup_disabled())
985                 return 0;
986         if (PageCompound(page))
987                 return 0;
988         /*
989          * If already mapped, we don't have to account.
990          * If page cache, page->mapping has address_space.
991          * But page->mapping may hold a stale anon_vma pointer;
992          * detect that with a PageAnon() check. A newly-mapped anon
993          * page's page->mapping is NULL.
994          */
995         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
996                 return 0;
997         if (unlikely(!mm))
998                 mm = &init_mm;
999         return mem_cgroup_charge_common(page, mm, gfp_mask,
1000                                 MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
1001 }
1002
1003 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
1004                                 gfp_t gfp_mask)
1005 {
1006         if (mem_cgroup_disabled())
1007                 return 0;
1008         if (PageCompound(page))
1009                 return 0;
1010         /*
1011          * Corner case handling. This is usually called from
1012          * add_to_page_cache(). But some filesystems (shmem) precharge the
1013          * page before calling it, and call add_to_page_cache() with GFP_NOWAIT.
1014          *
1015          * In the GFP_NOWAIT case, the page may have been pre-charged before
1016          * add_to_page_cache() (see shmem.c); check for that here to avoid
1017          * charging twice. (It works, but at a slightly higher cost.)
1018          */
1019         if (!(gfp_mask & __GFP_WAIT)) {
1020                 struct page_cgroup *pc;
1021
1022
1023                 pc = lookup_page_cgroup(page);
1024                 if (!pc)
1025                         return 0;
1026                 lock_page_cgroup(pc);
1027                 if (PageCgroupUsed(pc)) {
1028                         unlock_page_cgroup(pc);
1029                         return 0;
1030                 }
1031                 unlock_page_cgroup(pc);
1032         }
1033
1034         if (unlikely(!mm))
1035                 mm = &init_mm;
1036
1037         if (page_is_file_cache(page))
1038                 return mem_cgroup_charge_common(page, mm, gfp_mask,
1039                                 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
1040         else
1041                 return mem_cgroup_charge_common(page, mm, gfp_mask,
1042                                 MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
1043 }
1044
1045 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
1046                                  struct page *page,
1047                                  gfp_t mask, struct mem_cgroup **ptr)
1048 {
1049         struct mem_cgroup *mem;
1050         swp_entry_t     ent;
1051
1052         if (mem_cgroup_disabled())
1053                 return 0;
1054
1055         if (!do_swap_account)
1056                 goto charge_cur_mm;
1057
1058         /*
1059          * A racing thread's fault, or swapoff, may have already updated
1060          * the pte, and even removed page from swap cache: return success
1061          * to go on to do_swap_page()'s pte_same() test, which should fail.
1062          */
1063         if (!PageSwapCache(page))
1064                 return 0;
1065
1066         ent.val = page_private(page);
1067
1068         mem = lookup_swap_cgroup(ent);
1069         if (!mem || mem->obsolete)
1070                 goto charge_cur_mm;
1071         *ptr = mem;
1072         return __mem_cgroup_try_charge(NULL, mask, ptr, true);
1073 charge_cur_mm:
1074         if (unlikely(!mm))
1075                 mm = &init_mm;
1076         return __mem_cgroup_try_charge(mm, mask, ptr, true);
1077 }
1078
1079 #ifdef CONFIG_SWAP
1080
1081 int mem_cgroup_cache_charge_swapin(struct page *page,
1082                         struct mm_struct *mm, gfp_t mask, bool locked)
1083 {
1084         int ret = 0;
1085
1086         if (mem_cgroup_disabled())
1087                 return 0;
1088         if (unlikely(!mm))
1089                 mm = &init_mm;
1090         if (!locked)
1091                 lock_page(page);
1092         /*
1093          * If not locked, the page can be dropped from the SwapCache before
1094          * we reach here.
1095          */
1096         if (PageSwapCache(page)) {
1097                 struct mem_cgroup *mem = NULL;
1098                 swp_entry_t ent;
1099
1100                 ent.val = page_private(page);
1101                 if (do_swap_account) {
1102                         mem = lookup_swap_cgroup(ent);
1103                         if (mem && mem->obsolete)
1104                                 mem = NULL;
1105                         if (mem)
1106                                 mm = NULL;
1107                 }
1108                 ret = mem_cgroup_charge_common(page, mm, mask,
1109                                 MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
1110
1111                 if (!ret && do_swap_account) {
1112                         /* avoid double counting */
1113                         mem = swap_cgroup_record(ent, NULL);
1114                         if (mem) {
1115                                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1116                                 mem_cgroup_put(mem);
1117                         }
1118                 }
1119         }
1120         if (!locked)
1121                 unlock_page(page);
1122         /* add this page(page_cgroup) to the LRU we want. */
1123         mem_cgroup_lru_fixup(page);
1124
1125         return ret;
1126 }
1127 #endif
1128
1129 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
1130 {
1131         struct page_cgroup *pc;
1132
1133         if (mem_cgroup_disabled())
1134                 return;
1135         if (!ptr)
1136                 return;
1137         pc = lookup_page_cgroup(page);
1138         __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1139         /*
1140          * Now the swap entry is in memory, so this page may be counted
1141          * both as mem and as swap: a double count.
1142          * Fix it by uncharging from memsw. This SwapCache is stable
1143          * because we're still under lock_page().
1144          */
1145         if (do_swap_account) {
1146                 swp_entry_t ent = {.val = page_private(page)};
1147                 struct mem_cgroup *memcg;
1148                 memcg = swap_cgroup_record(ent, NULL);
1149                 if (memcg) {
1150                         /* If memcg is obsolete, memcg can be != ptr */
1151                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1152                         mem_cgroup_put(memcg);
1153                 }
1154
1155         }
1156         /* add this page(page_cgroup) to the LRU we want. */
1157         mem_cgroup_lru_fixup(page);
1158 }
1159
1160 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
1161 {
1162         if (mem_cgroup_disabled())
1163                 return;
1164         if (!mem)
1165                 return;
1166         res_counter_uncharge(&mem->res, PAGE_SIZE);
1167         if (do_swap_account)
1168                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1169         css_put(&mem->css);
1170 }
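/*
 * Together these form a two-phase protocol. A sketch of a swapin path
 * (assumption: resembles do_swap_page() in mm/memory.c of this era):
 *
 *	struct mem_cgroup *ptr = NULL;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto oom;
 *	...map the page; on success:
 *	mem_cgroup_commit_charge_swapin(page, ptr);
 *	...on failure:
 *	mem_cgroup_cancel_charge_swapin(ptr);
 */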
1171
1172
1173 /*
1174  * uncharge if !page_mapped(page)
1175  */
1176 static struct mem_cgroup *
1177 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1178 {
1179         struct page_cgroup *pc;
1180         struct mem_cgroup *mem = NULL;
1181         struct mem_cgroup_per_zone *mz;
1182
1183         if (mem_cgroup_disabled())
1184                 return NULL;
1185
1186         if (PageSwapCache(page))
1187                 return NULL;
1188
1189         /*
1190          * Check if our page_cgroup is valid
1191          */
1192         pc = lookup_page_cgroup(page);
1193         if (unlikely(!pc || !PageCgroupUsed(pc)))
1194                 return NULL;
1195
1196         lock_page_cgroup(pc);
1197
1198         mem = pc->mem_cgroup;
1199
1200         if (!PageCgroupUsed(pc))
1201                 goto unlock_out;
1202
1203         switch (ctype) {
1204         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1205                 if (page_mapped(page))
1206                         goto unlock_out;
1207                 break;
1208         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
1209                 if (!PageAnon(page)) {  /* Shared memory */
1210                         if (page->mapping && !page_is_file_cache(page))
1211                                 goto unlock_out;
1212                 } else if (page_mapped(page)) /* Anon */
1213                                 goto unlock_out;
1214                 break;
1215         default:
1216                 break;
1217         }
1218
1219         res_counter_uncharge(&mem->res, PAGE_SIZE);
1220         if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
1221                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1222
1223         mem_cgroup_charge_statistics(mem, pc, false);
1224         ClearPageCgroupUsed(pc);
1225
1226         mz = page_cgroup_zoneinfo(pc);
1227         unlock_page_cgroup(pc);
1228
1229         /* at swapout, this memcg will be accessed to record to swap */
1230         if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
1231                 css_put(&mem->css);
1232
1233         return mem;
1234
1235 unlock_out:
1236         unlock_page_cgroup(pc);
1237         return NULL;
1238 }
1239
1240 void mem_cgroup_uncharge_page(struct page *page)
1241 {
1242         /* early check. */
1243         if (page_mapped(page))
1244                 return;
1245         if (page->mapping && !PageAnon(page))
1246                 return;
1247         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1248 }
1249
1250 void mem_cgroup_uncharge_cache_page(struct page *page)
1251 {
1252         VM_BUG_ON(page_mapped(page));
1253         VM_BUG_ON(page->mapping);
1254         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
1255 }
1256
1257 /*
1258  * Called from __delete_from_swap_cache() to drop the "page" account.
1259  * The memcg information is recorded in the swap_cgroup of "ent".
1260  */
1261 void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
1262 {
1263         struct mem_cgroup *memcg;
1264
1265         memcg = __mem_cgroup_uncharge_common(page,
1266                                         MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
1267         /* record memcg information */
1268         if (do_swap_account && memcg) {
1269                 swap_cgroup_record(ent, memcg);
1270                 mem_cgroup_get(memcg);
1271         }
1272         if (memcg)
1273                 css_put(&memcg->css);
1274 }
1275
1276 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1277 /*
1278  * Called from swap_entry_free(): remove the record in swap_cgroup and
1279  * uncharge the "memsw" account.
1280  */
1281 void mem_cgroup_uncharge_swap(swp_entry_t ent)
1282 {
1283         struct mem_cgroup *memcg;
1284
1285         if (!do_swap_account)
1286                 return;
1287
1288         memcg = swap_cgroup_record(ent, NULL);
1289         if (memcg) {
1290                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1291                 mem_cgroup_put(memcg);
1292         }
1293 }
1294 #endif
1295
1296 /*
1297  * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
1298  * page belongs to.
1299  */
1300 int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
1301 {
1302         struct page_cgroup *pc;
1303         struct mem_cgroup *mem = NULL;
1304         int ret = 0;
1305
1306         if (mem_cgroup_disabled())
1307                 return 0;
1308
1309         pc = lookup_page_cgroup(page);
1310         lock_page_cgroup(pc);
1311         if (PageCgroupUsed(pc)) {
1312                 mem = pc->mem_cgroup;
1313                 css_get(&mem->css);
1314         }
1315         unlock_page_cgroup(pc);
1316
1317         if (mem) {
1318                 ret = mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem);
1319                 css_put(&mem->css);
1320         }
1321         *ptr = mem;
1322         return ret;
1323 }
1324
1325 /* remove redundant charge if migration failed*/
1326 void mem_cgroup_end_migration(struct mem_cgroup *mem,
1327                 struct page *oldpage, struct page *newpage)
1328 {
1329         struct page *target, *unused;
1330         struct page_cgroup *pc;
1331         enum charge_type ctype;
1332
1333         if (!mem)
1334                 return;
1335
1336         /* at migration success, oldpage->mapping is NULL. */
1337         if (oldpage->mapping) {
1338                 target = oldpage;
1339                 unused = NULL;
1340         } else {
1341                 target = newpage;
1342                 unused = oldpage;
1343         }
1344
1345         if (PageAnon(target))
1346                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
1347         else if (page_is_file_cache(target))
1348                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
1349         else
1350                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
1351
1352         /* unused page is not on radix-tree now. */
1353         if (unused)
1354                 __mem_cgroup_uncharge_common(unused, ctype);
1355
1356         pc = lookup_page_cgroup(target);
1357         /*
1358          * __mem_cgroup_commit_charge() checks the PCG_USED bit of the
1359          * page_cgroup, so double-counting is effectively avoided.
1360          */
1361         __mem_cgroup_commit_charge(mem, pc, ctype);
1362
1363         /*
1364          * Both oldpage and newpage are still under lock_page().
1365          * So we don't have to care about races in the radix-tree.
1366          * But we must check whether the page is mapped or not.
1367          *
1368          * There is a case for !page_mapped(). At the start of
1369          * migration, oldpage was mapped. But now, it's zapped.
1370          * But we know *target* page is not freed/reused under us.
1371          * mem_cgroup_uncharge_page() does all necessary checks.
1372          */
1373         if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
1374                 mem_cgroup_uncharge_page(target);
1375 }
1376
1377 /*
1378  * Try to shrink memory usage under the specified resource controller.
1379  * This is typically used to reclaim shmem pages, reducing the side
1380  * effects of shmem page allocation on the mem_cgroups that use it.
1381  */
1382 int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
1383 {
1384         struct mem_cgroup *mem;
1385         int progress = 0;
1386         int retry = MEM_CGROUP_RECLAIM_RETRIES;
1387
1388         if (mem_cgroup_disabled())
1389                 return 0;
1390         if (!mm)
1391                 return 0;
1392
1393         rcu_read_lock();
1394         mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
1395         if (unlikely(!mem)) {
1396                 rcu_read_unlock();
1397                 return 0;
1398         }
1399         css_get(&mem->css);
1400         rcu_read_unlock();
1401
1402         do {
1403                 progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true);
1404                 progress += mem_cgroup_check_under_limit(mem);
1405         } while (!progress && --retry);
1406
1407         css_put(&mem->css);
1408         if (!retry)
1409                 return -ENOMEM;
1410         return 0;
1411 }
1412
1413 /*
1414  * The inactive anon list should be small enough that the VM never has to
1415  * do too much work, but large enough that each inactive page has a chance
1416  * to be referenced again before it is swapped out.
1417  *
1418  * This calculation is a straightforward port of
1419  * page_alloc.c::setup_per_zone_inactive_ratio(),
1420  * which describes it in more detail.
1421  */
1422 static void mem_cgroup_set_inactive_ratio(struct mem_cgroup *memcg)
1423 {
1424         unsigned int gb, ratio;
1425
1426         gb = res_counter_read_u64(&memcg->res, RES_LIMIT) >> 30;
1427         if (gb)
1428                 ratio = int_sqrt(10 * gb);
1429         else
1430                 ratio = 1;
1431
1432         memcg->inactive_ratio = ratio;
1433
1434 }
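/*
 * Worked example (illustrative): a 4GB limit gives gb = 4, so
 * inactive_ratio = int_sqrt(40) = 6, and
 * mem_cgroup_inactive_anon_is_low() then fires when
 * inactive * 6 < active. Limits under 1GB fall back to ratio 1.
 */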
1435
1436 static DEFINE_MUTEX(set_limit_mutex);
1437
1438 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
1439                                 unsigned long long val)
1440 {
1441
1442         int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
1443         int progress;
1444         u64 memswlimit;
1445         int ret = 0;
1446
1447         while (retry_count) {
1448                 if (signal_pending(current)) {
1449                         ret = -EINTR;
1450                         break;
1451                 }
1452                 /*
1453                  * Rather than hiding this in a helper, it is open-coded
1454                  * so you can see what it really does. We have to
1455                  * guarantee mem->res.limit <= mem->memsw.limit.
1456                  */
1457                 mutex_lock(&set_limit_mutex);
1458                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1459                 if (memswlimit < val) {
1460                         ret = -EINVAL;
1461                         mutex_unlock(&set_limit_mutex);
1462                         break;
1463                 }
1464                 ret = res_counter_set_limit(&memcg->res, val);
1465                 mutex_unlock(&set_limit_mutex);
1466
1467                 if (!ret)
1468                         break;
1469
1470                 progress = try_to_free_mem_cgroup_pages(memcg,
1471                                 GFP_KERNEL, false);
1472                 if (!progress) retry_count--;
1473         }
1474
1475         if (!ret)
1476                 mem_cgroup_set_inactive_ratio(memcg);
1477
1478         return ret;
1479 }
1480
1481 int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
1482                                 unsigned long long val)
1483 {
1484         int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
1485         u64 memlimit, oldusage, curusage;
1486         int ret;
1487
1488         if (!do_swap_account)
1489                 return -EINVAL;
1490
1491         while (retry_count) {
1492                 if (signal_pending(current)) {
1493                         ret = -EINTR;
1494                         break;
1495                 }
1496                 /*
1497                  * Rather than hiding this in a helper, it is open-coded
1498                  * so you can see what it really does. We have to
1499                  * guarantee mem->res.limit <= mem->memsw.limit.
1500                  */
1501                 mutex_lock(&set_limit_mutex);
1502                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1503                 if (memlimit > val) {
1504                         ret = -EINVAL;
1505                         mutex_unlock(&set_limit_mutex);
1506                         break;
1507                 }
1508                 ret = res_counter_set_limit(&memcg->memsw, val);
1509                 mutex_unlock(&set_limit_mutex);
1510
1511                 if (!ret)
1512                         break;
1513
1514                 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1515                 try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true);
1516                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1517                 if (curusage >= oldusage)
1518                         retry_count--;
1519         }
1520         return ret;
1521 }
1522
1523 /*
1524  * This routine traverses the page_cgroups on the given list and drops them
1525  * all. It doesn't reclaim the pages themselves, just removes the page_cgroups.
1526  */
1527 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
1528                                 int node, int zid, enum lru_list lru)
1529 {
1530         struct zone *zone;
1531         struct mem_cgroup_per_zone *mz;
1532         struct page_cgroup *pc, *busy;
1533         unsigned long flags, loop;
1534         struct list_head *list;
1535         int ret = 0;
1536
1537         zone = &NODE_DATA(node)->node_zones[zid];
1538         mz = mem_cgroup_zoneinfo(mem, node, zid);
1539         list = &mz->lists[lru];
1540
1541         loop = MEM_CGROUP_ZSTAT(mz, lru);
1542         /* give some margin against EBUSY etc...*/
1543         loop += 256;
1544         busy = NULL;
1545         while (loop--) {
1546                 ret = 0;
1547                 spin_lock_irqsave(&zone->lru_lock, flags);
1548                 if (list_empty(list)) {
1549                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1550                         break;
1551                 }
1552                 pc = list_entry(list->prev, struct page_cgroup, lru);
1553                 if (busy == pc) {
1554                         list_move(&pc->lru, list);
1555                         busy = NULL;
1556                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1557                         continue;
1558                 }
1559                 spin_unlock_irqrestore(&zone->lru_lock, flags);
1560
1561                 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
1562                 if (ret == -ENOMEM)
1563                         break;
1564
1565                 if (ret == -EBUSY || ret == -EINVAL) {
1566                         /* found lock contention or "pc" is obsolete. */
1567                         busy = pc;
1568                         cond_resched();
1569                 } else
1570                         busy = NULL;
1571         }
1572
1573         if (!ret && !list_empty(list))
1574                 return -EBUSY;
1575         return ret;
1576 }
1577
1578 /*
1579  * Make the mem_cgroup's charge 0 if there are no tasks.
1580  * This makes it possible to delete the mem_cgroup.
1581  */
1582 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
1583 {
1584         int ret;
1585         int node, zid, shrink;
1586         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1587         struct cgroup *cgrp = mem->css.cgroup;
1588
1589         css_get(&mem->css);
1590
1591         shrink = 0;
1592         /* should we free all pages? */
1593         if (free_all)
1594                 goto try_to_free;
1595 move_account:
1596         while (mem->res.usage > 0) {
1597                 ret = -EBUSY;
1598                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
1599                         goto out;
1600                 ret = -EINTR;
1601                 if (signal_pending(current))
1602                         goto out;
1603                 /* This makes sure all *used* pages are on an LRU list. */
1604                 lru_add_drain_all();
1605                 ret = 0;
1606                 for_each_node_state(node, N_POSSIBLE) {
1607                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
1608                                 enum lru_list l;
1609                                 for_each_lru(l) {
1610                                         ret = mem_cgroup_force_empty_list(mem,
1611                                                         node, zid, l);
1612                                         if (ret)
1613                                                 break;
1614                                 }
1615                         }
1616                         if (ret)
1617                                 break;
1618                 }
1619                 /* it seems the parent cgroup doesn't have enough memory */
1620                 if (ret == -ENOMEM)
1621                         goto try_to_free;
1622                 cond_resched();
1623         }
1624         ret = 0;
1625 out:
1626         css_put(&mem->css);
1627         return ret;
1628
1629 try_to_free:
1630         /* returns -EBUSY if there is a task or if we come here twice. */
1631         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
1632                 ret = -EBUSY;
1633                 goto out;
1634         }
1635         /* we call try-to-free pages to make this cgroup empty */
1636         lru_add_drain_all();
1637         /* try to free all pages in this cgroup */
1638         shrink = 1;
1639         while (nr_retries && mem->res.usage > 0) {
1640                 int progress;
1641
1642                 if (signal_pending(current)) {
1643                         ret = -EINTR;
1644                         goto out;
1645                 }
1646                 progress = try_to_free_mem_cgroup_pages(mem,
1647                                                   GFP_KERNEL, false);
1648                 if (!progress) {
1649                         nr_retries--;
1650                         /* maybe some writeback is necessary */
1651                         congestion_wait(WRITE, HZ/10);
1652                 }
1653
1654         }
1655         lru_add_drain();
1656         /* try move_account...there may be some *locked* pages. */
1657         if (mem->res.usage)
1658                 goto move_account;
1659         ret = 0;
1660         goto out;
1661 }
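
/*
 * Flow of mem_cgroup_force_empty(): the move_account loop reparents
 * page_cgroups with mem_cgroup_move_parent() without freeing any pages;
 * only if the parent cannot absorb them (-ENOMEM) do we fall through to
 * try_to_free, which actually reclaims. "shrink" guards against taking
 * the reclaim path twice, and any remaining *locked* pages send us back
 * to move_account for another pass.
 */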
1662
1663 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
1664 {
1665         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
1666 }
1667
1669 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
1670 {
1671         return mem_cgroup_from_cont(cont)->use_hierarchy;
1672 }
1673
1674 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
1675                                         u64 val)
1676 {
1677         int retval = 0;
1678         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1679         struct cgroup *parent = cont->parent;
1680         struct mem_cgroup *parent_mem = NULL;
1681
1682         if (parent)
1683                 parent_mem = mem_cgroup_from_cont(parent);
1684
1685         cgroup_lock();
1686         /*
1687          * If the parent's use_hierarchy is set, we can't make any modifications
1688          * in the child subtrees. If it is unset, then the change can
1689          * occur, provided the current cgroup has no children.
1690          *
1691          * For the root cgroup, parent_mem is NULL; we allow the value to be
1692          * set if there are no children.
1693          */
1694         if ((!parent_mem || !parent_mem->use_hierarchy) &&
1695                                 (val == 1 || val == 0)) {
1696                 if (list_empty(&cont->children))
1697                         mem->use_hierarchy = val;
1698                 else
1699                         retval = -EBUSY;
1700         } else
1701                 retval = -EINVAL;
1702         cgroup_unlock();
1703
1704         return retval;
1705 }
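
/*
 * A minimal userspace sketch of flipping use_hierarchy via the handlers
 * above; the cgroupfs mount point and group name are assumptions, not
 * fixed paths.
 */
#if 0	/* example only - userspace code, not part of this file */
#include <fcntl.h>
#include <unistd.h>

static int enable_hierarchy(void)
{
	/* assumed layout: memory cgroup hierarchy mounted at /cgroups/memory */
	int fd = open("/cgroups/memory/A/memory.use_hierarchy", O_WRONLY);

	if (fd < 0)
		return -1;
	/*
	 * write() fails with errno EBUSY if "A" already has children, and
	 * EINVAL if A's parent has use_hierarchy set (or the value isn't 0/1).
	 */
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}
#endif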
1706
1707 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
1708 {
1709         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1710         u64 val = 0;
1711         int type, name;
1712
1713         type = MEMFILE_TYPE(cft->private);
1714         name = MEMFILE_ATTR(cft->private);
1715         switch (type) {
1716         case _MEM:
1717                 val = res_counter_read_u64(&mem->res, name);
1718                 break;
1719         case _MEMSWAP:
1720                 if (do_swap_account)
1721                         val = res_counter_read_u64(&mem->memsw, name);
1722                 break;
1723         default:
1724                 BUG();
1725                 break;
1726         }
1727         return val;
1728 }
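
/*
 * For reference, the MEMFILE_* helpers used here (defined earlier in this
 * file) pack a counter type and a res_counter attribute into one int,
 * along these lines:
 *
 *	#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
 *	#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
 *	#define MEMFILE_ATTR(val)	((val) & 0xffff)
 *
 * so e.g. MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE) stored in a cftype's
 * ->private decodes back to &mem->memsw and RES_USAGE above.
 */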
1729 /*
1730  * So far, the only user of this function is
1731  * RES_LIMIT.
1732  */
1733 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
1734                             const char *buffer)
1735 {
1736         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
1737         int type, name;
1738         unsigned long long val;
1739         int ret;
1740
1741         type = MEMFILE_TYPE(cft->private);
1742         name = MEMFILE_ATTR(cft->private);
1743         switch (name) {
1744         case RES_LIMIT:
1745                 /* This function does all the necessary parsing; reuse it */
1746                 ret = res_counter_memparse_write_strategy(buffer, &val);
1747                 if (ret)
1748                         break;
1749                 if (type == _MEM)
1750                         ret = mem_cgroup_resize_limit(memcg, val);
1751                 else
1752                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
1753                 break;
1754         default:
1755                 ret = -EINVAL; /* should be BUG() ? */
1756                 break;
1757         }
1758         return ret;
1759 }
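
/*
 * res_counter_memparse_write_strategy() accepts memparse()-style suffixes,
 * so e.g. writing the string "4M" to memory.limit_in_bytes lands here and
 * calls mem_cgroup_resize_limit() with val == 4194304.
 */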
1760
1761 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
1762 {
1763         struct mem_cgroup *mem;
1764         int type, name;
1765
1766         mem = mem_cgroup_from_cont(cont);
1767         type = MEMFILE_TYPE(event);
1768         name = MEMFILE_ATTR(event);
1769         switch (name) {
1770         case RES_MAX_USAGE:
1771                 if (type == _MEM)
1772                         res_counter_reset_max(&mem->res);
1773                 else
1774                         res_counter_reset_max(&mem->memsw);
1775                 break;
1776         case RES_FAILCNT:
1777                 if (type == _MEM)
1778                         res_counter_reset_failcnt(&mem->res);
1779                 else
1780                         res_counter_reset_failcnt(&mem->memsw);
1781                 break;
1782         }
1783         return 0;
1784 }
1785
1786 static const struct mem_cgroup_stat_desc {
1787         const char *msg;
1788         u64 unit;
1789 } mem_cgroup_stat_desc[] = {
1790         [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
1791         [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
1792         [MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
1793         [MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
1794 };
1795
1796 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
1797                                  struct cgroup_map_cb *cb)
1798 {
1799         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
1800         struct mem_cgroup_stat *stat = &mem_cont->stat;
1801         int i;
1802
1803         for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
1804                 s64 val;
1805
1806                 val = mem_cgroup_read_stat(stat, i);
1807                 val *= mem_cgroup_stat_desc[i].unit;
1808                 cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
1809         }
1810         /* showing # of active pages */
1811         {
1812                 unsigned long active_anon, inactive_anon;
1813                 unsigned long active_file, inactive_file;
1814                 unsigned long unevictable;
1815
1816                 inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
1817                                                 LRU_INACTIVE_ANON);
1818                 active_anon = mem_cgroup_get_all_zonestat(mem_cont,
1819                                                 LRU_ACTIVE_ANON);
1820                 inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
1821                                                 LRU_INACTIVE_FILE);
1822                 active_file = mem_cgroup_get_all_zonestat(mem_cont,
1823                                                 LRU_ACTIVE_FILE);
1824                 unevictable = mem_cgroup_get_all_zonestat(mem_cont,
1825                                                         LRU_UNEVICTABLE);
1826
1827                 cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
1828                 cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
1829                 cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
1830                 cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
1831                 cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
1832
1833         }
1834
1835 #ifdef CONFIG_DEBUG_VM
1836         cb->fill(cb, "inactive_ratio", mem_cont->inactive_ratio);
1837
1838         {
1839                 int nid, zid;
1840                 struct mem_cgroup_per_zone *mz;
1841                 unsigned long recent_rotated[2] = {0, 0};
1842                 unsigned long recent_scanned[2] = {0, 0};
1843
1844                 for_each_online_node(nid)
1845                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1846                                 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
1847
1848                                 recent_rotated[0] +=
1849                                         mz->reclaim_stat.recent_rotated[0];
1850                                 recent_rotated[1] +=
1851                                         mz->reclaim_stat.recent_rotated[1];
1852                                 recent_scanned[0] +=
1853                                         mz->reclaim_stat.recent_scanned[0];
1854                                 recent_scanned[1] +=
1855                                         mz->reclaim_stat.recent_scanned[1];
1856                         }
1857                 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
1858                 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
1859                 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
1860                 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
1861         }
1862 #endif
1863
1864         return 0;
1865 }
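
/*
 * Sample "memory.stat" contents produced by the function above (values
 * are illustrative; CONFIG_DEBUG_VM adds inactive_ratio and the
 * recent_rotated/recent_scanned lines):
 *
 *	cache 2867200
 *	rss 1323008
 *	pgpgin 2084
 *	pgpgout 1261
 *	active_anon 860160
 *	inactive_anon 462848
 *	active_file 1720320
 *	inactive_file 1146880
 *	unevictable 0
 */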
1866
1868 static struct cftype mem_cgroup_files[] = {
1869         {
1870                 .name = "usage_in_bytes",
1871                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
1872                 .read_u64 = mem_cgroup_read,
1873         },
1874         {
1875                 .name = "max_usage_in_bytes",
1876                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
1877                 .trigger = mem_cgroup_reset,
1878                 .read_u64 = mem_cgroup_read,
1879         },
1880         {
1881                 .name = "limit_in_bytes",
1882                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
1883                 .write_string = mem_cgroup_write,
1884                 .read_u64 = mem_cgroup_read,
1885         },
1886         {
1887                 .name = "failcnt",
1888                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
1889                 .trigger = mem_cgroup_reset,
1890                 .read_u64 = mem_cgroup_read,
1891         },
1892         {
1893                 .name = "stat",
1894                 .read_map = mem_control_stat_show,
1895         },
1896         {
1897                 .name = "force_empty",
1898                 .trigger = mem_cgroup_force_empty_write,
1899         },
1900         {
1901                 .name = "use_hierarchy",
1902                 .write_u64 = mem_cgroup_hierarchy_write,
1903                 .read_u64 = mem_cgroup_hierarchy_read,
1904         },
1905 };
1906
1907 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1908 static struct cftype memsw_cgroup_files[] = {
1909         {
1910                 .name = "memsw.usage_in_bytes",
1911                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
1912                 .read_u64 = mem_cgroup_read,
1913         },
1914         {
1915                 .name = "memsw.max_usage_in_bytes",
1916                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
1917                 .trigger = mem_cgroup_reset,
1918                 .read_u64 = mem_cgroup_read,
1919         },
1920         {
1921                 .name = "memsw.limit_in_bytes",
1922                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
1923                 .write_string = mem_cgroup_write,
1924                 .read_u64 = mem_cgroup_read,
1925         },
1926         {
1927                 .name = "memsw.failcnt",
1928                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
1929                 .trigger = mem_cgroup_reset,
1930                 .read_u64 = mem_cgroup_read,
1931         },
1932 };
1933
1934 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
1935 {
1936         if (!do_swap_account)
1937                 return 0;
1938         return cgroup_add_files(cont, ss, memsw_cgroup_files,
1939                                 ARRAY_SIZE(memsw_cgroup_files));
1940 }
1941 #else
1942 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
1943 {
1944         return 0;
1945 }
1946 #endif
1947
1948 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1949 {
1950         struct mem_cgroup_per_node *pn;
1951         struct mem_cgroup_per_zone *mz;
1952         enum lru_list l;
1953         int zone, tmp = node;
1954         /*
1955          * This routine is called for all possible nodes, but it is a BUG to
1956          * call kmalloc() against an offline node (hence the tmp = -1 fallback,
1957          * which lets kmalloc_node() allocate from any node).
1958          * TODO: this routine can waste a lot of memory for nodes which will
1959          *       never be onlined. It would be better to use a memory hotplug
1960          *       callback function.
1961          */
1962         if (!node_state(node, N_NORMAL_MEMORY))
1963                 tmp = -1;
1964         pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
1965         if (!pn)
1966                 return 1;
1967
1968         mem->info.nodeinfo[node] = pn;
1969         memset(pn, 0, sizeof(*pn));
1970
1971         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
1972                 mz = &pn->zoneinfo[zone];
1973                 for_each_lru(l)
1974                         INIT_LIST_HEAD(&mz->lists[l]);
1975         }
1976         return 0;
1977 }
1978
1979 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1980 {
1981         kfree(mem->info.nodeinfo[node]);
1982 }
1983
1984 static int mem_cgroup_size(void)
1985 {
1986         int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
1987         return sizeof(struct mem_cgroup) + cpustat_size;
1988 }
1989
1990 static struct mem_cgroup *mem_cgroup_alloc(void)
1991 {
1992         struct mem_cgroup *mem;
1993         int size = mem_cgroup_size();
1994
1995         if (size < PAGE_SIZE)
1996                 mem = kmalloc(size, GFP_KERNEL);
1997         else
1998                 mem = vmalloc(size);
1999
2000         if (mem)
2001                 memset(mem, 0, size);
2002         return mem;
2003 }
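
/*
 * Worked size example (numbers are illustrative and config-dependent):
 * each mem_cgroup_stat_cpu is ____cacheline_aligned_in_smp, so with
 * 128-byte cachelines and nr_cpu_ids == 64 the per-cpu stats alone take
 * 64 * 128 = 8192 bytes. mem_cgroup_size() then exceeds PAGE_SIZE and
 * mem_cgroup_alloc() takes the vmalloc() path instead of kmalloc().
 */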
2004
2005 /*
2006  * When a mem_cgroup is destroyed, references from swap_cgroup can
2007  * remain (scanning them all at force_empty is too costly...).
2008  *
2009  * Instead of clearing all references at force_empty, we remember
2010  * the number of references from swap_cgroup and free the mem_cgroup
2011  * only when that count goes down to 0.
2012  *
2013  * When the mem_cgroup is destroyed, mem->obsolete is set to 1 (see
2014  * mem_cgroup_pre_destroy()) and entries pointing to it are ignored at swapin.
2015  *
2016  * Removal of the cgroup itself succeeds regardless of refs from swap.
2017  */
2018
2019 static void mem_cgroup_free(struct mem_cgroup *mem)
2020 {
2021         int node;
2022
2023         if (atomic_read(&mem->refcnt) > 0)
2024                 return;
2025
2027         for_each_node_state(node, N_POSSIBLE)
2028                 free_mem_cgroup_per_zone_info(mem, node);
2029
2030         if (mem_cgroup_size() < PAGE_SIZE)
2031                 kfree(mem);
2032         else
2033                 vfree(mem);
2034 }
2035
2036 static void mem_cgroup_get(struct mem_cgroup *mem)
2037 {
2038         atomic_inc(&mem->refcnt);
2039 }
2040
2041 static void mem_cgroup_put(struct mem_cgroup *mem)
2042 {
2043         if (atomic_dec_and_test(&mem->refcnt)) {
2044                 if (!mem->obsolete)
2045                         return;
2046                 mem_cgroup_free(mem);
2047         }
2048 }
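
/*
 * Typical lifetime with swap accounting: the swap-out path takes a
 * reference with mem_cgroup_get() when a swap entry starts pointing at
 * this memcg, and drops it with mem_cgroup_put() when the entry is freed.
 * The structure is therefore freed only once the cgroup has been removed
 * (mem->obsolete set in mem_cgroup_pre_destroy()) *and* the last such
 * reference is gone.
 */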
2049
2051 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2052 static void __init enable_swap_cgroup(void)
2053 {
2054         if (!mem_cgroup_disabled() && really_do_swap_account)
2055                 do_swap_account = 1;
2056 }
2057 #else
2058 static void __init enable_swap_cgroup(void)
2059 {
2060 }
2061 #endif
2062
2063 static struct cgroup_subsys_state *
2064 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
2065 {
2066         struct mem_cgroup *mem, *parent;
2067         int node;
2068
2069         mem = mem_cgroup_alloc();
2070         if (!mem)
2071                 return ERR_PTR(-ENOMEM);
2072
2073         for_each_node_state(node, N_POSSIBLE)
2074                 if (alloc_mem_cgroup_per_zone_info(mem, node))
2075                         goto free_out;
2076         /* root ? */
2077         if (cont->parent == NULL) {
2078                 enable_swap_cgroup();
2079                 parent = NULL;
2080         } else {
2081                 parent = mem_cgroup_from_cont(cont->parent);
2082                 mem->use_hierarchy = parent->use_hierarchy;
2083         }
2084
2085         if (parent && parent->use_hierarchy) {
2086                 res_counter_init(&mem->res, &parent->res);
2087                 res_counter_init(&mem->memsw, &parent->memsw);
2088         } else {
2089                 res_counter_init(&mem->res, NULL);
2090                 res_counter_init(&mem->memsw, NULL);
2091         }
2092         mem_cgroup_set_inactive_ratio(mem);
2093         mem->last_scanned_child = NULL;
2094         spin_lock_init(&mem->reclaim_param_lock);
2095
2096         return &mem->css;
2097 free_out:
2098         for_each_node_state(node, N_POSSIBLE)
2099                 free_mem_cgroup_per_zone_info(mem, node);
2100         mem_cgroup_free(mem);
2101         return ERR_PTR(-ENOMEM);
2102 }
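
/*
 * The parent pointers handed to res_counter_init() above are what make
 * use_hierarchy work: res_counter charging walks the ->parent chain, so
 * a charge against a child must also fit every ancestor's limit, and a
 * parent's limit effectively caps its whole subtree.
 */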
2103
2104 static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
2105                                         struct cgroup *cont)
2106 {
2107         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2108         mem->obsolete = 1;
2109         mem_cgroup_force_empty(mem, false);
2110 }
2111
2112 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
2113                                 struct cgroup *cont)
2114 {
2115         mem_cgroup_free(mem_cgroup_from_cont(cont));
2116 }
2117
2118 static int mem_cgroup_populate(struct cgroup_subsys *ss,
2119                                 struct cgroup *cont)
2120 {
2121         int ret;
2122
2123         ret = cgroup_add_files(cont, ss, mem_cgroup_files,
2124                                 ARRAY_SIZE(mem_cgroup_files));
2125
2126         if (!ret)
2127                 ret = register_memsw_files(cont, ss);
2128         return ret;
2129 }
2130
2131 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
2132                                 struct cgroup *cont,
2133                                 struct cgroup *old_cont,
2134                                 struct task_struct *p)
2135 {
2136         /*
2137          * FIXME: It would be better to move the charges of this process
2138          * from the old memcg to the new one, but it's still on the TODO list.
2139          */
2140 }
2141
2142 struct cgroup_subsys mem_cgroup_subsys = {
2143         .name = "memory",
2144         .subsys_id = mem_cgroup_subsys_id,
2145         .create = mem_cgroup_create,
2146         .pre_destroy = mem_cgroup_pre_destroy,
2147         .destroy = mem_cgroup_destroy,
2148         .populate = mem_cgroup_populate,
2149         .attach = mem_cgroup_move_task,
2150         .early_init = 0,
2151 };
2152
2153 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2154
2155 static int __init disable_swap_account(char *s)
2156 {
2157         really_do_swap_account = 0;
2158         return 1;
2159 }
2160 __setup("noswapaccount", disable_swap_account);
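/*
 * i.e. booting with "noswapaccount" on the kernel command line clears
 * really_do_swap_account, so enable_swap_cgroup() leaves do_swap_account
 * at 0 and register_memsw_files() registers none of the memsw.* files.
 */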
2161 #endif