memcg: add null check to page_cgroup_zoneinfo()
mm/memcontrol.c
1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19
20 #include <linux/res_counter.h>
21 #include <linux/memcontrol.h>
22 #include <linux/cgroup.h>
23 #include <linux/mm.h>
24 #include <linux/pagemap.h>
25 #include <linux/smp.h>
26 #include <linux/page-flags.h>
27 #include <linux/backing-dev.h>
28 #include <linux/bit_spinlock.h>
29 #include <linux/rcupdate.h>
30 #include <linux/mutex.h>
31 #include <linux/slab.h>
32 #include <linux/swap.h>
33 #include <linux/spinlock.h>
34 #include <linux/fs.h>
35 #include <linux/seq_file.h>
36 #include <linux/vmalloc.h>
37 #include <linux/mm_inline.h>
38 #include <linux/page_cgroup.h>
39 #include "internal.h"
40
41 #include <asm/uaccess.h>
42
43 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
44 #define MEM_CGROUP_RECLAIM_RETRIES      5
45
46 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
47 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
48 int do_swap_account __read_mostly;
49 static int really_do_swap_account __initdata = 1; /* to remember the boot option */
50 #else
51 #define do_swap_account         (0)
52 #endif
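/*
 * Note: with CONFIG_CGROUP_MEM_RES_CTLR_SWAP enabled, do_swap_account is
 * turned on at boot unless the user disabled it (via the "noswapaccount"
 * boot option in kernels of this vintage -- the __setup handler is outside
 * this excerpt); with the config off, do_swap_account compiles to 0 and all
 * swap-accounting branches below vanish.
 */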
53
54
55 /*
56  * Statistics for memory cgroup.
57  */
58 enum mem_cgroup_stat_index {
59         /*
60          * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
61          */
62         MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
63         MEM_CGROUP_STAT_RSS,       /* # of pages charged as rss */
64         MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
65         MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
66
67         MEM_CGROUP_STAT_NSTATS,
68 };
69
70 struct mem_cgroup_stat_cpu {
71         s64 count[MEM_CGROUP_STAT_NSTATS];
72 } ____cacheline_aligned_in_smp;
73
74 struct mem_cgroup_stat {
75         struct mem_cgroup_stat_cpu cpustat[0];
76 };
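/*
 * cpustat[0] is a zero-length array: the per-cpu counters live directly
 * behind the enclosing struct mem_cgroup, which is why 'stat' must be the
 * last member of struct mem_cgroup below, and why (in the allocation code
 * outside this excerpt) the allocation is sized for nr_cpu_ids entries.
 */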
77
78 /*
79  * For accounting under irq disable, no need for increment preempt count.
80  */
81 static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
82                 enum mem_cgroup_stat_index idx, int val)
83 {
84         stat->count[idx] += val;
85 }
86
87 static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
88                 enum mem_cgroup_stat_index idx)
89 {
90         int cpu;
91         s64 ret = 0;
92         for_each_possible_cpu(cpu)
93                 ret += stat->cpustat[cpu].count[idx];
94         return ret;
95 }
96
97 /*
98  * per-zone information in memory controller.
99  */
100 struct mem_cgroup_per_zone {
101         /*
102          * the per cgroup LRU below is protected by zone->lru_lock
103          */
104         struct list_head        lists[NR_LRU_LISTS];
105         unsigned long           count[NR_LRU_LISTS];
106 };
107 /* Macro for accessing counter */
108 #define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])
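/*
 * For illustration: MEM_CGROUP_ZSTAT(mz, LRU_INACTIVE_ANON) is the number of
 * inactive anonymous pages this cgroup holds on that zone's private LRU.
 */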
109
110 struct mem_cgroup_per_node {
111         struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
112 };
113
114 struct mem_cgroup_lru_info {
115         struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
116 };
117
118 /*
119  * The memory controller data structure. The memory controller controls both
120  * page cache and RSS per cgroup. We would eventually like to provide
121  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
122  * to help the administrator determine what knobs to tune.
123  *
124  * TODO: Add a water mark for the memory controller. Reclaim will begin when
125  * we hit the water mark. Maybe even add a low water mark, such that
126  * no reclaim occurs from a cgroup at its low water mark; this is
127  * a feature that will be implemented much later in the future.
128  */
129 struct mem_cgroup {
130         struct cgroup_subsys_state css;
131         /*
132          * the counter to account for memory usage
133          */
134         struct res_counter res;
135         /*
136          * the counter to account for mem+swap usage.
137          */
138         struct res_counter memsw;
139         /*
140          * Per cgroup active and inactive list, similar to the
141          * per zone LRU lists.
142          */
143         struct mem_cgroup_lru_info info;
144
145         int     prev_priority;  /* for recording reclaim priority */
146
147         /*
148          * While reclaiming in a hierarchy, we cache the last child we
149          * reclaimed from. Protected by cgroup_lock()
150          */
151         struct mem_cgroup *last_scanned_child;
152         /*
153          * Should the accounting and control be hierarchical, per subtree?
154          */
155         bool use_hierarchy;
156         unsigned long   last_oom_jiffies;
157         int             obsolete;
158         atomic_t        refcnt;
159         /*
160          * statistics. This must be placed at the end of memcg.
161          */
162         struct mem_cgroup_stat stat;
163 };
164
165 enum charge_type {
166         MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
167         MEM_CGROUP_CHARGE_TYPE_MAPPED,
168         MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
169         MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
170         MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
171         NR_CHARGE_TYPE,
172 };
173
174 /* used only in this file (for readability) */
175 #define PCGF_CACHE      (1UL << PCG_CACHE)
176 #define PCGF_USED       (1UL << PCG_USED)
177 #define PCGF_LOCK       (1UL << PCG_LOCK)
178 static const unsigned long
179 pcg_default_flags[NR_CHARGE_TYPE] = {
180         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
181         PCGF_USED | PCGF_LOCK, /* Anon */
182         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
183         0, /* FORCE */
184 };
185
186
187 /* for encoding cft->private value on file */
188 #define _MEM                    (0)
189 #define _MEMSWAP                (1)
190 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
191 #define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
192 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
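/*
 * For illustration: MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) packs the counter
 * type into the upper 16 bits and the res_counter attribute into the lower
 * 16; MEMFILE_TYPE() and MEMFILE_ATTR() recover the two halves in
 * mem_cgroup_read()/mem_cgroup_write() below.
 */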
193
194 static void mem_cgroup_get(struct mem_cgroup *mem);
195 static void mem_cgroup_put(struct mem_cgroup *mem);
196
197 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
198                                          struct page_cgroup *pc,
199                                          bool charge)
200 {
201         int val = (charge)? 1 : -1;
202         struct mem_cgroup_stat *stat = &mem->stat;
203         struct mem_cgroup_stat_cpu *cpustat;
204         int cpu = get_cpu();
205
206         cpustat = &stat->cpustat[cpu];
207         if (PageCgroupCache(pc))
208                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
209         else
210                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
211
212         if (charge)
213                 __mem_cgroup_stat_add_safe(cpustat,
214                                 MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
215         else
216                 __mem_cgroup_stat_add_safe(cpustat,
217                                 MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
218         put_cpu();
219 }
220
221 static struct mem_cgroup_per_zone *
222 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
223 {
224         return &mem->info.nodeinfo[nid]->zoneinfo[zid];
225 }
226
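/*
 * pc->mem_cgroup may still be NULL here, e.g. for a SwapCache page that was
 * added to the LRU before being charged (see the LRU rules documented below),
 * hence the NULL check added in this patch.
 */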
227 static struct mem_cgroup_per_zone *
228 page_cgroup_zoneinfo(struct page_cgroup *pc)
229 {
230         struct mem_cgroup *mem = pc->mem_cgroup;
231         int nid = page_cgroup_nid(pc);
232         int zid = page_cgroup_zid(pc);
233
234         if (!mem)
235                 return NULL;
236
237         return mem_cgroup_zoneinfo(mem, nid, zid);
238 }
239
240 static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
241                                         enum lru_list idx)
242 {
243         int nid, zid;
244         struct mem_cgroup_per_zone *mz;
245         u64 total = 0;
246
247         for_each_online_node(nid)
248                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
249                         mz = mem_cgroup_zoneinfo(mem, nid, zid);
250                         total += MEM_CGROUP_ZSTAT(mz, idx);
251                 }
252         return total;
253 }
254
255 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
256 {
257         return container_of(cgroup_subsys_state(cont,
258                                 mem_cgroup_subsys_id), struct mem_cgroup,
259                                 css);
260 }
261
262 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
263 {
264         /*
265          * mm_update_next_owner() may clear mm->owner to NULL
266          * if it races with swapoff, page migration, etc.
267          * So this can be called with p == NULL.
268          */
269         if (unlikely(!p))
270                 return NULL;
271
272         return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
273                                 struct mem_cgroup, css);
274 }
275
276 /*
277  * Following LRU functions are allowed to be used without PCG_LOCK.
278  * Operations are called by the global LRU routines independently of memcg.
279  * What we have to take care of here is the validity of pc->mem_cgroup.
280  *
281  * Changes to pc->mem_cgroup happens when
282  * 1. charge
283  * 2. moving account
284  * In the typical case, "charge" is done before add-to-lru. The exception is
285  * SwapCache: it is added to the LRU before being charged.
286  * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
287  * When moving account, the page is not on LRU. It's isolated.
288  */
289
290 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
291 {
292         struct page_cgroup *pc;
293         struct mem_cgroup *mem;
294         struct mem_cgroup_per_zone *mz;
295
296         if (mem_cgroup_disabled())
297                 return;
298         pc = lookup_page_cgroup(page);
299         /* can happen while we handle swapcache. */
300         if (list_empty(&pc->lru))
301                 return;
302         mz = page_cgroup_zoneinfo(pc);
303         mem = pc->mem_cgroup;
304         MEM_CGROUP_ZSTAT(mz, lru) -= 1;
305         list_del_init(&pc->lru);
306         return;
307 }
308
309 void mem_cgroup_del_lru(struct page *page)
310 {
311         mem_cgroup_del_lru_list(page, page_lru(page));
312 }
313
314 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
315 {
316         struct mem_cgroup_per_zone *mz;
317         struct page_cgroup *pc;
318
319         if (mem_cgroup_disabled())
320                 return;
321
322         pc = lookup_page_cgroup(page);
323         smp_rmb();      /* barrier to sync with "charge" */
324         /* unused page is not rotated. */
325         if (!PageCgroupUsed(pc))
326                 return;
327         mz = page_cgroup_zoneinfo(pc);
328         list_move(&pc->lru, &mz->lists[lru]);
329 }
330
331 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
332 {
333         struct page_cgroup *pc;
334         struct mem_cgroup_per_zone *mz;
335
336         if (mem_cgroup_disabled())
337                 return;
338         pc = lookup_page_cgroup(page);
339         /* barrier to sync with "charge" */
340         smp_rmb();
341         if (!PageCgroupUsed(pc))
342                 return;
343
344         mz = page_cgroup_zoneinfo(pc);
345         MEM_CGROUP_ZSTAT(mz, lru) += 1;
346         list_add(&pc->lru, &mz->lists[lru]);
347 }
348 /*
349  * Adds swapcache to the LRU. Be careful when calling this function:
350  * zone->lru_lock must not be held and irqs must not be disabled.
351  */
352 static void mem_cgroup_lru_fixup(struct page *page)
353 {
354         if (!isolate_lru_page(page))
355                 putback_lru_page(page);
356 }
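/*
 * For illustration of the trick above: isolate_lru_page() returns 0 on
 * success, having taken the page off the LRU (including, via
 * mem_cgroup_del_lru_list(), any per-cgroup list); putback_lru_page() then
 * re-adds it, now linking the page_cgroup onto the correct per-cgroup LRU.
 */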
357
358 void mem_cgroup_move_lists(struct page *page,
359                            enum lru_list from, enum lru_list to)
360 {
361         if (mem_cgroup_disabled())
362                 return;
363         mem_cgroup_del_lru_list(page, from);
364         mem_cgroup_add_lru_list(page, to);
365 }
366
367 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
368 {
369         int ret;
370
371         task_lock(task);
372         ret = task->mm && mm_match_cgroup(task->mm, mem);
373         task_unlock(task);
374         return ret;
375 }
376
377 /*
378  * Calculate mapped_ratio under the memory controller. This is used in
379  * vmscan.c for determining whether we have to reclaim mapped pages.
380  */
381 int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
382 {
383         long total, rss;
384
385         /*
386          * usage is recorded in bytes. But, here, we assume the number of
387          * physical pages can be represented by "long" on any arch.
388          */
389         total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
390         rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
391         return (int)((rss * 100L) / total);
392 }
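/*
 * Example: with usage = 400 pages and rss = 100 pages, total = 401 (the +1
 * guards against division by zero) and the result is (100 * 100) / 401 = 24.
 */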
393
394 /*
395  * prev_priority control...this will be used in memory reclaim path.
396  */
397 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
398 {
399         return mem->prev_priority;
400 }
401
402 void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
403 {
404         if (priority < mem->prev_priority)
405                 mem->prev_priority = priority;
406 }
407
408 void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
409 {
410         mem->prev_priority = priority;
411 }
412
413 /*
414  * Calculate # of pages to be scanned in this priority/zone.
415  * See also vmscan.c
416  *
417  * priority starts from "DEF_PRIORITY" and decremented in each loop.
418  * (see include/linux/mmzone.h)
419  */
420
421 long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
422                                         int priority, enum lru_list lru)
423 {
424         long nr_pages;
425         int nid = zone->zone_pgdat->node_id;
426         int zid = zone_idx(zone);
427         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
428
429         nr_pages = MEM_CGROUP_ZSTAT(mz, lru);
430
431         return (nr_pages >> priority);
432 }
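/*
 * Example: with 1024 pages on this cgroup's per-zone LRU and priority 2,
 * the caller is told to scan 1024 >> 2 = 256 pages.
 */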
433
434 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
435                                         struct list_head *dst,
436                                         unsigned long *scanned, int order,
437                                         int mode, struct zone *z,
438                                         struct mem_cgroup *mem_cont,
439                                         int active, int file)
440 {
441         unsigned long nr_taken = 0;
442         struct page *page;
443         unsigned long scan;
444         LIST_HEAD(pc_list);
445         struct list_head *src;
446         struct page_cgroup *pc, *tmp;
447         int nid = z->zone_pgdat->node_id;
448         int zid = zone_idx(z);
449         struct mem_cgroup_per_zone *mz;
450         int lru = LRU_FILE * !!file + !!active;
451
452         BUG_ON(!mem_cont);
453         mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
454         src = &mz->lists[lru];
455
456         scan = 0;
457         list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
458                 if (scan >= nr_to_scan)
459                         break;
460
461                 page = pc->page;
462                 if (unlikely(!PageCgroupUsed(pc)))
463                         continue;
464                 if (unlikely(!PageLRU(page)))
465                         continue;
466
467                 scan++;
468                 if (__isolate_lru_page(page, mode, file) == 0) {
469                         list_move(&page->lru, dst);
470                         nr_taken++;
471                 }
472         }
473
474         *scanned = scan;
475         return nr_taken;
476 }
477
478 #define mem_cgroup_from_res_counter(counter, member)    \
479         container_of(counter, struct mem_cgroup, member)
480
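/*
 * For illustration: mem_cgroup_from_res_counter(fail_res, memsw) yields the
 * mem_cgroup whose 'memsw' member is 'fail_res', i.e. the cgroup (possibly
 * an ancestor) whose limit was actually hit.
 */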
481 /*
482  * This routine finds the DFS walk successor. It must be
483  * called with cgroup_mutex held.
484  */
485 static struct mem_cgroup *
486 mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
487 {
488         struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
489
490         curr_cgroup = curr->css.cgroup;
491         root_cgroup = root_mem->css.cgroup;
492
493         if (!list_empty(&curr_cgroup->children)) {
494                 /*
495                  * Walk down to children
496                  */
497                 mem_cgroup_put(curr);
498                 cgroup = list_entry(curr_cgroup->children.next,
499                                                 struct cgroup, sibling);
500                 curr = mem_cgroup_from_cont(cgroup);
501                 mem_cgroup_get(curr);
502                 goto done;
503         }
504
505 visit_parent:
506         if (curr_cgroup == root_cgroup) {
507                 mem_cgroup_put(curr);
508                 curr = root_mem;
509                 mem_cgroup_get(curr);
510                 goto done;
511         }
512
513         /*
514          * Goto next sibling
515          */
516         if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
517                 mem_cgroup_put(curr);
518                 cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
519                                                 sibling);
520                 curr = mem_cgroup_from_cont(cgroup);
521                 mem_cgroup_get(curr);
522                 goto done;
523         }
524
525         /*
526          * Go up to next parent and next parent's sibling if need be
527          */
528         curr_cgroup = curr_cgroup->parent;
529         goto visit_parent;
530
531 done:
532         root_mem->last_scanned_child = curr;
533         return curr;
534 }
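/*
 * Reference counting in the walk above: each step drops the reference on the
 * node being left (mem_cgroup_put) and takes one on the node returned
 * (mem_cgroup_get), so last_scanned_child always holds a reference.
 */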
535
536 /*
537  * Visit the first child (not necessarily the first in the cgroup list,
538  * since we track last_scanned_child) of @root_mem and use
539  * that to reclaim free pages from.
540  */
541 static struct mem_cgroup *
542 mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
543 {
544         struct cgroup *cgroup;
545         struct mem_cgroup *ret;
546         bool obsolete = (root_mem->last_scanned_child &&
547                                 root_mem->last_scanned_child->obsolete);
548
549         /*
550          * Scan all children under the mem_cgroup mem
551          */
552         cgroup_lock();
553         if (list_empty(&root_mem->css.cgroup->children)) {
554                 ret = root_mem;
555                 goto done;
556         }
557
558         if (!root_mem->last_scanned_child || obsolete) {
559
560                 if (obsolete)
561                         mem_cgroup_put(root_mem->last_scanned_child);
562
563                 cgroup = list_first_entry(&root_mem->css.cgroup->children,
564                                 struct cgroup, sibling);
565                 ret = mem_cgroup_from_cont(cgroup);
566                 mem_cgroup_get(ret);
567         } else
568                 ret = mem_cgroup_get_next_node(root_mem->last_scanned_child,
569                                                 root_mem);
570
571 done:
572         root_mem->last_scanned_child = ret;
573         cgroup_unlock();
574         return ret;
575 }
576
577 static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
578 {
579         if (do_swap_account) {
580                 if (res_counter_check_under_limit(&mem->res) &&
581                         res_counter_check_under_limit(&mem->memsw))
582                         return true;
583         } else
584                 if (res_counter_check_under_limit(&mem->res))
585                         return true;
586         return false;
587 }
588
589 /*
590  * Dance down the hierarchy if needed to reclaim memory. We remember the
591  * last child we reclaimed from, so that we don't end up penalizing
592  * one child extensively based on its position in the children list.
593  *
594  * root_mem is the original ancestor that we've been reclaiming from.
595  */
596 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
597                                                 gfp_t gfp_mask, bool noswap)
598 {
599         struct mem_cgroup *next_mem;
600         int ret = 0;
601
602         /*
603          * Reclaim unconditionally and don't check for return value.
604          * We need to reclaim in the current group and down the tree.
605          * One might think about checking for children before reclaiming,
606          * but there might be left over accounting, even after children
607          * have left.
608          */
609         ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap);
610         if (mem_cgroup_check_under_limit(root_mem))
611                 return 0;
612         if (!root_mem->use_hierarchy)
613                 return ret;
614
615         next_mem = mem_cgroup_get_first_node(root_mem);
616
617         while (next_mem != root_mem) {
618                 if (next_mem->obsolete) {
619                         mem_cgroup_put(next_mem);
620                         cgroup_lock();
621                         next_mem = mem_cgroup_get_first_node(root_mem);
622                         cgroup_unlock();
623                         continue;
624                 }
625                 ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap);
626                 if (mem_cgroup_check_under_limit(root_mem))
627                         return 0;
628                 cgroup_lock();
629                 next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
630                 cgroup_unlock();
631         }
632         return ret;
633 }
634
635 bool mem_cgroup_oom_called(struct task_struct *task)
636 {
637         bool ret = false;
638         struct mem_cgroup *mem;
639         struct mm_struct *mm;
640
641         rcu_read_lock();
642         mm = task->mm;
643         if (!mm)
644                 mm = &init_mm;
645         mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
646         if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
647                 ret = true;
648         rcu_read_unlock();
649         return ret;
650 }
651 /*
652  * Unlike the exported interface, an "oom" parameter is added. If oom == true,
653  * the OOM killer can be invoked.
654  */
655 static int __mem_cgroup_try_charge(struct mm_struct *mm,
656                         gfp_t gfp_mask, struct mem_cgroup **memcg,
657                         bool oom)
658 {
659         struct mem_cgroup *mem, *mem_over_limit;
660         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
661         struct res_counter *fail_res;
662
663         if (unlikely(test_thread_flag(TIF_MEMDIE))) {
664                 /* Don't account this! */
665                 *memcg = NULL;
666                 return 0;
667         }
668
669         /*
670          * We always charge the cgroup the mm_struct belongs to.
671          * The mm_struct's mem_cgroup changes on task migration if the
672          * thread group leader migrates. It's possible that mm is not
673          * set, if so charge the init_mm (happens for pagecache usage).
674          */
675         if (likely(!*memcg)) {
676                 rcu_read_lock();
677                 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
678                 if (unlikely(!mem)) {
679                         rcu_read_unlock();
680                         return 0;
681                 }
682                 /*
683                  * For every charge from the cgroup, increment reference count
684                  */
685                 css_get(&mem->css);
686                 *memcg = mem;
687                 rcu_read_unlock();
688         } else {
689                 mem = *memcg;
690                 css_get(&mem->css);
691         }
692
693         while (1) {
694                 int ret;
695                 bool noswap = false;
696
697                 ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
698                 if (likely(!ret)) {
699                         if (!do_swap_account)
700                                 break;
701                         ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
702                                                         &fail_res);
703                         if (likely(!ret))
704                                 break;
705                         /* mem+swap counter fails */
706                         res_counter_uncharge(&mem->res, PAGE_SIZE);
707                         noswap = true;
708                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
709                                                                         memsw);
710                 } else
711                         /* mem counter fails */
712                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
713                                                                         res);
714
715                 if (!(gfp_mask & __GFP_WAIT))
716                         goto nomem;
717
718                 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
719                                                         noswap);
720
721                 /*
722                  * try_to_free_mem_cgroup_pages() might not give us a full
723                  * picture of reclaim. Some pages are reclaimed and might be
724                  * moved to swap cache or just unmapped from the cgroup.
725                  * Check the limit again to see if the reclaim reduced the
726                  * current usage of the cgroup before giving up
727                  *
728                  */
729                 if (mem_cgroup_check_under_limit(mem_over_limit))
730                         continue;
731
732                 if (!nr_retries--) {
733                         if (oom) {
734                                 mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
735                                 mem_over_limit->last_oom_jiffies = jiffies;
736                         }
737                         goto nomem;
738                 }
739         }
740         return 0;
741 nomem:
742         css_put(&mem->css);
743         return -ENOMEM;
744 }
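/*
 * Note on the loop above: the "mem" counter is charged first and "memsw"
 * (mem+swap) second; if only the memsw charge fails, the mem charge is
 * rolled back and reclaim is retried with noswap = true, since swapping
 * pages out cannot reduce mem+swap usage.
 */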
745
746 /**
747  * mem_cgroup_try_charge - get a charge of PAGE_SIZE.
748  * @mm: the mm_struct to charge against (used when *memcg is NULL)
749  * @gfp_mask: gfp_mask for reclaim.
750  * @memcg: a pointer to the memory cgroup which is charged against.
751  *
752  * Charge against the memory cgroup pointed to by *memcg. If *memcg == NULL,
753  * the memory cgroup owning @mm is looked up and stored in *memcg.
754  *
755  * Returns 0 on success, -ENOMEM on failure.
756  * This call can invoke OOM-Killer.
757  */
758
759 int mem_cgroup_try_charge(struct mm_struct *mm,
760                           gfp_t mask, struct mem_cgroup **memcg)
761 {
762         return __mem_cgroup_try_charge(mm, mask, memcg, true);
763 }
764
765 /*
766  * Commit a charge obtained by mem_cgroup_try_charge() and mark the page_cgroup
767  * as USED. If it is already USED, uncharge and return.
768  */
769
770 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
771                                      struct page_cgroup *pc,
772                                      enum charge_type ctype)
773 {
774         /* try_charge() can store NULL in *memcg; handle that case. */
775         if (!mem)
776                 return;
777
778         lock_page_cgroup(pc);
779         if (unlikely(PageCgroupUsed(pc))) {
780                 unlock_page_cgroup(pc);
781                 res_counter_uncharge(&mem->res, PAGE_SIZE);
782                 if (do_swap_account)
783                         res_counter_uncharge(&mem->memsw, PAGE_SIZE);
784                 css_put(&mem->css);
785                 return;
786         }
787         pc->mem_cgroup = mem;
788         smp_wmb();
789         pc->flags = pcg_default_flags[ctype];
790
791         mem_cgroup_charge_statistics(mem, pc, true);
792
793         unlock_page_cgroup(pc);
794 }
795
796 /**
797  * mem_cgroup_move_account - move account of the page
798  * @pc: page_cgroup of the page.
799  * @from: mem_cgroup which the page is moved from.
800  * @to: mem_cgroup which the page is moved to. @from != @to.
801  *
802  * The caller must confirm the following:
803  * - the page is not on the LRU (isolate_page() is useful.)
804  *
805  * Returns 0 on success,
806  * returns -EBUSY when the lock is busy or "pc" is unstable.
807  *
808  * This function does "uncharge" from old cgroup but doesn't do "charge" to
809  * new cgroup. It should be done by a caller.
810  */
811
812 static int mem_cgroup_move_account(struct page_cgroup *pc,
813         struct mem_cgroup *from, struct mem_cgroup *to)
814 {
815         struct mem_cgroup_per_zone *from_mz, *to_mz;
816         int nid, zid;
817         int ret = -EBUSY;
818
819         VM_BUG_ON(from == to);
820         VM_BUG_ON(PageLRU(pc->page));
821
822         nid = page_cgroup_nid(pc);
823         zid = page_cgroup_zid(pc);
824         from_mz =  mem_cgroup_zoneinfo(from, nid, zid);
825         to_mz =  mem_cgroup_zoneinfo(to, nid, zid);
826
827         if (!trylock_page_cgroup(pc))
828                 return ret;
829
830         if (!PageCgroupUsed(pc))
831                 goto out;
832
833         if (pc->mem_cgroup != from)
834                 goto out;
835
836         css_put(&from->css);
837         res_counter_uncharge(&from->res, PAGE_SIZE);
838         mem_cgroup_charge_statistics(from, pc, false);
839         if (do_swap_account)
840                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
841         pc->mem_cgroup = to;
842         mem_cgroup_charge_statistics(to, pc, true);
843         css_get(&to->css);
844         ret = 0;
845 out:
846         unlock_page_cgroup(pc);
847         return ret;
848 }
849
850 /*
851  * move charges to its parent.
852  */
853
854 static int mem_cgroup_move_parent(struct page_cgroup *pc,
855                                   struct mem_cgroup *child,
856                                   gfp_t gfp_mask)
857 {
858         struct page *page = pc->page;
859         struct cgroup *cg = child->css.cgroup;
860         struct cgroup *pcg = cg->parent;
861         struct mem_cgroup *parent;
862         int ret;
863
864         /* Is ROOT ? */
865         if (!pcg)
866                 return -EINVAL;
867
868
869         parent = mem_cgroup_from_cont(pcg);
870
871
872         ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
873         if (ret || !parent)
874                 return ret;
875
876         if (!get_page_unless_zero(page))
877                 return -EBUSY;
878
879         ret = isolate_lru_page(page);
880
881         if (ret)
882                 goto cancel;
883
884         ret = mem_cgroup_move_account(pc, child, parent);
885
886         /* drop the extra refcnt from try_charge() (move_account took one) */
887         css_put(&parent->css);
888         putback_lru_page(page);
889         if (!ret) {
890                 put_page(page);
891                 return 0;
892         }
893         /* uncharge if move fails */
894 cancel:
895         res_counter_uncharge(&parent->res, PAGE_SIZE);
896         if (do_swap_account)
897                 res_counter_uncharge(&parent->memsw, PAGE_SIZE);
898         put_page(page);
899         return ret;
900 }
901
902 /*
903  * Charge the memory controller for page usage.
904  * Return
905  * 0 if the charge was successful
906  * < 0 if the cgroup is over its limit
907  */
908 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
909                                 gfp_t gfp_mask, enum charge_type ctype,
910                                 struct mem_cgroup *memcg)
911 {
912         struct mem_cgroup *mem;
913         struct page_cgroup *pc;
914         int ret;
915
916         pc = lookup_page_cgroup(page);
917         /* can happen at boot */
918         if (unlikely(!pc))
919                 return 0;
920         prefetchw(pc);
921
922         mem = memcg;
923         ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
924         if (ret || !mem)
925                 return ret;
926
927         __mem_cgroup_commit_charge(mem, pc, ctype);
928         return 0;
929 }
930
931 int mem_cgroup_newpage_charge(struct page *page,
932                               struct mm_struct *mm, gfp_t gfp_mask)
933 {
934         if (mem_cgroup_disabled())
935                 return 0;
936         if (PageCompound(page))
937                 return 0;
938         /*
939          * If already mapped, we don't have to account.
940          * If page cache, page->mapping has an address_space.
941          * But page->mapping may hold a stale anon_vma pointer;
942          * detect that with a PageAnon() check: a newly-mapped anon page's
943          * page->mapping is NULL.
944          */
945         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
946                 return 0;
947         if (unlikely(!mm))
948                 mm = &init_mm;
949         return mem_cgroup_charge_common(page, mm, gfp_mask,
950                                 MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
951 }
952
953 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
954                                 gfp_t gfp_mask)
955 {
956         if (mem_cgroup_disabled())
957                 return 0;
958         if (PageCompound(page))
959                 return 0;
960         /*
961          * Corner case handling. This is usually called from
962          * add_to_page_cache(). But some filesystems (shmem) precharge the page
963          * before calling it and call add_to_page_cache() with GFP_NOWAIT.
964          *
965          * In the GFP_NOWAIT case, the page may be pre-charged before calling
966          * add_to_page_cache() (see shmem.c); check that here and avoid
967          * charging twice. (It works, but at a slightly higher cost.)
968          */
969         if (!(gfp_mask & __GFP_WAIT)) {
970                 struct page_cgroup *pc;
971
972
973                 pc = lookup_page_cgroup(page);
974                 if (!pc)
975                         return 0;
976                 lock_page_cgroup(pc);
977                 if (PageCgroupUsed(pc)) {
978                         unlock_page_cgroup(pc);
979                         return 0;
980                 }
981                 unlock_page_cgroup(pc);
982         }
983
984         if (unlikely(!mm))
985                 mm = &init_mm;
986
987         if (page_is_file_cache(page))
988                 return mem_cgroup_charge_common(page, mm, gfp_mask,
989                                 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
990         else
991                 return mem_cgroup_charge_common(page, mm, gfp_mask,
992                                 MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
993 }
994
995 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
996                                  struct page *page,
997                                  gfp_t mask, struct mem_cgroup **ptr)
998 {
999         struct mem_cgroup *mem;
1000         swp_entry_t     ent;
1001
1002         if (mem_cgroup_disabled())
1003                 return 0;
1004
1005         if (!do_swap_account)
1006                 goto charge_cur_mm;
1007
1008         /*
1009          * A racing thread's fault, or swapoff, may have already updated
1010          * the pte, and even removed page from swap cache: return success
1011          * to go on to do_swap_page()'s pte_same() test, which should fail.
1012          */
1013         if (!PageSwapCache(page))
1014                 return 0;
1015
1016         ent.val = page_private(page);
1017
1018         mem = lookup_swap_cgroup(ent);
1019         if (!mem || mem->obsolete)
1020                 goto charge_cur_mm;
1021         *ptr = mem;
1022         return __mem_cgroup_try_charge(NULL, mask, ptr, true);
1023 charge_cur_mm:
1024         if (unlikely(!mm))
1025                 mm = &init_mm;
1026         return __mem_cgroup_try_charge(mm, mask, ptr, true);
1027 }
1028
1029 #ifdef CONFIG_SWAP
1030
1031 int mem_cgroup_cache_charge_swapin(struct page *page,
1032                         struct mm_struct *mm, gfp_t mask, bool locked)
1033 {
1034         int ret = 0;
1035
1036         if (mem_cgroup_disabled())
1037                 return 0;
1038         if (unlikely(!mm))
1039                 mm = &init_mm;
1040         if (!locked)
1041                 lock_page(page);
1042         /*
1043          * If not locked, the page can be dropped from SwapCache before
1044          * we reach here.
1045          */
1046         if (PageSwapCache(page)) {
1047                 struct mem_cgroup *mem = NULL;
1048                 swp_entry_t ent;
1049
1050                 ent.val = page_private(page);
1051                 if (do_swap_account) {
1052                         mem = lookup_swap_cgroup(ent);
1053                         if (mem && mem->obsolete)
1054                                 mem = NULL;
1055                         if (mem)
1056                                 mm = NULL;
1057                 }
1058                 ret = mem_cgroup_charge_common(page, mm, mask,
1059                                 MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
1060
1061                 if (!ret && do_swap_account) {
1062                         /* avoid double counting */
1063                         mem = swap_cgroup_record(ent, NULL);
1064                         if (mem) {
1065                                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1066                                 mem_cgroup_put(mem);
1067                         }
1068                 }
1069         }
1070         if (!locked)
1071                 unlock_page(page);
1072         /* add this page(page_cgroup) to the LRU we want. */
1073         mem_cgroup_lru_fixup(page);
1074
1075         return ret;
1076 }
1077 #endif
1078
1079 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
1080 {
1081         struct page_cgroup *pc;
1082
1083         if (mem_cgroup_disabled())
1084                 return;
1085         if (!ptr)
1086                 return;
1087         pc = lookup_page_cgroup(page);
1088         __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1089         /*
1090          * Now the swap entry is in memory. This means the page may be
1091          * counted both as mem and swap, i.e. double-counted.
1092          * Fix that by uncharging from memsw. This SwapCache is stable
1093          * because we're still under lock_page().
1094          */
1095         if (do_swap_account) {
1096                 swp_entry_t ent = {.val = page_private(page)};
1097                 struct mem_cgroup *memcg;
1098                 memcg = swap_cgroup_record(ent, NULL);
1099                 if (memcg) {
1100                         /* If memcg is obsolete, memcg can be != ptr */
1101                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1102                         mem_cgroup_put(memcg);
1103                 }
1104
1105         }
1106         /* add this page(page_cgroup) to the LRU we want. */
1107         mem_cgroup_lru_fixup(page);
1108 }
1109
1110 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
1111 {
1112         if (mem_cgroup_disabled())
1113                 return;
1114         if (!mem)
1115                 return;
1116         res_counter_uncharge(&mem->res, PAGE_SIZE);
1117         if (do_swap_account)
1118                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1119         css_put(&mem->css);
1120 }
1121
1122
1123 /*
1124  * uncharge if !page_mapped(page)
1125  */
1126 static struct mem_cgroup *
1127 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1128 {
1129         struct page_cgroup *pc;
1130         struct mem_cgroup *mem = NULL;
1131         struct mem_cgroup_per_zone *mz;
1132
1133         if (mem_cgroup_disabled())
1134                 return NULL;
1135
1136         if (PageSwapCache(page))
1137                 return NULL;
1138
1139         /*
1140          * Check if our page_cgroup is valid
1141          */
1142         pc = lookup_page_cgroup(page);
1143         if (unlikely(!pc || !PageCgroupUsed(pc)))
1144                 return NULL;
1145
1146         lock_page_cgroup(pc);
1147
1148         mem = pc->mem_cgroup;
1149
1150         if (!PageCgroupUsed(pc))
1151                 goto unlock_out;
1152
1153         switch (ctype) {
1154         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1155                 if (page_mapped(page))
1156                         goto unlock_out;
1157                 break;
1158         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
1159                 if (!PageAnon(page)) {  /* Shared memory */
1160                         if (page->mapping && !page_is_file_cache(page))
1161                                 goto unlock_out;
1162                 } else if (page_mapped(page)) /* Anon */
1163                                 goto unlock_out;
1164                 break;
1165         default:
1166                 break;
1167         }
1168
1169         res_counter_uncharge(&mem->res, PAGE_SIZE);
1170         if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
1171                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1172
1173         mem_cgroup_charge_statistics(mem, pc, false);
1174         ClearPageCgroupUsed(pc);
1175
1176         mz = page_cgroup_zoneinfo(pc);
1177         unlock_page_cgroup(pc);
1178
1179         /* at swapout, this memcg will be accessed to record to swap */
1180         if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
1181                 css_put(&mem->css);
1182
1183         return mem;
1184
1185 unlock_out:
1186         unlock_page_cgroup(pc);
1187         return NULL;
1188 }
1189
1190 void mem_cgroup_uncharge_page(struct page *page)
1191 {
1192         /* early check. */
1193         if (page_mapped(page))
1194                 return;
1195         if (page->mapping && !PageAnon(page))
1196                 return;
1197         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1198 }
1199
1200 void mem_cgroup_uncharge_cache_page(struct page *page)
1201 {
1202         VM_BUG_ON(page_mapped(page));
1203         VM_BUG_ON(page->mapping);
1204         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
1205 }
1206
1207 /*
1208  * Called from __delete_from_swap_cache(); drops the "page" account.
1209  * The memcg information is recorded in the swap_cgroup of "ent".
1210  */
1211 void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
1212 {
1213         struct mem_cgroup *memcg;
1214
1215         memcg = __mem_cgroup_uncharge_common(page,
1216                                         MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
1217         /* record memcg information */
1218         if (do_swap_account && memcg) {
1219                 swap_cgroup_record(ent, memcg);
1220                 mem_cgroup_get(memcg);
1221         }
1222         if (memcg)
1223                 css_put(&memcg->css);
1224 }
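/*
 * The mem_cgroup_get() above pairs with the mem_cgroup_put() performed when
 * the swap_cgroup record is consumed, e.g. in mem_cgroup_uncharge_swap()
 * below or in the swapin commit paths above.
 */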
1225
1226 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1227 /*
1228  * Called from swap_entry_free(). Removes the record in swap_cgroup and
1229  * uncharges the "memsw" account.
1230  */
1231 void mem_cgroup_uncharge_swap(swp_entry_t ent)
1232 {
1233         struct mem_cgroup *memcg;
1234
1235         if (!do_swap_account)
1236                 return;
1237
1238         memcg = swap_cgroup_record(ent, NULL);
1239         if (memcg) {
1240                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1241                 mem_cgroup_put(memcg);
1242         }
1243 }
1244 #endif
1245
1246 /*
1247  * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
1248  * page belongs to.
1249  */
1250 int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
1251 {
1252         struct page_cgroup *pc;
1253         struct mem_cgroup *mem = NULL;
1254         int ret = 0;
1255
1256         if (mem_cgroup_disabled())
1257                 return 0;
1258
1259         pc = lookup_page_cgroup(page);
1260         lock_page_cgroup(pc);
1261         if (PageCgroupUsed(pc)) {
1262                 mem = pc->mem_cgroup;
1263                 css_get(&mem->css);
1264         }
1265         unlock_page_cgroup(pc);
1266
1267         if (mem) {
1268                 ret = mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem);
1269                 css_put(&mem->css);
1270         }
1271         *ptr = mem;
1272         return ret;
1273 }
1274
1275 /* remove the redundant charge if migration failed */
1276 void mem_cgroup_end_migration(struct mem_cgroup *mem,
1277                 struct page *oldpage, struct page *newpage)
1278 {
1279         struct page *target, *unused;
1280         struct page_cgroup *pc;
1281         enum charge_type ctype;
1282
1283         if (!mem)
1284                 return;
1285
1286         /* at migration success, oldpage->mapping is NULL. */
1287         if (oldpage->mapping) {
1288                 target = oldpage;
1289                 unused = NULL;
1290         } else {
1291                 target = newpage;
1292                 unused = oldpage;
1293         }
1294
1295         if (PageAnon(target))
1296                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
1297         else if (page_is_file_cache(target))
1298                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
1299         else
1300                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
1301
1302         /* unused page is not on radix-tree now. */
1303         if (unused)
1304                 __mem_cgroup_uncharge_common(unused, ctype);
1305
1306         pc = lookup_page_cgroup(target);
1307         /*
1308          * __mem_cgroup_commit_charge() checks the PCG_USED bit of the
1309          * page_cgroup, so double-counting is effectively avoided.
1310          */
1311         __mem_cgroup_commit_charge(mem, pc, ctype);
1312
1313         /*
1314          * Both of oldpage and newpage are still under lock_page().
1315          * Then, we don't have to care about race in radix-tree.
1316          * But we have to be careful that this page is unmapped or not.
1317          *
1318          * There is a case for !page_mapped(). At the start of
1319          * migration, oldpage was mapped. But now, it's zapped.
1320          * But we know *target* page is not freed/reused under us.
1321          * mem_cgroup_uncharge_page() does all necessary checks.
1322          */
1323         if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
1324                 mem_cgroup_uncharge_page(target);
1325 }
1326
1327 /*
1328  * Try to shrink memory usage under the specified resource controller.
1329  * This is typically used to reclaim shmem pages, reducing the side
1330  * effects of shmem page allocation on the mem_cgroups that use it.
1331  */
1332 int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
1333 {
1334         struct mem_cgroup *mem;
1335         int progress = 0;
1336         int retry = MEM_CGROUP_RECLAIM_RETRIES;
1337
1338         if (mem_cgroup_disabled())
1339                 return 0;
1340         if (!mm)
1341                 return 0;
1342
1343         rcu_read_lock();
1344         mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
1345         if (unlikely(!mem)) {
1346                 rcu_read_unlock();
1347                 return 0;
1348         }
1349         css_get(&mem->css);
1350         rcu_read_unlock();
1351
1352         do {
1353                 progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true);
1354                 progress += mem_cgroup_check_under_limit(mem);
1355         } while (!progress && --retry);
1356
1357         css_put(&mem->css);
1358         if (!retry)
1359                 return -ENOMEM;
1360         return 0;
1361 }
1362
1363 static DEFINE_MUTEX(set_limit_mutex);
1364
1365 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
1366                                 unsigned long long val)
1367 {
1368
1369         int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
1370         int progress;
1371         u64 memswlimit;
1372         int ret = 0;
1373
1374         while (retry_count) {
1375                 if (signal_pending(current)) {
1376                         ret = -EINTR;
1377                         break;
1378                 }
1379                 /*
1380                  * Rather than hiding all this in some function, do it
1381                  * open-coded so it is obvious what really happens.
1382                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
1383                  */
1384                 mutex_lock(&set_limit_mutex);
1385                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1386                 if (memswlimit < val) {
1387                         ret = -EINVAL;
1388                         mutex_unlock(&set_limit_mutex);
1389                         break;
1390                 }
1391                 ret = res_counter_set_limit(&memcg->res, val);
1392                 mutex_unlock(&set_limit_mutex);
1393
1394                 if (!ret)
1395                         break;
1396
1397                 progress = try_to_free_mem_cgroup_pages(memcg,
1398                                 GFP_KERNEL, false);
1399                 if (!progress)
                        retry_count--;
1400         }
1401         return ret;
1402 }
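/*
 * Consequence of the ordering checks above and in the memsw variant below:
 * to raise both limits, the mem+swap limit must be raised before the memory
 * limit (and they must be lowered in the opposite order).
 */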
1403
1404 int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
1405                                 unsigned long long val)
1406 {
1407         int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
1408         u64 memlimit, oldusage, curusage;
1409         int ret;
1410
1411         if (!do_swap_account)
1412                 return -EINVAL;
1413
1414         while (retry_count) {
1415                 if (signal_pending(current)) {
1416                         ret = -EINTR;
1417                         break;
1418                 }
1419                 /*
1420                  * Rather than hiding all this in some function, do it
1421                  * open-coded so it is obvious what really happens.
1422                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
1423                  */
1424                 mutex_lock(&set_limit_mutex);
1425                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1426                 if (memlimit > val) {
1427                         ret = -EINVAL;
1428                         mutex_unlock(&set_limit_mutex);
1429                         break;
1430                 }
1431                 ret = res_counter_set_limit(&memcg->memsw, val);
1432                 mutex_unlock(&set_limit_mutex);
1433
1434                 if (!ret)
1435                         break;
1436
1437                 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1438                 try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true);
1439                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1440                 if (curusage >= oldusage)
1441                         retry_count--;
1442         }
1443         return ret;
1444 }
1445
1446 /*
1447  * This routine traverses the page_cgroups on the given list and drops them all.
1448  * *And* it doesn't reclaim the pages themselves, it just removes the page_cgroups.
1449  */
1450 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
1451                                 int node, int zid, enum lru_list lru)
1452 {
1453         struct zone *zone;
1454         struct mem_cgroup_per_zone *mz;
1455         struct page_cgroup *pc, *busy;
1456         unsigned long flags, loop;
1457         struct list_head *list;
1458         int ret = 0;
1459
1460         zone = &NODE_DATA(node)->node_zones[zid];
1461         mz = mem_cgroup_zoneinfo(mem, node, zid);
1462         list = &mz->lists[lru];
1463
1464         loop = MEM_CGROUP_ZSTAT(mz, lru);
1465         /* give some margin against EBUSY etc...*/
1466         loop += 256;
1467         busy = NULL;
1468         while (loop--) {
1469                 ret = 0;
1470                 spin_lock_irqsave(&zone->lru_lock, flags);
1471                 if (list_empty(list)) {
1472                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1473                         break;
1474                 }
1475                 pc = list_entry(list->prev, struct page_cgroup, lru);
1476                 if (busy == pc) {
1477                         list_move(&pc->lru, list);
1478                         busy = NULL;
1479                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1480                         continue;
1481                 }
1482                 spin_unlock_irqrestore(&zone->lru_lock, flags);
1483
1484                 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
1485                 if (ret == -ENOMEM)
1486                         break;
1487
1488                 if (ret == -EBUSY || ret == -EINVAL) {
1489                         /* found lock contention or "pc" is obsolete. */
1490                         busy = pc;
1491                         cond_resched();
1492                 } else
1493                         busy = NULL;
1494         }
1495
1496         if (!ret && !list_empty(list))
1497                 return -EBUSY;
1498         return ret;
1499 }
1500
1501 /*
1502  * Force the mem_cgroup's charge to 0 if it contains no tasks.
1503  * This enables deleting the mem_cgroup.
1504  */
1505 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
1506 {
1507         int ret;
1508         int node, zid, shrink;
1509         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1510         struct cgroup *cgrp = mem->css.cgroup;
1511
1512         css_get(&mem->css);
1513
1514         shrink = 0;
1515         /* should free all ? */
1516         if (free_all)
1517                 goto try_to_free;
1518 move_account:
1519         while (mem->res.usage > 0) {
1520                 ret = -EBUSY;
1521                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
1522                         goto out;
1523                 ret = -EINTR;
1524                 if (signal_pending(current))
1525                         goto out;
1526                 /* This is for making all *used* pages to be on LRU. */
1527                 lru_add_drain_all();
1528                 ret = 0;
1529                 for_each_node_state(node, N_POSSIBLE) {
1530                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
1531                                 enum lru_list l;
1532                                 for_each_lru(l) {
1533                                         ret = mem_cgroup_force_empty_list(mem,
1534                                                         node, zid, l);
1535                                         if (ret)
1536                                                 break;
1537                                 }
1538                         }
1539                         if (ret)
1540                                 break;
1541                 }
1542                 /* it seems parent cgroup doesn't have enough mem */
1543                 if (ret == -ENOMEM)
1544                         goto try_to_free;
1545                 cond_resched();
1546         }
1547         ret = 0;
1548 out:
1549         css_put(&mem->css);
1550         return ret;
1551
1552 try_to_free:
1553         /* return -EBUSY if there is a task or if we come here twice. */
1554         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
1555                 ret = -EBUSY;
1556                 goto out;
1557         }
1558         /* we call try-to-free pages to make this cgroup empty */
1559         lru_add_drain_all();
1560         /* try to free all pages in this cgroup */
1561         shrink = 1;
1562         while (nr_retries && mem->res.usage > 0) {
1563                 int progress;
1564
1565                 if (signal_pending(current)) {
1566                         ret = -EINTR;
1567                         goto out;
1568                 }
1569                 progress = try_to_free_mem_cgroup_pages(mem,
1570                                                   GFP_KERNEL, false);
1571                 if (!progress) {
1572                         nr_retries--;
1573                         /* maybe some writeback is necessary */
1574                         congestion_wait(WRITE, HZ/10);
1575                 }
1577         }
1578         lru_add_drain();
1579         /* try move_account...there may be some *locked* pages. */
1580         if (mem->res.usage)
1581                 goto move_account;
1582         ret = 0;
1583         goto out;
1584 }
1585
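     /*
      * Userspace trigger for the above: writing to "memory.force_empty"
      * reclaims the group's pages (falling back to moving charges to the
      * parent) so that an idle group can afterwards be removed with rmdir.
      * Illustrative use, assuming the controller is mounted at
      * /cgroups/memory (the mount point is configuration dependent):
      *
      *   # echo 1 > /cgroups/memory/0/memory.force_empty
      */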
1586 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
1587 {
1588         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
1589 }
1590
1592 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
1593 {
1594         return mem_cgroup_from_cont(cont)->use_hierarchy;
1595 }
1596
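     /*
      * Illustrative use (paths assume the controller is mounted at
      * /cgroups/memory): use_hierarchy can only be changed while this
      * group has no children and the parent does not already enforce
      * hierarchical accounting.
      *
      *   # mkdir /cgroups/memory/A
      *   # echo 1 > /cgroups/memory/A/memory.use_hierarchy
      *   # mkdir /cgroups/memory/A/B    (B inherits use_hierarchy=1)
      */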
1597 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
1598                                         u64 val)
1599 {
1600         int retval = 0;
1601         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1602         struct cgroup *parent = cont->parent;
1603         struct mem_cgroup *parent_mem = NULL;
1604
1605         if (parent)
1606                 parent_mem = mem_cgroup_from_cont(parent);
1607
1608         cgroup_lock();
1609         /*
1610          * If the parent's use_hierarchy is set, we can't make any modifications
1611          * in the child subtrees. If it is unset, then the change can
1612          * occur, provided the current cgroup has no children.
1613          *
1614          * For the root cgroup, parent_mem is NULL; we allow the value to be
1615          * set if there are no children.
1616          */
1617         if ((!parent_mem || !parent_mem->use_hierarchy) &&
1618                                 (val == 1 || val == 0)) {
1619                 if (list_empty(&cont->children))
1620                         mem->use_hierarchy = val;
1621                 else
1622                         retval = -EBUSY;
1623         } else
1624                 retval = -EINVAL;
1625         cgroup_unlock();
1626
1627         return retval;
1628 }
1629
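     /*
      * cft->private carries a (resource type, attribute) pair packed by
      * MEMFILE_PRIVATE(), defined earlier in this file: the type selects
      * the "mem" or "mem+swap" res_counter, and the attribute (RES_USAGE,
      * RES_LIMIT, RES_MAX_USAGE or RES_FAILCNT) selects the field to read.
      */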
1630 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
1631 {
1632         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1633         u64 val = 0;
1634         int type, name;
1635
1636         type = MEMFILE_TYPE(cft->private);
1637         name = MEMFILE_ATTR(cft->private);
1638         switch (type) {
1639         case _MEM:
1640                 val = res_counter_read_u64(&mem->res, name);
1641                 break;
1642         case _MEMSWAP:
1643                 if (do_swap_account)
1644                         val = res_counter_read_u64(&mem->memsw, name);
1645                 break;
1646         default:
1647                 BUG();
1648                 break;
1649         }
1650         return val;
1651 }
1652 /*
1653  * The only user of this function is
1654  * the RES_LIMIT control file.
1655  */
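     /*
      * Sketch of the resulting interface (values illustrative):
      * res_counter_memparse_write_strategy() parses the buffer with
      * memparse(), so the usual K/M/G suffixes work and the following
      * writes are equivalent:
      *
      *   # echo 4M > memory.limit_in_bytes
      *   # echo 4096K > memory.limit_in_bytes
      *   # echo 4194304 > memory.limit_in_bytes
      */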
1656 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
1657                             const char *buffer)
1658 {
1659         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
1660         int type, name;
1661         unsigned long long val;
1662         int ret;
1663
1664         type = MEMFILE_TYPE(cft->private);
1665         name = MEMFILE_ATTR(cft->private);
1666         switch (name) {
1667         case RES_LIMIT:
1668                 /* This function does all the necessary parsing...reuse it */
1669                 ret = res_counter_memparse_write_strategy(buffer, &val);
1670                 if (ret)
1671                         break;
1672                 if (type == _MEM)
1673                         ret = mem_cgroup_resize_limit(memcg, val);
1674                 else
1675                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
1676                 break;
1677         default:
1678                 ret = -EINVAL; /* should be BUG() ? */
1679                 break;
1680         }
1681         return ret;
1682 }
1683
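     /*
      * Handler for the "trigger" files below: writing resets the recorded
      * maximum or the failure counter; the written value itself is
      * ignored. E.g.:
      *
      *   # echo 0 > memory.max_usage_in_bytes
      *   # echo 0 > memory.failcnt
      */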
1684 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
1685 {
1686         struct mem_cgroup *mem;
1687         int type, name;
1688
1689         mem = mem_cgroup_from_cont(cont);
1690         type = MEMFILE_TYPE(event);
1691         name = MEMFILE_ATTR(event);
1692         switch (name) {
1693         case RES_MAX_USAGE:
1694                 if (type == _MEM)
1695                         res_counter_reset_max(&mem->res);
1696                 else
1697                         res_counter_reset_max(&mem->memsw);
1698                 break;
1699         case RES_FAILCNT:
1700                 if (type == _MEM)
1701                         res_counter_reset_failcnt(&mem->res);
1702                 else
1703                         res_counter_reset_failcnt(&mem->memsw);
1704                 break;
1705         }
1706         return 0;
1707 }
1708
1709 static const struct mem_cgroup_stat_desc {
1710         const char *msg;
1711         u64 unit;
1712 } mem_cgroup_stat_desc[] = {
1713         [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
1714         [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
1715         [MEM_CGROUP_STAT_PGPGIN_COUNT] = { "pgpgin", 1, },
1716         [MEM_CGROUP_STAT_PGPGOUT_COUNT] = { "pgpgout", 1, },
1717 };
1718
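     /*
      * Sample "memory.stat" output assembled by the function below (the
      * numbers are purely illustrative):
      *
      *   cache 8192
      *   rss 1060864
      *   pgpgin 261
      *   pgpgout 0
      *   active_anon 1060864
      *   inactive_anon 0
      *   active_file 4096
      *   inactive_file 4096
      *   unevictable 0
      */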
1719 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
1720                                  struct cgroup_map_cb *cb)
1721 {
1722         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
1723         struct mem_cgroup_stat *stat = &mem_cont->stat;
1724         int i;
1725
1726         for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
1727                 s64 val;
1728
1729                 val = mem_cgroup_read_stat(stat, i);
1730                 val *= mem_cgroup_stat_desc[i].unit;
1731                 cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
1732         }
1733         /* showing # of active pages */
1734         {
1735                 unsigned long active_anon, inactive_anon;
1736                 unsigned long active_file, inactive_file;
1737                 unsigned long unevictable;
1738
1739                 inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
1740                                                 LRU_INACTIVE_ANON);
1741                 active_anon = mem_cgroup_get_all_zonestat(mem_cont,
1742                                                 LRU_ACTIVE_ANON);
1743                 inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
1744                                                 LRU_INACTIVE_FILE);
1745                 active_file = mem_cgroup_get_all_zonestat(mem_cont,
1746                                                 LRU_ACTIVE_FILE);
1747                 unevictable = mem_cgroup_get_all_zonestat(mem_cont,
1748                                                         LRU_UNEVICTABLE);
1749
1750                 cb->fill(cb, "active_anon", active_anon * PAGE_SIZE);
1751                 cb->fill(cb, "inactive_anon", inactive_anon * PAGE_SIZE);
1752                 cb->fill(cb, "active_file", active_file * PAGE_SIZE);
1753                 cb->fill(cb, "inactive_file", inactive_file * PAGE_SIZE);
1754                 cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
1756         }
1757         return 0;
1758 }
1759
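     /*
      * Control files created in every directory of the memory controller
      * hierarchy. The cgroup core prepends the subsystem name, so they
      * appear to userspace as "memory.usage_in_bytes" etc. (unless the
      * hierarchy was mounted with the noprefix option).
      */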
1761 static struct cftype mem_cgroup_files[] = {
1762         {
1763                 .name = "usage_in_bytes",
1764                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
1765                 .read_u64 = mem_cgroup_read,
1766         },
1767         {
1768                 .name = "max_usage_in_bytes",
1769                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
1770                 .trigger = mem_cgroup_reset,
1771                 .read_u64 = mem_cgroup_read,
1772         },
1773         {
1774                 .name = "limit_in_bytes",
1775                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
1776                 .write_string = mem_cgroup_write,
1777                 .read_u64 = mem_cgroup_read,
1778         },
1779         {
1780                 .name = "failcnt",
1781                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
1782                 .trigger = mem_cgroup_reset,
1783                 .read_u64 = mem_cgroup_read,
1784         },
1785         {
1786                 .name = "stat",
1787                 .read_map = mem_control_stat_show,
1788         },
1789         {
1790                 .name = "force_empty",
1791                 .trigger = mem_cgroup_force_empty_write,
1792         },
1793         {
1794                 .name = "use_hierarchy",
1795                 .write_u64 = mem_cgroup_hierarchy_write,
1796                 .read_u64 = mem_cgroup_hierarchy_read,
1797         },
1798 };
1799
1800 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
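     /*
      * Extra files for memory+swap ("memsw") accounting. The names
      * already carry the "memsw." component, so userspace sees e.g.
      * "memory.memsw.usage_in_bytes". They are registered only when
      * do_swap_account is set (see register_memsw_files() below).
      */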
1801 static struct cftype memsw_cgroup_files[] = {
1802         {
1803                 .name = "memsw.usage_in_bytes",
1804                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
1805                 .read_u64 = mem_cgroup_read,
1806         },
1807         {
1808                 .name = "memsw.max_usage_in_bytes",
1809                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
1810                 .trigger = mem_cgroup_reset,
1811                 .read_u64 = mem_cgroup_read,
1812         },
1813         {
1814                 .name = "memsw.limit_in_bytes",
1815                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
1816                 .write_string = mem_cgroup_write,
1817                 .read_u64 = mem_cgroup_read,
1818         },
1819         {
1820                 .name = "memsw.failcnt",
1821                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
1822                 .trigger = mem_cgroup_reset,
1823                 .read_u64 = mem_cgroup_read,
1824         },
1825 };
1826
1827 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
1828 {
1829         if (!do_swap_account)
1830                 return 0;
1831         return cgroup_add_files(cont, ss, memsw_cgroup_files,
1832                                 ARRAY_SIZE(memsw_cgroup_files));
1833 }
1834 #else
1835 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
1836 {
1837         return 0;
1838 }
1839 #endif
1840
1841 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1842 {
1843         struct mem_cgroup_per_node *pn;
1844         struct mem_cgroup_per_zone *mz;
1845         enum lru_list l;
1846         int zone, tmp = node;
1847         /*
1848          * This routine is called for each possible node.
1849          * But it's a BUG to call kmalloc() against an offline node.
1850          *
1851          * TODO: this routine can waste a lot of memory on nodes which will
1852          *       never be onlined. It would be better to use a memory hotplug
1853          *       callback function.
1854          */
1855         if (!node_state(node, N_NORMAL_MEMORY))
1856                 tmp = -1;
1857         pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
1858         if (!pn)
1859                 return 1;
1860
1861         mem->info.nodeinfo[node] = pn;
1862         memset(pn, 0, sizeof(*pn));
1863
1864         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
1865                 mz = &pn->zoneinfo[zone];
1866                 for_each_lru(l)
1867                         INIT_LIST_HEAD(&mz->lists[l]);
1868         }
1869         return 0;
1870 }
1871
1872 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1873 {
1874         kfree(mem->info.nodeinfo[node]);
1875 }
1876
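     /*
      * struct mem_cgroup ends in a zero-length per-cpu statistics array,
      * so the real allocation size is the base structure plus one
      * mem_cgroup_stat_cpu entry per possible CPU.
      */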
1877 static int mem_cgroup_size(void)
1878 {
1879         int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
1880         return sizeof(struct mem_cgroup) + cpustat_size;
1881 }
1882
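     /*
      * With a large nr_cpu_ids the trailing per-cpu stat array can push
      * the allocation past a page; use vmalloc() in that case, since
      * high-order kmalloc() allocations are more likely to fail.
      */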
1883 static struct mem_cgroup *mem_cgroup_alloc(void)
1884 {
1885         struct mem_cgroup *mem;
1886         int size = mem_cgroup_size();
1887
1888         if (size < PAGE_SIZE)
1889                 mem = kmalloc(size, GFP_KERNEL);
1890         else
1891                 mem = vmalloc(size);
1892
1893         if (mem)
1894                 memset(mem, 0, size);
1895         return mem;
1896 }
1897
1898 /*
1899  * When a mem_cgroup is destroyed, references from swap_cgroup can remain.
1900  * (scanning all of them at force_empty is too costly...)
1901  *
1902  * Instead of clearing all references at force_empty, we remember
1903  * the number of references from swap_cgroup and free the mem_cgroup when
1904  * it goes down to 0.
1905  *
1906  * When the mem_cgroup is destroyed, mem->obsolete is set to 1 and any
1907  * entry which points to this memcg will be ignored at swapin.
1908  *
1909  * Removal of the cgroup itself succeeds regardless of refs from swap.
1910  */
1911
1912 static void mem_cgroup_free(struct mem_cgroup *mem)
1913 {
1914         int node;
1915
1916         if (atomic_read(&mem->refcnt) > 0)
1917                 return;
1918
1920         for_each_node_state(node, N_POSSIBLE)
1921                 free_mem_cgroup_per_zone_info(mem, node);
1922
1923         if (mem_cgroup_size() < PAGE_SIZE)
1924                 kfree(mem);
1925         else
1926                 vfree(mem);
1927 }
1928
1929 static void mem_cgroup_get(struct mem_cgroup *mem)
1930 {
1931         atomic_inc(&mem->refcnt);
1932 }
1933
1934 static void mem_cgroup_put(struct mem_cgroup *mem)
1935 {
1936         if (atomic_dec_and_test(&mem->refcnt)) {
1937                 if (!mem->obsolete)
1938                         return;
1939                 mem_cgroup_free(mem);
1940         }
1941 }
1942
1944 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1945 static void __init enable_swap_cgroup(void)
1946 {
1947         if (!mem_cgroup_disabled() && really_do_swap_account)
1948                 do_swap_account = 1;
1949 }
1950 #else
1951 static void __init enable_swap_cgroup(void)
1952 {
1953 }
1954 #endif
1955
1956 static struct cgroup_subsys_state *
1957 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
1958 {
1959         struct mem_cgroup *mem, *parent;
1960         int node;
1961
1962         mem = mem_cgroup_alloc();
1963         if (!mem)
1964                 return ERR_PTR(-ENOMEM);
1965
1966         for_each_node_state(node, N_POSSIBLE)
1967                 if (alloc_mem_cgroup_per_zone_info(mem, node))
1968                         goto free_out;
1969         /* root ? */
1970         if (cont->parent == NULL) {
1971                 enable_swap_cgroup();
1972                 parent = NULL;
1973         } else {
1974                 parent = mem_cgroup_from_cont(cont->parent);
1975                 mem->use_hierarchy = parent->use_hierarchy;
1976         }
1977
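             /*
              * With use_hierarchy enabled, chain this group's res_counters
              * to the parent's: a charge against this group then propagates
              * upwards, so every ancestor's limit applies as well.
              */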
1978         if (parent && parent->use_hierarchy) {
1979                 res_counter_init(&mem->res, &parent->res);
1980                 res_counter_init(&mem->memsw, &parent->memsw);
1981         } else {
1982                 res_counter_init(&mem->res, NULL);
1983                 res_counter_init(&mem->memsw, NULL);
1984         }
1985
1986         mem->last_scanned_child = NULL;
1987
1988         return &mem->css;
1989 free_out:
1990         for_each_node_state(node, N_POSSIBLE)
1991                 free_mem_cgroup_per_zone_info(mem, node);
1992         mem_cgroup_free(mem);
1993         return ERR_PTR(-ENOMEM);
1994 }
1995
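     /*
      * ->pre_destroy runs while the cgroup is still alive: mark the group
      * obsolete (so swapin will not re-charge it) and drain its charges.
      * The memory itself is freed from ->destroy, or later from
      * mem_cgroup_put() if swap entries still hold references.
      */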
1996 static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
1997                                         struct cgroup *cont)
1998 {
1999         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2000         mem->obsolete = 1;
2001         mem_cgroup_force_empty(mem, false);
2002 }
2003
2004 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
2005                                 struct cgroup *cont)
2006 {
2007         mem_cgroup_free(mem_cgroup_from_cont(cont));
2008 }
2009
2010 static int mem_cgroup_populate(struct cgroup_subsys *ss,
2011                                 struct cgroup *cont)
2012 {
2013         int ret;
2014
2015         ret = cgroup_add_files(cont, ss, mem_cgroup_files,
2016                                 ARRAY_SIZE(mem_cgroup_files));
2017
2018         if (!ret)
2019                 ret = register_memsw_files(cont, ss);
2020         return ret;
2021 }
2022
2023 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
2024                                 struct cgroup *cont,
2025                                 struct cgroup *old_cont,
2026                                 struct task_struct *p)
2027 {
2028         /*
2029          * FIXME: It would be better to move this process's charges from the
2030          * old memcg to the new one, but that is still only on the TODO list.
2031          */
2032 }
2033
2034 struct cgroup_subsys mem_cgroup_subsys = {
2035         .name = "memory",
2036         .subsys_id = mem_cgroup_subsys_id,
2037         .create = mem_cgroup_create,
2038         .pre_destroy = mem_cgroup_pre_destroy,
2039         .destroy = mem_cgroup_destroy,
2040         .populate = mem_cgroup_populate,
2041         .attach = mem_cgroup_move_task,
2042         .early_init = 0,
2043 };
2044
2045 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2046
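     /*
      * Kernel command-line parameter: booting with "noswapaccount" keeps
      * CONFIG_CGROUP_MEM_RES_CTLR_SWAP compiled in but disables swap
      * accounting at runtime (do_swap_account then stays 0).
      */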
2047 static int __init disable_swap_account(char *s)
2048 {
2049         really_do_swap_account = 0;
2050         return 1;
2051 }
2052 __setup("noswapaccount", disable_swap_account);
2053 #endif