X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=mm%2Fmemcontrol.c;h=8a79a6f0f029842860fae7cd9b15c8c90600d56c;hb=5e8aa85253513b9c1ade8bd71dc341218a752a65;hp=7226e60e52af5a9b8526f74255dba985e807a120;hpb=21ae2956ce289f61f11863cc67080f9a28101ae0;p=safe%2Fjmp%2Flinux-2.6 diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7226e60..8a79a6f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -6,6 +6,10 @@ * Copyright 2007 OpenVZ SWsoft Inc * Author: Pavel Emelianov * + * Memory thresholds + * Copyright (C) 2009 Nokia Corporation + * Author: Kirill A. Shutemov + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or @@ -21,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -32,12 +37,16 @@ #include #include #include +#include #include +#include +#include #include #include #include #include #include +#include #include "internal.h" #include @@ -54,8 +63,15 @@ static int really_do_swap_account __initdata = 1; /* for remember boot option*/ #define do_swap_account (0) #endif -static DEFINE_MUTEX(memcg_tasklist); /* can be hold under cgroup_mutex */ -#define SOFTLIMIT_EVENTS_THRESH (1000) +/* + * Per memcg event counter is incremented at every pagein/pageout. This counter + * is used for trigger some periodic events. This is straightforward and better + * than using jiffies etc. to handle periodic memcg event. + * + * These values will be used as !((event) & ((1 <<(thresh)) - 1)) + */ +#define THRESHOLDS_EVENTS_THRESH (7) /* once in 128 */ +#define SOFTLIMIT_EVENTS_THRESH (10) /* once in 1024 */ /* * Statistics for memory cgroup. @@ -66,65 +82,19 @@ enum mem_cgroup_stat_index { */ MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */ MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */ - MEM_CGROUP_STAT_MAPPED_FILE, /* # of pages charged as file rss */ + MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */ MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */ MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */ - MEM_CGROUP_STAT_EVENTS, /* sum of pagein + pageout for internal use */ MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */ + MEM_CGROUP_EVENTS, /* incremented at every pagein/pageout */ MEM_CGROUP_STAT_NSTATS, }; struct mem_cgroup_stat_cpu { s64 count[MEM_CGROUP_STAT_NSTATS]; -} ____cacheline_aligned_in_smp; - -struct mem_cgroup_stat { - struct mem_cgroup_stat_cpu cpustat[0]; }; -static inline void -__mem_cgroup_stat_reset_safe(struct mem_cgroup_stat_cpu *stat, - enum mem_cgroup_stat_index idx) -{ - stat->count[idx] = 0; -} - -static inline s64 -__mem_cgroup_stat_read_local(struct mem_cgroup_stat_cpu *stat, - enum mem_cgroup_stat_index idx) -{ - return stat->count[idx]; -} - -/* - * For accounting under irq disable, no need for increment preempt count. 
- */ -static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat, - enum mem_cgroup_stat_index idx, int val) -{ - stat->count[idx] += val; -} - -static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat, - enum mem_cgroup_stat_index idx) -{ - int cpu; - s64 ret = 0; - for_each_possible_cpu(cpu) - ret += stat->cpustat[cpu].count[idx]; - return ret; -} - -static s64 mem_cgroup_local_usage(struct mem_cgroup_stat *stat) -{ - s64 ret; - - ret = mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_CACHE); - ret += mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_RSS); - return ret; -} - /* * per-zone information in memory controller. */ @@ -174,6 +144,22 @@ struct mem_cgroup_tree { static struct mem_cgroup_tree soft_limit_tree __read_mostly; +struct mem_cgroup_threshold { + struct eventfd_ctx *eventfd; + u64 threshold; +}; + +struct mem_cgroup_threshold_ary { + /* An array index points to threshold just below usage. */ + atomic_t current_threshold; + /* Size of entries[] */ + unsigned int size; + /* Array of thresholds */ + struct mem_cgroup_threshold entries[0]; +}; + +static void mem_cgroup_threshold(struct mem_cgroup *mem); + /* * The memory controller data structure. The memory controller controls both * page cache and RSS per cgroup. We would eventually like to provide @@ -209,7 +195,7 @@ struct mem_cgroup { int prev_priority; /* for recording reclaim priority */ /* - * While reclaiming in a hiearchy, we cache the last child we + * While reclaiming in a hierarchy, we cache the last child we * reclaimed from. */ int last_scanned_child; @@ -217,7 +203,7 @@ struct mem_cgroup { * Should the accounting and control be hierarchical, per subtree? */ bool use_hierarchy; - unsigned long last_oom_jiffies; + atomic_t oom_lock; atomic_t refcnt; unsigned int swappiness; @@ -225,10 +211,48 @@ struct mem_cgroup { /* set when res.limit == memsw.limit */ bool memsw_is_minimum; + /* protect arrays of thresholds */ + struct mutex thresholds_lock; + + /* thresholds for memory usage. RCU-protected */ + struct mem_cgroup_threshold_ary *thresholds; + + /* thresholds for mem+swap usage. RCU-protected */ + struct mem_cgroup_threshold_ary *memsw_thresholds; + + /* + * Should we move charges of a task when a task is moved into this + * mem_cgroup ? And what type of charges should we move ? + */ + unsigned long move_charge_at_immigrate; + /* - * statistics. This must be placed at the end of memcg. + * percpu counter. */ - struct mem_cgroup_stat stat; + struct mem_cgroup_stat_cpu *stat; +}; + +/* Stuffs for move charges at task migration. */ +/* + * Types of charges to be moved. "move_charge_at_immitgrate" is treated as a + * left-shifted bitmap of these types. 
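+ * (e.g. writing "1" to the move_charge_at_immigrate file sets bit 0,
+ * MOVE_CHARGE_TYPE_ANON below, so charges of private anonymous pages and
+ * their swap entries follow a task that migrates into this cgroup.)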
+ */ +enum move_type { + MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */ + NR_MOVE_TYPE, +}; + +/* "mc" and its members are protected by cgroup_mutex */ +static struct move_charge_struct { + struct mem_cgroup *from; + struct mem_cgroup *to; + unsigned long precharge; + unsigned long moved_charge; + unsigned long moved_swap; + struct task_struct *moving_task; /* a task moving charges */ + wait_queue_head_t waitq; /* a waitq for other context */ +} mc = { + .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), }; /* @@ -275,6 +299,7 @@ enum charge_type { static void mem_cgroup_get(struct mem_cgroup *mem); static void mem_cgroup_put(struct mem_cgroup *mem); static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem); +static void drain_all_stock_async(void); static struct mem_cgroup_per_zone * mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid) @@ -282,6 +307,11 @@ mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid) return &mem->info.nodeinfo[nid]->zoneinfo[zid]; } +struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem) +{ + return &mem->css; +} + static struct mem_cgroup_per_zone * page_cgroup_zoneinfo(struct page_cgroup *pc) { @@ -365,23 +395,6 @@ mem_cgroup_remove_exceeded(struct mem_cgroup *mem, spin_unlock(&mctz->lock); } -static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem) -{ - bool ret = false; - int cpu; - s64 val; - struct mem_cgroup_stat_cpu *cpustat; - - cpu = get_cpu(); - cpustat = &mem->stat.cpustat[cpu]; - val = __mem_cgroup_stat_read_local(cpustat, MEM_CGROUP_STAT_EVENTS); - if (unlikely(val > SOFTLIMIT_EVENTS_THRESH)) { - __mem_cgroup_stat_reset_safe(cpustat, MEM_CGROUP_STAT_EVENTS); - ret = true; - } - put_cpu(); - return ret; -} static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page) { @@ -475,17 +488,31 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) return mz; } +static s64 mem_cgroup_read_stat(struct mem_cgroup *mem, + enum mem_cgroup_stat_index idx) +{ + int cpu; + s64 val = 0; + + for_each_possible_cpu(cpu) + val += per_cpu(mem->stat->count[idx], cpu); + return val; +} + +static s64 mem_cgroup_local_usage(struct mem_cgroup *mem) +{ + s64 ret; + + ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS); + ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE); + return ret; +} + static void mem_cgroup_swap_statistics(struct mem_cgroup *mem, bool charge) { int val = (charge) ? 1 : -1; - struct mem_cgroup_stat *stat = &mem->stat; - struct mem_cgroup_stat_cpu *cpustat; - int cpu = get_cpu(); - - cpustat = &stat->cpustat[cpu]; - __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_SWAPOUT, val); - put_cpu(); + this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val); } static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, @@ -493,24 +520,21 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, bool charge) { int val = (charge) ? 
1 : -1; - struct mem_cgroup_stat *stat = &mem->stat; - struct mem_cgroup_stat_cpu *cpustat; - int cpu = get_cpu(); - cpustat = &stat->cpustat[cpu]; + preempt_disable(); + if (PageCgroupCache(pc)) - __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val); + __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], val); else - __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val); + __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], val); if (charge) - __mem_cgroup_stat_add_safe(cpustat, - MEM_CGROUP_STAT_PGPGIN_COUNT, 1); + __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]); else - __mem_cgroup_stat_add_safe(cpustat, - MEM_CGROUP_STAT_PGPGOUT_COUNT, 1); - __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_EVENTS, 1); - put_cpu(); + __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]); + __this_cpu_inc(mem->stat->count[MEM_CGROUP_EVENTS]); + + preempt_enable(); } static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem, @@ -528,6 +552,29 @@ static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem, return total; } +static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift) +{ + s64 val; + + val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]); + + return !(val & ((1 << event_mask_shift) - 1)); +} + +/* + * Check events in order. + * + */ +static void memcg_check_events(struct mem_cgroup *mem, struct page *page) +{ + /* threshold event is triggered in finer grain than soft limit */ + if (unlikely(__memcg_event_check(mem, THRESHOLDS_EVENTS_THRESH))) { + mem_cgroup_threshold(mem); + if (unlikely(__memcg_event_check(mem, SOFTLIMIT_EVENTS_THRESH))) + mem_cgroup_update_tree(mem, page); + } +} + static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) { return container_of(cgroup_subsys_state(cont, @@ -758,7 +805,13 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem) task_unlock(task); if (!curr) return 0; - if (curr->use_hierarchy) + /* + * We should check use_hierarchy of "mem" not "curr". Because checking + * use_hierarchy of "curr" here make this function true if hierarchy is + * enabled in "curr" and "curr" is a child of "mem" in *cgroup* + * hierarchy(even if use_hierarchy is disabled in "mem"). + */ + if (mem->use_hierarchy) ret = css_is_ancestor(&curr->css, &mem->css); else ret = (curr == mem); @@ -988,7 +1041,7 @@ static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data) } /** - * mem_cgroup_print_mem_info: Called from OOM with tasklist_lock held in read mode. + * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode. 
* @memcg: The memory cgroup that went over limit * @p: Task that is going to be killed * @@ -1007,7 +1060,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) static char memcg_name[PATH_MAX]; int ret; - if (!memcg) + if (!memcg || !p) return; @@ -1137,6 +1190,8 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, victim = mem_cgroup_select_victim(root_mem); if (victim == root_mem) { loop++; + if (loop >= 1) + drain_all_stock_async(); if (loop >= 2) { /* * If we have not been able to reclaim @@ -1160,7 +1215,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, } } } - if (!mem_cgroup_local_usage(&victim->stat)) { + if (!mem_cgroup_local_usage(victim)) { /* this cgroup's local usage == 0 */ css_put(&victim->css); continue; @@ -1191,90 +1246,284 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, return total; } -bool mem_cgroup_oom_called(struct task_struct *task) +static int mem_cgroup_oom_lock_cb(struct mem_cgroup *mem, void *data) { - bool ret = false; - struct mem_cgroup *mem; - struct mm_struct *mm; + int *val = (int *)data; + int x; + /* + * Logically, we can stop scanning immediately when we find + * a memcg is already locked. But condidering unlock ops and + * creation/removal of memcg, scan-all is simple operation. + */ + x = atomic_inc_return(&mem->oom_lock); + *val = max(x, *val); + return 0; +} +/* + * Check OOM-Killer is already running under our hierarchy. + * If someone is running, return false. + */ +static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) +{ + int lock_count = 0; - rcu_read_lock(); - mm = task->mm; - if (!mm) - mm = &init_mm; - mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); - if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10)) - ret = true; - rcu_read_unlock(); - return ret; + mem_cgroup_walk_tree(mem, &lock_count, mem_cgroup_oom_lock_cb); + + if (lock_count == 1) + return true; + return false; } -static int record_last_oom_cb(struct mem_cgroup *mem, void *data) +static int mem_cgroup_oom_unlock_cb(struct mem_cgroup *mem, void *data) { - mem->last_oom_jiffies = jiffies; + /* + * When a new child is created while the hierarchy is under oom, + * mem_cgroup_oom_lock() may not be called. We have to use + * atomic_add_unless() here. + */ + atomic_add_unless(&mem->oom_lock, -1, 0); return 0; } -static void record_last_oom(struct mem_cgroup *mem) +static void mem_cgroup_oom_unlock(struct mem_cgroup *mem) +{ + mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_unlock_cb); +} + +static DEFINE_MUTEX(memcg_oom_mutex); +static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); + +/* + * try to call OOM killer. returns false if we should exit memory-reclaim loop. + */ +bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask) { - mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb); + DEFINE_WAIT(wait); + bool locked; + + /* At first, try to OOM lock hierarchy under mem.*/ + mutex_lock(&memcg_oom_mutex); + locked = mem_cgroup_oom_lock(mem); + /* + * Even if signal_pending(), we can't quit charge() loop without + * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL + * under OOM is always welcomed, use TASK_KILLABLE here. 
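+ * The task that took the OOM lock calls mem_cgroup_out_of_memory();
+ * everyone else sleeps on memcg_oom_waitq until the winner finishes and
+ * wakes them with wake_up_all().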
+ */ + if (!locked) + prepare_to_wait(&memcg_oom_waitq, &wait, TASK_KILLABLE); + mutex_unlock(&memcg_oom_mutex); + + if (locked) + mem_cgroup_out_of_memory(mem, mask); + else { + schedule(); + finish_wait(&memcg_oom_waitq, &wait); + } + mutex_lock(&memcg_oom_mutex); + mem_cgroup_oom_unlock(mem); + /* + * Here, we use global waitq .....more fine grained waitq ? + * Assume following hierarchy. + * A/ + * 01 + * 02 + * assume OOM happens both in A and 01 at the same time. Tthey are + * mutually exclusive by lock. (kill in 01 helps A.) + * When we use per memcg waitq, we have to wake up waiters on A and 02 + * in addtion to waiters on 01. We use global waitq for avoiding mess. + * It will not be a big problem. + * (And a task may be moved to other groups while it's waiting for OOM.) + */ + wake_up_all(&memcg_oom_waitq); + mutex_unlock(&memcg_oom_mutex); + + if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current)) + return false; + /* Give chance to dying process */ + schedule_timeout(1); + return true; } /* * Currently used to update mapped file statistics, but the routine can be * generalized to update other statistics as well. */ -void mem_cgroup_update_mapped_file_stat(struct page *page, int val) +void mem_cgroup_update_file_mapped(struct page *page, int val) { struct mem_cgroup *mem; - struct mem_cgroup_stat *stat; - struct mem_cgroup_stat_cpu *cpustat; - int cpu; struct page_cgroup *pc; - if (!page_is_file_cache(page)) - return; - pc = lookup_page_cgroup(page); if (unlikely(!pc)) return; lock_page_cgroup(pc); mem = pc->mem_cgroup; - if (!mem) - goto done; - - if (!PageCgroupUsed(pc)) + if (!mem || !PageCgroupUsed(pc)) goto done; /* - * Preemption is already disabled, we don't need get_cpu() + * Preemption is already disabled. We can use __this_cpu_xxx */ - cpu = smp_processor_id(); - stat = &mem->stat; - cpustat = &stat->cpustat[cpu]; + if (val > 0) { + __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); + SetPageCgroupFileMapped(pc); + } else { + __this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); + ClearPageCgroupFileMapped(pc); + } - __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, val); done: unlock_page_cgroup(pc); } /* + * size of first charge trial. "32" comes from vmscan.c's magic value. + * TODO: maybe necessary to use big numbers in big irons. + */ +#define CHARGE_SIZE (32 * PAGE_SIZE) +struct memcg_stock_pcp { + struct mem_cgroup *cached; /* this never be root cgroup */ + int charge; + struct work_struct work; +}; +static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); +static atomic_t memcg_drain_count; + +/* + * Try to consume stocked charge on this cpu. If success, PAGE_SIZE is consumed + * from local stock and true is returned. If the stock is 0 or charges from a + * cgroup which is not current target, returns false. This stock will be + * refilled. + */ +static bool consume_stock(struct mem_cgroup *mem) +{ + struct memcg_stock_pcp *stock; + bool ret = true; + + stock = &get_cpu_var(memcg_stock); + if (mem == stock->cached && stock->charge) + stock->charge -= PAGE_SIZE; + else /* need to call res_counter_charge */ + ret = false; + put_cpu_var(memcg_stock); + return ret; +} + +/* + * Returns stocks cached in percpu to res_counter and reset cached information. 
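+ * Callers: drain_local_stock() and refill_stock() for the local cpu,
+ * and memcg_stock_cpu_callback() for a cpu that has gone offline.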
+ */ +static void drain_stock(struct memcg_stock_pcp *stock) +{ + struct mem_cgroup *old = stock->cached; + + if (stock->charge) { + res_counter_uncharge(&old->res, stock->charge); + if (do_swap_account) + res_counter_uncharge(&old->memsw, stock->charge); + } + stock->cached = NULL; + stock->charge = 0; +} + +/* + * This must be called under preempt disabled or must be called by + * a thread which is pinned to local cpu. + */ +static void drain_local_stock(struct work_struct *dummy) +{ + struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock); + drain_stock(stock); +} + +/* + * Cache charges(val) which is from res_counter, to local per_cpu area. + * This will be consumed by consumt_stock() function, later. + */ +static void refill_stock(struct mem_cgroup *mem, int val) +{ + struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock); + + if (stock->cached != mem) { /* reset if necessary */ + drain_stock(stock); + stock->cached = mem; + } + stock->charge += val; + put_cpu_var(memcg_stock); +} + +/* + * Tries to drain stocked charges in other cpus. This function is asynchronous + * and just put a work per cpu for draining localy on each cpu. Caller can + * expects some charges will be back to res_counter later but cannot wait for + * it. + */ +static void drain_all_stock_async(void) +{ + int cpu; + /* This function is for scheduling "drain" in asynchronous way. + * The result of "drain" is not directly handled by callers. Then, + * if someone is calling drain, we don't have to call drain more. + * Anyway, WORK_STRUCT_PENDING check in queue_work_on() will catch if + * there is a race. We just do loose check here. + */ + if (atomic_read(&memcg_drain_count)) + return; + /* Notify other cpus that system-wide "drain" is running */ + atomic_inc(&memcg_drain_count); + get_online_cpus(); + for_each_online_cpu(cpu) { + struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); + schedule_work_on(cpu, &stock->work); + } + put_online_cpus(); + atomic_dec(&memcg_drain_count); + /* We don't wait for flush_work */ +} + +/* This is a synchronous drain interface. */ +static void drain_all_stock_sync(void) +{ + /* called when force_empty is called */ + atomic_inc(&memcg_drain_count); + schedule_on_each_cpu(drain_local_stock); + atomic_dec(&memcg_drain_count); +} + +static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb, + unsigned long action, + void *hcpu) +{ + int cpu = (unsigned long)hcpu; + struct memcg_stock_pcp *stock; + + if (action != CPU_DEAD) + return NOTIFY_OK; + stock = &per_cpu(memcg_stock, cpu); + drain_stock(stock); + return NOTIFY_OK; +} + +/* * Unlike exported interface, "oom" parameter is added. if oom==true, * oom-killer can be invoked. */ static int __mem_cgroup_try_charge(struct mm_struct *mm, - gfp_t gfp_mask, struct mem_cgroup **memcg, - bool oom, struct page *page) + gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom) { struct mem_cgroup *mem, *mem_over_limit; int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; struct res_counter *fail_res; + int csize = CHARGE_SIZE; - if (unlikely(test_thread_flag(TIF_MEMDIE))) { - /* Don't account this! */ - *memcg = NULL; - return 0; - } + /* + * Unlike gloval-vm's OOM-kill, we're not in memory shortage + * in system level. So, allow to go ahead dying process in addition to + * MEMDIE process. + */ + if (unlikely(test_thread_flag(TIF_MEMDIE) + || fatal_signal_pending(current))) + goto bypass; /* * We always charge the cgroup the mm_struct belongs to. 
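 * Charges are taken from the res_counter in CHARGE_SIZE (32 page) batches;
 * the surplus is kept in the per-cpu stock by refill_stock() and handed out
 * by consume_stock() on later charges. If a batched charge fails, we fall
 * back to a single-page request and retry.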
@@ -1293,23 +1542,25 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, return 0; VM_BUG_ON(css_is_removed(&mem->css)); + if (mem_cgroup_is_root(mem)) + goto done; while (1) { int ret = 0; unsigned long flags = 0; - if (mem_cgroup_is_root(mem)) + if (consume_stock(mem)) goto done; - ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res); + + ret = res_counter_charge(&mem->res, csize, &fail_res); if (likely(!ret)) { if (!do_swap_account) break; - ret = res_counter_charge(&mem->memsw, PAGE_SIZE, - &fail_res); + ret = res_counter_charge(&mem->memsw, csize, &fail_res); if (likely(!ret)) break; /* mem+swap counter fails */ - res_counter_uncharge(&mem->res, PAGE_SIZE); + res_counter_uncharge(&mem->res, csize); flags |= MEM_CGROUP_RECLAIM_NOSWAP; mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw); @@ -1318,6 +1569,11 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, mem_over_limit = mem_cgroup_from_res_counter(fail_res, res); + /* reduce request size and retry */ + if (csize > PAGE_SIZE) { + csize = PAGE_SIZE; + continue; + } if (!(gfp_mask & __GFP_WAIT)) goto nomem; @@ -1337,27 +1593,92 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, if (mem_cgroup_check_under_limit(mem_over_limit)) continue; + /* try to avoid oom while someone is moving charge */ + if (mc.moving_task && current != mc.moving_task) { + struct mem_cgroup *from, *to; + bool do_continue = false; + /* + * There is a small race that "from" or "to" can be + * freed by rmdir, so we use css_tryget(). + */ + from = mc.from; + to = mc.to; + if (from && css_tryget(&from->css)) { + if (mem_over_limit->use_hierarchy) + do_continue = css_is_ancestor( + &from->css, + &mem_over_limit->css); + else + do_continue = (from == mem_over_limit); + css_put(&from->css); + } + if (!do_continue && to && css_tryget(&to->css)) { + if (mem_over_limit->use_hierarchy) + do_continue = css_is_ancestor( + &to->css, + &mem_over_limit->css); + else + do_continue = (to == mem_over_limit); + css_put(&to->css); + } + if (do_continue) { + DEFINE_WAIT(wait); + prepare_to_wait(&mc.waitq, &wait, + TASK_INTERRUPTIBLE); + /* moving charge context might have finished. */ + if (mc.moving_task) + schedule(); + finish_wait(&mc.waitq, &wait); + continue; + } + } + if (!nr_retries--) { - if (oom) { - mutex_lock(&memcg_tasklist); - mem_cgroup_out_of_memory(mem_over_limit, gfp_mask); - mutex_unlock(&memcg_tasklist); - record_last_oom(mem_over_limit); + if (!oom) + goto nomem; + if (mem_cgroup_handle_oom(mem_over_limit, gfp_mask)) { + nr_retries = MEM_CGROUP_RECLAIM_RETRIES; + continue; } - goto nomem; + /* When we reach here, current task is dying .*/ + css_put(&mem->css); + goto bypass; } } - /* - * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree. - * if they exceeds softlimit. - */ - if (mem_cgroup_soft_limit_check(mem)) - mem_cgroup_update_tree(mem, page); + if (csize > PAGE_SIZE) + refill_stock(mem, csize - PAGE_SIZE); done: return 0; nomem: css_put(&mem->css); return -ENOMEM; +bypass: + *memcg = NULL; + return 0; +} + +/* + * Somemtimes we have to undo a charge we got by try_charge(). + * This function is for that and do uncharge, put css's refcnt. + * gotten by try_charge(). 
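+ * Used e.g. when __mem_cgroup_commit_charge() finds the page already
+ * charged, and to give back unused precharges of the move-charge path
+ * (see mem_cgroup_clear_mc()).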
+ */ +static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem, + unsigned long count) +{ + if (!mem_cgroup_is_root(mem)) { + res_counter_uncharge(&mem->res, PAGE_SIZE * count); + if (do_swap_account) + res_counter_uncharge(&mem->memsw, PAGE_SIZE * count); + VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags)); + WARN_ON_ONCE(count > INT_MAX); + __css_put(&mem->css, (int)count); + } + /* we don't need css_put for root */ +} + +static void mem_cgroup_cancel_charge(struct mem_cgroup *mem) +{ + __mem_cgroup_cancel_charge(mem, 1); } /* @@ -1379,25 +1700,22 @@ static struct mem_cgroup *mem_cgroup_lookup(unsigned short id) return container_of(css, struct mem_cgroup, css); } -static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page) +struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) { - struct mem_cgroup *mem; + struct mem_cgroup *mem = NULL; struct page_cgroup *pc; unsigned short id; swp_entry_t ent; VM_BUG_ON(!PageLocked(page)); - if (!PageSwapCache(page)) - return NULL; - pc = lookup_page_cgroup(page); lock_page_cgroup(pc); if (PageCgroupUsed(pc)) { mem = pc->mem_cgroup; if (mem && !css_tryget(&mem->css)) mem = NULL; - } else { + } else if (PageSwapCache(page)) { ent.val = page_private(page); id = lookup_swap_cgroup(ent); rcu_read_lock(); @@ -1426,12 +1744,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, lock_page_cgroup(pc); if (unlikely(PageCgroupUsed(pc))) { unlock_page_cgroup(pc); - if (!mem_cgroup_is_root(mem)) { - res_counter_uncharge(&mem->res, PAGE_SIZE); - if (do_swap_account) - res_counter_uncharge(&mem->memsw, PAGE_SIZE); - } - css_put(&mem->css); + mem_cgroup_cancel_charge(mem); return; } @@ -1461,88 +1774,83 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, mem_cgroup_charge_statistics(mem, pc, true); unlock_page_cgroup(pc); + /* + * "charge_statistics" updated event counter. Then, check it. + * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree. + * if they exceeds softlimit. + */ + memcg_check_events(mem, pc->page); } /** - * mem_cgroup_move_account - move account of the page + * __mem_cgroup_move_account - move account of the page * @pc: page_cgroup of the page. * @from: mem_cgroup which the page is moved from. * @to: mem_cgroup which the page is moved to. @from != @to. + * @uncharge: whether we should call uncharge and css_put against @from. * * The caller must confirm following. * - page is not on LRU (isolate_page() is useful.) + * - the pc is locked, used, and ->mem_cgroup points to @from. * - * returns 0 at success, - * returns -EBUSY when lock is busy or "pc" is unstable. - * - * This function does "uncharge" from old cgroup but doesn't do "charge" to - * new cgroup. It should be done by a caller. + * This function doesn't do "charge" nor css_get to new cgroup. It should be + * done by a caller(__mem_cgroup_try_charge would be usefull). If @uncharge is + * true, this function does "uncharge" from old cgroup, but it doesn't if + * @uncharge is false, so a caller should do "uncharge". 
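+ * (mem_cgroup_move_parent() passes @uncharge == true because the parent was
+ * pre-charged by __mem_cgroup_try_charge(); move charge at task migration
+ * passes false and drops the leftover charges on @from later, in
+ * mem_cgroup_clear_mc().)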
*/ -static int mem_cgroup_move_account(struct page_cgroup *pc, - struct mem_cgroup *from, struct mem_cgroup *to) +static void __mem_cgroup_move_account(struct page_cgroup *pc, + struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge) { - struct mem_cgroup_per_zone *from_mz, *to_mz; - int nid, zid; - int ret = -EBUSY; - struct page *page; - int cpu; - struct mem_cgroup_stat *stat; - struct mem_cgroup_stat_cpu *cpustat; - VM_BUG_ON(from == to); VM_BUG_ON(PageLRU(pc->page)); - - nid = page_cgroup_nid(pc); - zid = page_cgroup_zid(pc); - from_mz = mem_cgroup_zoneinfo(from, nid, zid); - to_mz = mem_cgroup_zoneinfo(to, nid, zid); - - if (!trylock_page_cgroup(pc)) - return ret; - - if (!PageCgroupUsed(pc)) - goto out; - - if (pc->mem_cgroup != from) - goto out; - - if (!mem_cgroup_is_root(from)) - res_counter_uncharge(&from->res, PAGE_SIZE); - mem_cgroup_charge_statistics(from, pc, false); - - page = pc->page; - if (page_is_file_cache(page) && page_mapped(page)) { - cpu = smp_processor_id(); - /* Update mapped_file data for mem_cgroup "from" */ - stat = &from->stat; - cpustat = &stat->cpustat[cpu]; - __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, - -1); - - /* Update mapped_file data for mem_cgroup "to" */ - stat = &to->stat; - cpustat = &stat->cpustat[cpu]; - __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, - 1); + VM_BUG_ON(!PageCgroupLocked(pc)); + VM_BUG_ON(!PageCgroupUsed(pc)); + VM_BUG_ON(pc->mem_cgroup != from); + + if (PageCgroupFileMapped(pc)) { + /* Update mapped_file data for mem_cgroup */ + preempt_disable(); + __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); + __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); + preempt_enable(); } + mem_cgroup_charge_statistics(from, pc, false); + if (uncharge) + /* This is not "cancel", but cancel_charge does all we need. */ + mem_cgroup_cancel_charge(from); - if (do_swap_account && !mem_cgroup_is_root(from)) - res_counter_uncharge(&from->memsw, PAGE_SIZE); - css_put(&from->css); - - css_get(&to->css); + /* caller should have done css_get */ pc->mem_cgroup = to; mem_cgroup_charge_statistics(to, pc, true); - ret = 0; -out: - unlock_page_cgroup(pc); /* * We charges against "to" which may not have any tasks. Then, "to" * can be under rmdir(). But in current implementation, caller of - * this function is just force_empty() and it's garanteed that - * "to" is never removed. So, we don't check rmdir status here. + * this function is just force_empty() and move charge, so it's + * garanteed that "to" is never removed. So, we don't check rmdir + * status here. 
+ */ +} + +/* + * check whether the @pc is valid for moving account and call + * __mem_cgroup_move_account() + */ +static int mem_cgroup_move_account(struct page_cgroup *pc, + struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge) +{ + int ret = -EINVAL; + lock_page_cgroup(pc); + if (PageCgroupUsed(pc) && pc->mem_cgroup == from) { + __mem_cgroup_move_account(pc, from, to, uncharge); + ret = 0; + } + unlock_page_cgroup(pc); + /* + * check events */ + memcg_check_events(to, pc->page); + memcg_check_events(from, pc->page); return ret; } @@ -1564,45 +1872,25 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc, if (!pcg) return -EINVAL; + ret = -EBUSY; + if (!get_page_unless_zero(page)) + goto out; + if (isolate_lru_page(page)) + goto put; parent = mem_cgroup_from_cont(pcg); - - - ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, page); + ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false); if (ret || !parent) - return ret; - - if (!get_page_unless_zero(page)) { - ret = -EBUSY; - goto uncharge; - } - - ret = isolate_lru_page(page); + goto put_back; + ret = mem_cgroup_move_account(pc, child, parent, true); if (ret) - goto cancel; - - ret = mem_cgroup_move_account(pc, child, parent); - + mem_cgroup_cancel_charge(parent); +put_back: putback_lru_page(page); - if (!ret) { - put_page(page); - /* drop extra refcnt by try_charge() */ - css_put(&parent->css); - return 0; - } - -cancel: +put: put_page(page); -uncharge: - /* drop extra refcnt by try_charge() */ - css_put(&parent->css); - /* uncharge if move fails */ - if (!mem_cgroup_is_root(parent)) { - res_counter_uncharge(&parent->res, PAGE_SIZE); - if (do_swap_account) - res_counter_uncharge(&parent->memsw, PAGE_SIZE); - } +out: return ret; } @@ -1627,7 +1915,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, prefetchw(pc); mem = memcg; - ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page); + ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true); if (ret || !mem) return ret; @@ -1737,23 +2025,24 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm, goto charge_cur_mm; /* * A racing thread's fault, or swapoff, may have already updated - * the pte, and even removed page from swap cache: return success - * to go on to do_swap_page()'s pte_same() test, which should fail. + * the pte, and even removed page from swap cache: in those cases + * do_swap_page()'s pte_same() test will fail; but there's also a + * KSM case which does need to charge the page. 
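+ * In those cases we fall through and charge the current mm (charge_cur_mm)
+ * rather than skipping the charge.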
*/ if (!PageSwapCache(page)) - return 0; - mem = try_get_mem_cgroup_from_swapcache(page); + goto charge_cur_mm; + mem = try_get_mem_cgroup_from_page(page); if (!mem) goto charge_cur_mm; *ptr = mem; - ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, page); + ret = __mem_cgroup_try_charge(NULL, mask, ptr, true); /* drop extra refcnt from tryget */ css_put(&mem->css); return ret; charge_cur_mm: if (unlikely(!mm)) mm = &init_mm; - return __mem_cgroup_try_charge(mm, mask, ptr, true, page); + return __mem_cgroup_try_charge(mm, mask, ptr, true); } static void @@ -1818,14 +2107,53 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem) return; if (!mem) return; - if (!mem_cgroup_is_root(mem)) { - res_counter_uncharge(&mem->res, PAGE_SIZE); - if (do_swap_account) - res_counter_uncharge(&mem->memsw, PAGE_SIZE); - } - css_put(&mem->css); + mem_cgroup_cancel_charge(mem); } +static void +__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype) +{ + struct memcg_batch_info *batch = NULL; + bool uncharge_memsw = true; + /* If swapout, usage of swap doesn't decrease */ + if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) + uncharge_memsw = false; + /* + * do_batch > 0 when unmapping pages or inode invalidate/truncate. + * In those cases, all pages freed continously can be expected to be in + * the same cgroup and we have chance to coalesce uncharges. + * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE) + * because we want to do uncharge as soon as possible. + */ + if (!current->memcg_batch.do_batch || test_thread_flag(TIF_MEMDIE)) + goto direct_uncharge; + + batch = ¤t->memcg_batch; + /* + * In usual, we do css_get() when we remember memcg pointer. + * But in this case, we keep res->usage until end of a series of + * uncharges. Then, it's ok to ignore memcg's refcnt. + */ + if (!batch->memcg) + batch->memcg = mem; + /* + * In typical case, batch->memcg == mem. This means we can + * merge a series of uncharges to an uncharge of res_counter. + * If not, we uncharge res_counter ony by one. 
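+ * Batching starts in mem_cgroup_uncharge_start() and the bytes collected
+ * here are finally returned to the res_counters in mem_cgroup_uncharge_end().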
+ */ + if (batch->memcg != mem) + goto direct_uncharge; + /* remember freed charge and uncharge it later */ + batch->bytes += PAGE_SIZE; + if (uncharge_memsw) + batch->memsw_bytes += PAGE_SIZE; + return; +direct_uncharge: + res_counter_uncharge(&mem->res, PAGE_SIZE); + if (uncharge_memsw) + res_counter_uncharge(&mem->memsw, PAGE_SIZE); + return; +} /* * uncharge if !page_mapped(page) @@ -1874,12 +2202,8 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) break; } - if (!mem_cgroup_is_root(mem)) { - res_counter_uncharge(&mem->res, PAGE_SIZE); - if (do_swap_account && - (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)) - res_counter_uncharge(&mem->memsw, PAGE_SIZE); - } + if (!mem_cgroup_is_root(mem)) + __do_uncharge(mem, ctype); if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) mem_cgroup_swap_statistics(mem, true); mem_cgroup_charge_statistics(mem, pc, false); @@ -1895,8 +2219,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) mz = page_cgroup_zoneinfo(pc); unlock_page_cgroup(pc); - if (mem_cgroup_soft_limit_check(mem)) - mem_cgroup_update_tree(mem, page); + memcg_check_events(mem, page); /* at swapout, this memcg will be accessed to record to swap */ if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT) css_put(&mem->css); @@ -1925,6 +2248,50 @@ void mem_cgroup_uncharge_cache_page(struct page *page) __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE); } +/* + * Batch_start/batch_end is called in unmap_page_range/invlidate/trucate. + * In that cases, pages are freed continuously and we can expect pages + * are in the same memcg. All these calls itself limits the number of + * pages freed at once, then uncharge_start/end() is called properly. + * This may be called prural(2) times in a context, + */ + +void mem_cgroup_uncharge_start(void) +{ + current->memcg_batch.do_batch++; + /* We can do nest. */ + if (current->memcg_batch.do_batch == 1) { + current->memcg_batch.memcg = NULL; + current->memcg_batch.bytes = 0; + current->memcg_batch.memsw_bytes = 0; + } +} + +void mem_cgroup_uncharge_end(void) +{ + struct memcg_batch_info *batch = ¤t->memcg_batch; + + if (!batch->do_batch) + return; + + batch->do_batch--; + if (batch->do_batch) /* If stacked, do nothing. */ + return; + + if (!batch->memcg) + return; + /* + * This "batch->memcg" is valid without any css_get/put etc... + * bacause we hide charges behind us. + */ + if (batch->bytes) + res_counter_uncharge(&batch->memcg->res, batch->bytes); + if (batch->memsw_bytes) + res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes); + /* forget this pointer (for sanity check) */ + batch->memcg = NULL; +} + #ifdef CONFIG_SWAP /* * called after __delete_from_swap_cache() and drop "page" account. @@ -1979,6 +2346,64 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent) } rcu_read_unlock(); } + +/** + * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. + * @entry: swap entry to be moved + * @from: mem_cgroup which the entry is moved from + * @to: mem_cgroup which the entry is moved to + * @need_fixup: whether we should fixup res_counters and refcounts. + * + * It succeeds only when the swap_cgroup's record for this entry is the same + * as the mem_cgroup's id of @from. + * + * Returns 0 on success, -EINVAL on failure. + * + * The caller must have charged to @to, IOW, called res_counter_charge() about + * both res and memsw, and called css_get(). 
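+ * With @need_fixup == false (the task-migration case) the res_counter and
+ * refcount adjustments are deferred to mem_cgroup_clear_mc().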
+ */ +static int mem_cgroup_move_swap_account(swp_entry_t entry, + struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup) +{ + unsigned short old_id, new_id; + + old_id = css_id(&from->css); + new_id = css_id(&to->css); + + if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { + mem_cgroup_swap_statistics(from, false); + mem_cgroup_swap_statistics(to, true); + /* + * This function is only called from task migration context now. + * It postpones res_counter and refcount handling till the end + * of task migration(mem_cgroup_clear_mc()) for performance + * improvement. But we cannot postpone mem_cgroup_get(to) + * because if the process that has been moved to @to does + * swap-in, the refcount of @to might be decreased to 0. + */ + mem_cgroup_get(to); + if (need_fixup) { + if (!mem_cgroup_is_root(from)) + res_counter_uncharge(&from->memsw, PAGE_SIZE); + mem_cgroup_put(from); + /* + * we charged both to->res and to->memsw, so we should + * uncharge to->res. + */ + if (!mem_cgroup_is_root(to)) + res_counter_uncharge(&to->res, PAGE_SIZE); + css_put(&to->css); + } + return 0; + } + return -EINVAL; +} +#else +static inline int mem_cgroup_move_swap_account(swp_entry_t entry, + struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup) +{ + return -EINVAL; +} #endif /* @@ -2002,12 +2427,11 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) } unlock_page_cgroup(pc); + *ptr = mem; if (mem) { - ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false, - page); + ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false); css_put(&mem->css); } - *ptr = mem; return ret; } @@ -2100,7 +2524,6 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, unsigned long long val) { int retry_count; - int progress; u64 memswlimit; int ret = 0; int children = mem_cgroup_count_children(memcg); @@ -2144,8 +2567,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, if (!ret) break; - progress = mem_cgroup_hierarchical_reclaim(memcg, NULL, - GFP_KERNEL, + mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL, MEM_CGROUP_RECLAIM_SHRINK); curusage = res_counter_read_u64(&memcg->res, RES_USAGE); /* Usage is reduced ? */ @@ -2334,7 +2756,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem, pc = list_entry(list->prev, struct page_cgroup, lru); if (busy == pc) { list_move(&pc->lru, list); - busy = 0; + busy = NULL; spin_unlock_irqrestore(&zone->lru_lock, flags); continue; } @@ -2375,7 +2797,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all) if (free_all) goto try_to_free; move_account: - while (mem->res.usage > 0) { + do { ret = -EBUSY; if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children)) goto out; @@ -2384,6 +2806,7 @@ move_account: goto out; /* This is for making all *used* pages to be on LRU. */ lru_add_drain_all(); + drain_all_stock_sync(); ret = 0; for_each_node_state(node, N_HIGH_MEMORY) { for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) { @@ -2402,8 +2825,8 @@ move_account: if (ret == -ENOMEM) goto try_to_free; cond_resched(); - } - ret = 0; + /* "ret" should also be checked to ensure all lists are empty. */ + } while (mem->res.usage > 0 || ret); out: css_put(&mem->css); return ret; @@ -2436,10 +2859,7 @@ try_to_free: } lru_add_drain(); /* try move_account...there may be some *locked* pages. 
*/ - if (mem->res.usage) - goto move_account; - ret = 0; - goto out; + goto move_account; } int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event) @@ -2466,7 +2886,7 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft, cgroup_lock(); /* - * If parent's use_hiearchy is set, we can't make any modifications + * If parent's use_hierarchy is set, we can't make any modifications * in the child subtrees. If it is unset, then the change can * occur, provided the current cgroup has no children. * @@ -2495,7 +2915,7 @@ static int mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data) { struct mem_cgroup_idx_data *d = data; - d->val += mem_cgroup_read_stat(&mem->stat, d->idx); + d->val += mem_cgroup_read_stat(mem, d->idx); return 0; } @@ -2510,39 +2930,50 @@ mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem, *val = d.val; } +static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap) +{ + u64 idx_val, val; + + if (!mem_cgroup_is_root(mem)) { + if (!swap) + return res_counter_read_u64(&mem->res, RES_USAGE); + else + return res_counter_read_u64(&mem->memsw, RES_USAGE); + } + + mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE, &idx_val); + val = idx_val; + mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS, &idx_val); + val += idx_val; + + if (swap) { + mem_cgroup_get_recursive_idx_stat(mem, + MEM_CGROUP_STAT_SWAPOUT, &idx_val); + val += idx_val; + } + + return val << PAGE_SHIFT; +} + static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) { struct mem_cgroup *mem = mem_cgroup_from_cont(cont); - u64 idx_val, val; + u64 val; int type, name; type = MEMFILE_TYPE(cft->private); name = MEMFILE_ATTR(cft->private); switch (type) { case _MEM: - if (name == RES_USAGE && mem_cgroup_is_root(mem)) { - mem_cgroup_get_recursive_idx_stat(mem, - MEM_CGROUP_STAT_CACHE, &idx_val); - val = idx_val; - mem_cgroup_get_recursive_idx_stat(mem, - MEM_CGROUP_STAT_RSS, &idx_val); - val += idx_val; - val <<= PAGE_SHIFT; - } else + if (name == RES_USAGE) + val = mem_cgroup_usage(mem, false); + else val = res_counter_read_u64(&mem->res, name); break; case _MEMSWAP: - if (name == RES_USAGE && mem_cgroup_is_root(mem)) { - mem_cgroup_get_recursive_idx_stat(mem, - MEM_CGROUP_STAT_CACHE, &idx_val); - val = idx_val; - mem_cgroup_get_recursive_idx_stat(mem, - MEM_CGROUP_STAT_RSS, &idx_val); - val += idx_val; - mem_cgroup_get_recursive_idx_stat(mem, - MEM_CGROUP_STAT_SWAPOUT, &idx_val); - val <<= PAGE_SHIFT; - } else + if (name == RES_USAGE) + val = mem_cgroup_usage(mem, true); + else val = res_counter_read_u64(&mem->memsw, name); break; default: @@ -2655,12 +3086,45 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) return 0; } +static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp, + struct cftype *cft) +{ + return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate; +} + +#ifdef CONFIG_MMU +static int mem_cgroup_move_charge_write(struct cgroup *cgrp, + struct cftype *cft, u64 val) +{ + struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); + + if (val >= (1 << NR_MOVE_TYPE)) + return -EINVAL; + /* + * We check this value several times in both in can_attach() and + * attach(), so we need cgroup lock to prevent this value from being + * inconsistent. 
+ */ + cgroup_lock(); + mem->move_charge_at_immigrate = val; + cgroup_unlock(); + + return 0; +} +#else +static int mem_cgroup_move_charge_write(struct cgroup *cgrp, + struct cftype *cft, u64 val) +{ + return -ENOSYS; +} +#endif + /* For read statistics */ enum { MCS_CACHE, MCS_RSS, - MCS_MAPPED_FILE, + MCS_FILE_MAPPED, MCS_PGPGIN, MCS_PGPGOUT, MCS_SWAP, @@ -2700,18 +3164,18 @@ static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data) s64 val; /* per cpu stat */ - val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE); + val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE); s->stat[MCS_CACHE] += val * PAGE_SIZE; - val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS); + val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS); s->stat[MCS_RSS] += val * PAGE_SIZE; - val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE); - s->stat[MCS_MAPPED_FILE] += val * PAGE_SIZE; - val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT); + val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED); + s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE; + val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGIN_COUNT); s->stat[MCS_PGPGIN] += val; - val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT); + val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGOUT_COUNT); s->stat[MCS_PGPGOUT] += val; if (do_swap_account) { - val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_SWAPOUT); + val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT); s->stat[MCS_SWAP] += val * PAGE_SIZE; } @@ -2839,12 +3303,249 @@ static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft, return 0; } +static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) +{ + struct mem_cgroup_threshold_ary *t; + u64 usage; + int i; + + rcu_read_lock(); + if (!swap) + t = rcu_dereference(memcg->thresholds); + else + t = rcu_dereference(memcg->memsw_thresholds); + + if (!t) + goto unlock; + + usage = mem_cgroup_usage(memcg, swap); + + /* + * current_threshold points to threshold just below usage. + * If it's not true, a threshold was crossed after last + * call of __mem_cgroup_threshold(). + */ + i = atomic_read(&t->current_threshold); + + /* + * Iterate backward over array of thresholds starting from + * current_threshold and check if a threshold is crossed. + * If none of thresholds below usage is crossed, we read + * only one element of the array here. + */ + for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) + eventfd_signal(t->entries[i].eventfd, 1); + + /* i = current_threshold + 1 */ + i++; + + /* + * Iterate forward over array of thresholds starting from + * current_threshold+1 and check if a threshold is crossed. + * If none of thresholds above usage is crossed, we read + * only one element of the array here. 
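+ * Example: with thresholds at 4M, 8M and 16M and usage at 10M,
+ * current_threshold points at 8M. If usage falls to 6M the 8M eventfd is
+ * signalled and current_threshold moves back to 4M; if usage grows past 16M
+ * the 16M eventfd is signalled and current_threshold moves up to 16M.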
+ */ + for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) + eventfd_signal(t->entries[i].eventfd, 1); + + /* Update current_threshold */ + atomic_set(&t->current_threshold, i - 1); +unlock: + rcu_read_unlock(); +} + +static void mem_cgroup_threshold(struct mem_cgroup *memcg) +{ + __mem_cgroup_threshold(memcg, false); + if (do_swap_account) + __mem_cgroup_threshold(memcg, true); +} + +static int compare_thresholds(const void *a, const void *b) +{ + const struct mem_cgroup_threshold *_a = a; + const struct mem_cgroup_threshold *_b = b; + + return _a->threshold - _b->threshold; +} + +static int mem_cgroup_register_event(struct cgroup *cgrp, struct cftype *cft, + struct eventfd_ctx *eventfd, const char *args) +{ + struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); + struct mem_cgroup_threshold_ary *thresholds, *thresholds_new; + int type = MEMFILE_TYPE(cft->private); + u64 threshold, usage; + int size; + int i, ret; + + ret = res_counter_memparse_write_strategy(args, &threshold); + if (ret) + return ret; + + mutex_lock(&memcg->thresholds_lock); + if (type == _MEM) + thresholds = memcg->thresholds; + else if (type == _MEMSWAP) + thresholds = memcg->memsw_thresholds; + else + BUG(); + + usage = mem_cgroup_usage(memcg, type == _MEMSWAP); + + /* Check if a threshold crossed before adding a new one */ + if (thresholds) + __mem_cgroup_threshold(memcg, type == _MEMSWAP); + + if (thresholds) + size = thresholds->size + 1; + else + size = 1; + + /* Allocate memory for new array of thresholds */ + thresholds_new = kmalloc(sizeof(*thresholds_new) + + size * sizeof(struct mem_cgroup_threshold), + GFP_KERNEL); + if (!thresholds_new) { + ret = -ENOMEM; + goto unlock; + } + thresholds_new->size = size; + + /* Copy thresholds (if any) to new array */ + if (thresholds) + memcpy(thresholds_new->entries, thresholds->entries, + thresholds->size * + sizeof(struct mem_cgroup_threshold)); + /* Add new threshold */ + thresholds_new->entries[size - 1].eventfd = eventfd; + thresholds_new->entries[size - 1].threshold = threshold; + + /* Sort thresholds. Registering of new threshold isn't time-critical */ + sort(thresholds_new->entries, size, + sizeof(struct mem_cgroup_threshold), + compare_thresholds, NULL); + + /* Find current threshold */ + atomic_set(&thresholds_new->current_threshold, -1); + for (i = 0; i < size; i++) { + if (thresholds_new->entries[i].threshold < usage) { + /* + * thresholds_new->current_threshold will not be used + * until rcu_assign_pointer(), so it's safe to increment + * it here. 
+ */ + atomic_inc(&thresholds_new->current_threshold); + } + } + + if (type == _MEM) + rcu_assign_pointer(memcg->thresholds, thresholds_new); + else + rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new); + + /* To be sure that nobody uses thresholds before freeing it */ + synchronize_rcu(); + + kfree(thresholds); +unlock: + mutex_unlock(&memcg->thresholds_lock); + + return ret; +} + +static int mem_cgroup_unregister_event(struct cgroup *cgrp, struct cftype *cft, + struct eventfd_ctx *eventfd) +{ + struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); + struct mem_cgroup_threshold_ary *thresholds, *thresholds_new; + int type = MEMFILE_TYPE(cft->private); + u64 usage; + int size = 0; + int i, j, ret; + + mutex_lock(&memcg->thresholds_lock); + if (type == _MEM) + thresholds = memcg->thresholds; + else if (type == _MEMSWAP) + thresholds = memcg->memsw_thresholds; + else + BUG(); + + /* + * Something went wrong if we trying to unregister a threshold + * if we don't have thresholds + */ + BUG_ON(!thresholds); + + usage = mem_cgroup_usage(memcg, type == _MEMSWAP); + + /* Check if a threshold crossed before removing */ + __mem_cgroup_threshold(memcg, type == _MEMSWAP); + + /* Calculate new number of threshold */ + for (i = 0; i < thresholds->size; i++) { + if (thresholds->entries[i].eventfd != eventfd) + size++; + } + + /* Set thresholds array to NULL if we don't have thresholds */ + if (!size) { + thresholds_new = NULL; + goto assign; + } + + /* Allocate memory for new array of thresholds */ + thresholds_new = kmalloc(sizeof(*thresholds_new) + + size * sizeof(struct mem_cgroup_threshold), + GFP_KERNEL); + if (!thresholds_new) { + ret = -ENOMEM; + goto unlock; + } + thresholds_new->size = size; + + /* Copy thresholds and find current threshold */ + atomic_set(&thresholds_new->current_threshold, -1); + for (i = 0, j = 0; i < thresholds->size; i++) { + if (thresholds->entries[i].eventfd == eventfd) + continue; + + thresholds_new->entries[j] = thresholds->entries[i]; + if (thresholds_new->entries[j].threshold < usage) { + /* + * thresholds_new->current_threshold will not be used + * until rcu_assign_pointer(), so it's safe to increment + * it here. 
+ */ + atomic_inc(&thresholds_new->current_threshold); + } + j++; + } + +assign: + if (type == _MEM) + rcu_assign_pointer(memcg->thresholds, thresholds_new); + else + rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new); + + /* To be sure that nobody uses thresholds before freeing it */ + synchronize_rcu(); + + kfree(thresholds); +unlock: + mutex_unlock(&memcg->thresholds_lock); + + return ret; +} static struct cftype mem_cgroup_files[] = { { .name = "usage_in_bytes", .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), .read_u64 = mem_cgroup_read, + .register_event = mem_cgroup_register_event, + .unregister_event = mem_cgroup_unregister_event, }, { .name = "max_usage_in_bytes", @@ -2888,6 +3589,11 @@ static struct cftype mem_cgroup_files[] = { .read_u64 = mem_cgroup_swappiness_read, .write_u64 = mem_cgroup_swappiness_write, }, + { + .name = "move_charge_at_immigrate", + .read_u64 = mem_cgroup_move_charge_read, + .write_u64 = mem_cgroup_move_charge_write, + }, }; #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP @@ -2896,6 +3602,8 @@ static struct cftype memsw_cgroup_files[] = { .name = "memsw.usage_in_bytes", .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), .read_u64 = mem_cgroup_read, + .register_event = mem_cgroup_register_event, + .unregister_event = mem_cgroup_unregister_event, }, { .name = "memsw.max_usage_in_bytes", @@ -2970,24 +3678,29 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) kfree(mem->info.nodeinfo[node]); } -static int mem_cgroup_size(void) -{ - int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu); - return sizeof(struct mem_cgroup) + cpustat_size; -} - static struct mem_cgroup *mem_cgroup_alloc(void) { struct mem_cgroup *mem; - int size = mem_cgroup_size(); + int size = sizeof(struct mem_cgroup); + /* Can be very big if MAX_NUMNODES is very big */ if (size < PAGE_SIZE) mem = kmalloc(size, GFP_KERNEL); else mem = vmalloc(size); - if (mem) - memset(mem, 0, size); + if (!mem) + return NULL; + + memset(mem, 0, size); + mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu); + if (!mem->stat) { + if (size < PAGE_SIZE) + kfree(mem); + else + vfree(mem); + mem = NULL; + } return mem; } @@ -3012,7 +3725,8 @@ static void __mem_cgroup_free(struct mem_cgroup *mem) for_each_node_state(node, N_POSSIBLE) free_mem_cgroup_per_zone_info(mem, node); - if (mem_cgroup_size() < PAGE_SIZE) + free_percpu(mem->stat); + if (sizeof(struct mem_cgroup) < PAGE_SIZE) kfree(mem); else vfree(mem); @@ -3023,9 +3737,9 @@ static void mem_cgroup_get(struct mem_cgroup *mem) atomic_inc(&mem->refcnt); } -static void mem_cgroup_put(struct mem_cgroup *mem) +static void __mem_cgroup_put(struct mem_cgroup *mem, int count) { - if (atomic_dec_and_test(&mem->refcnt)) { + if (atomic_sub_and_test(count, &mem->refcnt)) { struct mem_cgroup *parent = parent_mem_cgroup(mem); __mem_cgroup_free(mem); if (parent) @@ -3033,6 +3747,11 @@ static void mem_cgroup_put(struct mem_cgroup *mem) } } +static void mem_cgroup_put(struct mem_cgroup *mem) +{ + __mem_cgroup_put(mem, 1); +} + /* * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled. */ @@ -3097,12 +3816,18 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) /* root ? 
*/ if (cont->parent == NULL) { + int cpu; enable_swap_cgroup(); parent = NULL; root_mem_cgroup = mem; if (mem_cgroup_soft_limit_tree_init()) goto free_out; - + for_each_possible_cpu(cpu) { + struct memcg_stock_pcp *stock = + &per_cpu(memcg_stock, cpu); + INIT_WORK(&stock->work, drain_local_stock); + } + hotcpu_notifier(memcg_stock_cpu_callback, 0); } else { parent = mem_cgroup_from_cont(cont->parent); mem->use_hierarchy = parent->use_hierarchy; @@ -3128,6 +3853,8 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) if (parent) mem->swappiness = get_swappiness(parent); atomic_set(&mem->refcnt, 1); + mem->move_charge_at_immigrate = 0; + mutex_init(&mem->thresholds_lock); return &mem->css; free_out: __mem_cgroup_free(mem); @@ -3164,19 +3891,445 @@ static int mem_cgroup_populate(struct cgroup_subsys *ss, return ret; } +#ifdef CONFIG_MMU +/* Handlers for move charge at task migration. */ +#define PRECHARGE_COUNT_AT_ONCE 256 +static int mem_cgroup_do_precharge(unsigned long count) +{ + int ret = 0; + int batch_count = PRECHARGE_COUNT_AT_ONCE; + struct mem_cgroup *mem = mc.to; + + if (mem_cgroup_is_root(mem)) { + mc.precharge += count; + /* we don't need css_get for root */ + return ret; + } + /* try to charge at once */ + if (count > 1) { + struct res_counter *dummy; + /* + * "mem" cannot be under rmdir() because we've already checked + * by cgroup_lock_live_cgroup() that it is not removed and we + * are still under the same cgroup_mutex. So we can postpone + * css_get(). + */ + if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy)) + goto one_by_one; + if (do_swap_account && res_counter_charge(&mem->memsw, + PAGE_SIZE * count, &dummy)) { + res_counter_uncharge(&mem->res, PAGE_SIZE * count); + goto one_by_one; + } + mc.precharge += count; + VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags)); + WARN_ON_ONCE(count > INT_MAX); + __css_get(&mem->css, (int)count); + return ret; + } +one_by_one: + /* fall back to one by one charge */ + while (count--) { + if (signal_pending(current)) { + ret = -EINTR; + break; + } + if (!batch_count--) { + batch_count = PRECHARGE_COUNT_AT_ONCE; + cond_resched(); + } + ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false); + if (ret || !mem) + /* mem_cgroup_clear_mc() will do uncharge later */ + return -ENOMEM; + mc.precharge++; + } + return ret; +} + +/** + * is_target_pte_for_mc - check a pte whether it is valid for move charge + * @vma: the vma the pte to be checked belongs + * @addr: the address corresponding to the pte to be checked + * @ptent: the pte to be checked + * @target: the pointer the target page or swap ent will be stored(can be NULL) + * + * Returns + * 0(MC_TARGET_NONE): if the pte is not a target for move charge. + * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for + * move charge. if @target is not NULL, the page is stored in target->page + * with extra refcnt got(Callers should handle it). + * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a + * target for charge migration. if @target is not NULL, the entry is stored + * in target->ent. + * + * Called with pte lock held. 
+ */ +union mc_target { + struct page *page; + swp_entry_t ent; +}; + +enum mc_target_type { + MC_TARGET_NONE, /* not used */ + MC_TARGET_PAGE, + MC_TARGET_SWAP, +}; + +static int is_target_pte_for_mc(struct vm_area_struct *vma, + unsigned long addr, pte_t ptent, union mc_target *target) +{ + struct page *page = NULL; + struct page_cgroup *pc; + int ret = 0; + swp_entry_t ent = { .val = 0 }; + int usage_count = 0; + bool move_anon = test_bit(MOVE_CHARGE_TYPE_ANON, + &mc.to->move_charge_at_immigrate); + + if (!pte_present(ptent)) { + /* TODO: handle swap of shmes/tmpfs */ + if (pte_none(ptent) || pte_file(ptent)) + return 0; + else if (is_swap_pte(ptent)) { + ent = pte_to_swp_entry(ptent); + if (!move_anon || non_swap_entry(ent)) + return 0; + usage_count = mem_cgroup_count_swap_user(ent, &page); + } + } else { + page = vm_normal_page(vma, addr, ptent); + if (!page || !page_mapped(page)) + return 0; + /* + * TODO: We don't move charges of file(including shmem/tmpfs) + * pages for now. + */ + if (!move_anon || !PageAnon(page)) + return 0; + if (!get_page_unless_zero(page)) + return 0; + usage_count = page_mapcount(page); + } + if (usage_count > 1) { + /* + * TODO: We don't move charges of shared(used by multiple + * processes) pages for now. + */ + if (page) + put_page(page); + return 0; + } + if (page) { + pc = lookup_page_cgroup(page); + /* + * Do only loose check w/o page_cgroup lock. + * mem_cgroup_move_account() checks the pc is valid or not under + * the lock. + */ + if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) { + ret = MC_TARGET_PAGE; + if (target) + target->page = page; + } + if (!ret || !target) + put_page(page); + } + /* throught */ + if (ent.val && do_swap_account && !ret && + css_id(&mc.from->css) == lookup_swap_cgroup(ent)) { + ret = MC_TARGET_SWAP; + if (target) + target->ent = ent; + } + return ret; +} + +static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, + unsigned long addr, unsigned long end, + struct mm_walk *walk) +{ + struct vm_area_struct *vma = walk->private; + pte_t *pte; + spinlock_t *ptl; + + pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + for (; addr != end; pte++, addr += PAGE_SIZE) + if (is_target_pte_for_mc(vma, addr, *pte, NULL)) + mc.precharge++; /* increment precharge temporarily */ + pte_unmap_unlock(pte - 1, ptl); + cond_resched(); + + return 0; +} + +static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) +{ + unsigned long precharge; + struct vm_area_struct *vma; + + down_read(&mm->mmap_sem); + for (vma = mm->mmap; vma; vma = vma->vm_next) { + struct mm_walk mem_cgroup_count_precharge_walk = { + .pmd_entry = mem_cgroup_count_precharge_pte_range, + .mm = mm, + .private = vma, + }; + if (is_vm_hugetlb_page(vma)) + continue; + /* TODO: We don't move charges of shmem/tmpfs pages for now. */ + if (vma->vm_flags & VM_SHARED) + continue; + walk_page_range(vma->vm_start, vma->vm_end, + &mem_cgroup_count_precharge_walk); + } + up_read(&mm->mmap_sem); + + precharge = mc.precharge; + mc.precharge = 0; + + return precharge; +} + +static int mem_cgroup_precharge_mc(struct mm_struct *mm) +{ + return mem_cgroup_do_precharge(mem_cgroup_count_precharge(mm)); +} + +static void mem_cgroup_clear_mc(void) +{ + /* we must uncharge all the leftover precharges from mc.to */ + if (mc.precharge) { + __mem_cgroup_cancel_charge(mc.to, mc.precharge); + mc.precharge = 0; + } + /* + * we didn't uncharge from mc.from at mem_cgroup_move_account(), so + * we must uncharge here. 
+ */ + if (mc.moved_charge) { + __mem_cgroup_cancel_charge(mc.from, mc.moved_charge); + mc.moved_charge = 0; + } + /* we must fixup refcnts and charges */ + if (mc.moved_swap) { + WARN_ON_ONCE(mc.moved_swap > INT_MAX); + /* uncharge swap account from the old cgroup */ + if (!mem_cgroup_is_root(mc.from)) + res_counter_uncharge(&mc.from->memsw, + PAGE_SIZE * mc.moved_swap); + __mem_cgroup_put(mc.from, mc.moved_swap); + + if (!mem_cgroup_is_root(mc.to)) { + /* + * we charged both to->res and to->memsw, so we should + * uncharge to->res. + */ + res_counter_uncharge(&mc.to->res, + PAGE_SIZE * mc.moved_swap); + VM_BUG_ON(test_bit(CSS_ROOT, &mc.to->css.flags)); + __css_put(&mc.to->css, mc.moved_swap); + } + /* we've already done mem_cgroup_get(mc.to) */ + + mc.moved_swap = 0; + } + mc.from = NULL; + mc.to = NULL; + mc.moving_task = NULL; + wake_up_all(&mc.waitq); +} + +static int mem_cgroup_can_attach(struct cgroup_subsys *ss, + struct cgroup *cgroup, + struct task_struct *p, + bool threadgroup) +{ + int ret = 0; + struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup); + + if (mem->move_charge_at_immigrate) { + struct mm_struct *mm; + struct mem_cgroup *from = mem_cgroup_from_task(p); + + VM_BUG_ON(from == mem); + + mm = get_task_mm(p); + if (!mm) + return 0; + /* We move charges only when we move a owner of the mm */ + if (mm->owner == p) { + VM_BUG_ON(mc.from); + VM_BUG_ON(mc.to); + VM_BUG_ON(mc.precharge); + VM_BUG_ON(mc.moved_charge); + VM_BUG_ON(mc.moved_swap); + VM_BUG_ON(mc.moving_task); + mc.from = from; + mc.to = mem; + mc.precharge = 0; + mc.moved_charge = 0; + mc.moved_swap = 0; + mc.moving_task = current; + + ret = mem_cgroup_precharge_mc(mm); + if (ret) + mem_cgroup_clear_mc(); + } + mmput(mm); + } + return ret; +} + +static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, + struct cgroup *cgroup, + struct task_struct *p, + bool threadgroup) +{ + mem_cgroup_clear_mc(); +} + +static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, + unsigned long addr, unsigned long end, + struct mm_walk *walk) +{ + int ret = 0; + struct vm_area_struct *vma = walk->private; + pte_t *pte; + spinlock_t *ptl; + +retry: + pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + for (; addr != end; addr += PAGE_SIZE) { + pte_t ptent = *(pte++); + union mc_target target; + int type; + struct page *page; + struct page_cgroup *pc; + swp_entry_t ent; + + if (!mc.precharge) + break; + + type = is_target_pte_for_mc(vma, addr, ptent, &target); + switch (type) { + case MC_TARGET_PAGE: + page = target.page; + if (isolate_lru_page(page)) + goto put; + pc = lookup_page_cgroup(page); + if (!mem_cgroup_move_account(pc, + mc.from, mc.to, false)) { + mc.precharge--; + /* we uncharge from mc.from later. */ + mc.moved_charge++; + } + putback_lru_page(page); +put: /* is_target_pte_for_mc() gets the page */ + put_page(page); + break; + case MC_TARGET_SWAP: + ent = target.ent; + if (!mem_cgroup_move_swap_account(ent, + mc.from, mc.to, false)) { + mc.precharge--; + /* we fixup refcnts and charges later. */ + mc.moved_swap++; + } + break; + default: + break; + } + } + pte_unmap_unlock(pte - 1, ptl); + cond_resched(); + + if (addr != end) { + /* + * We have consumed all precharges we got in can_attach(). + * We try charge one by one, but don't do any additional + * charges to mc.to if we have failed in charge once in attach() + * phase. 
+ */ + ret = mem_cgroup_do_precharge(1); + if (!ret) + goto retry; + } + + return ret; +} + +static void mem_cgroup_move_charge(struct mm_struct *mm) +{ + struct vm_area_struct *vma; + + lru_add_drain_all(); + down_read(&mm->mmap_sem); + for (vma = mm->mmap; vma; vma = vma->vm_next) { + int ret; + struct mm_walk mem_cgroup_move_charge_walk = { + .pmd_entry = mem_cgroup_move_charge_pte_range, + .mm = mm, + .private = vma, + }; + if (is_vm_hugetlb_page(vma)) + continue; + /* TODO: We don't move charges of shmem/tmpfs pages for now. */ + if (vma->vm_flags & VM_SHARED) + continue; + ret = walk_page_range(vma->vm_start, vma->vm_end, + &mem_cgroup_move_charge_walk); + if (ret) + /* + * means we have consumed all precharges and failed in + * doing additional charge. Just abandon here. + */ + break; + } + up_read(&mm->mmap_sem); +} + static void mem_cgroup_move_task(struct cgroup_subsys *ss, struct cgroup *cont, struct cgroup *old_cont, struct task_struct *p, bool threadgroup) { - mutex_lock(&memcg_tasklist); - /* - * FIXME: It's better to move charges of this process from old - * memcg to new memcg. But it's just on TODO-List now. - */ - mutex_unlock(&memcg_tasklist); + struct mm_struct *mm; + + if (!mc.to) + /* no need to move charge */ + return; + + mm = get_task_mm(p); + if (mm) { + mem_cgroup_move_charge(mm); + mmput(mm); + } + mem_cgroup_clear_mc(); } +#else /* !CONFIG_MMU */ +static int mem_cgroup_can_attach(struct cgroup_subsys *ss, + struct cgroup *cgroup, + struct task_struct *p, + bool threadgroup) +{ + return 0; +} +static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, + struct cgroup *cgroup, + struct task_struct *p, + bool threadgroup) +{ +} +static void mem_cgroup_move_task(struct cgroup_subsys *ss, + struct cgroup *cont, + struct cgroup *old_cont, + struct task_struct *p, + bool threadgroup) +{ +} +#endif struct cgroup_subsys mem_cgroup_subsys = { .name = "memory", @@ -3185,6 +4338,8 @@ struct cgroup_subsys mem_cgroup_subsys = { .pre_destroy = mem_cgroup_pre_destroy, .destroy = mem_cgroup_destroy, .populate = mem_cgroup_populate, + .can_attach = mem_cgroup_can_attach, + .cancel_attach = mem_cgroup_cancel_attach, .attach = mem_cgroup_move_task, .early_init = 0, .use_id = 1,
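
The register_event/unregister_event handlers hooked to usage_in_bytes and memsw.usage_in_bytes above are the kernel half of the memory-threshold feature; userspace arms a threshold by handing the kernel an eventfd through cgroup.event_control. Below is a minimal sketch of such a monitor, assuming the v1 memory controller is mounted at /cgroup/memory and a group "A" already exists (both paths and the 64MB value are assumptions, not part of the patch); the control line format is "<event_fd> <fd of memory.usage_in_bytes> <threshold in bytes>".

/*
 * Illustrative userspace sketch (not part of the patch): arm a memcg
 * usage threshold via cgroup.event_control and wait for it to fire.
 * Mount point and group name are hypothetical.
 */
#include <sys/eventfd.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int efd = eventfd(0, 0);
	int ufd = open("/cgroup/memory/A/memory.usage_in_bytes", O_RDONLY);
	int cfd = open("/cgroup/memory/A/cgroup.event_control", O_WRONLY);
	char cmd[64];
	uint64_t ticks;

	if (efd < 0 || ufd < 0 || cfd < 0) {
		perror("setup");
		return 1;
	}
	/* Ask for a notification when usage crosses 64MB. */
	snprintf(cmd, sizeof(cmd), "%d %d %llu", efd, ufd, 64ULL << 20);
	if (write(cfd, cmd, strlen(cmd)) < 0) {
		perror("cgroup.event_control");
		return 1;
	}
	/* Blocks until the threshold is crossed (in either direction). */
	if (read(efd, &ticks, sizeof(ticks)) == sizeof(ticks))
		printf("memory.usage_in_bytes crossed 64MB\n");
	return 0;
}

Thresholds on mem+swap work the same way, except the second fd is opened on memory.memsw.usage_in_bytes.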
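
The rcu_assign_pointer()/synchronize_rcu()/kfree() sequence at the top of this section is the standard RCU publish-then-reclaim pattern: readers that may still be walking the old thresholds array are allowed to finish before it is freed, so the read side never needs thresholds_lock. Here is a condensed kernel-context sketch of that pattern with a hypothetical array type and reader; it mirrors the update side shown above but is not the patch's own reader and is not meant to build standalone.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_array {
	unsigned int size;
	u64 entries[0];
};

static struct my_array *my_thresholds;	/* RCU-protected, like memcg->thresholds */

/* Update side: caller serializes updates (thresholds_lock in the patch). */
static void replace_array(struct my_array *new)
{
	struct my_array *old = my_thresholds;

	rcu_assign_pointer(my_thresholds, new);	/* publish the new array */
	synchronize_rcu();			/* wait out readers of "old" */
	kfree(old);				/* now nobody can still use it */
}

/* Read side: no locks, just an RCU read-side critical section. */
static u64 first_entry(void)
{
	struct my_array *t;
	u64 ret = 0;

	rcu_read_lock();
	t = rcu_dereference(my_thresholds);
	if (t && t->size)
		ret = t->entries[0];
	rcu_read_unlock();
	return ret;
}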
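
__mem_cgroup_put() now takes a count so that mem_cgroup_clear_mc() can drop all the references accumulated in mc.moved_swap with a single atomic_sub_and_test() instead of a loop. The following is a small userspace sketch of the same batched-refcount idea using C11 atomics (the object and helper names are hypothetical); the point is only that the release path still runs exactly once, on whichever caller brings the count to zero, whether it drops one reference or many.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;
};

static void obj_get_many(struct obj *o, int n)
{
	atomic_fetch_add(&o->refcnt, n);
}

static void obj_put_many(struct obj *o, int n)
{
	/* analogous to atomic_sub_and_test(count, &mem->refcnt) */
	if (atomic_fetch_sub(&o->refcnt, n) == n) {
		printf("last reference gone, freeing\n");
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->refcnt, 1);	/* initial reference */
	obj_get_many(o, 3);		/* e.g. three swap entries precharged */
	obj_put_many(o, 3);		/* returned in one go, as clear_mc() does */
	obj_put_many(o, 1);		/* initial reference: this call frees */
	return 0;
}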
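
Taken together, mem_cgroup_do_precharge(), mem_cgroup_move_charge_pte_range() and mem_cgroup_clear_mc() implement a three-phase scheme: reserve an estimated number of charges up front, consume the reserve while walking the page tables (refilling it one charge at a time if it runs dry mid-walk), and finally hand back whatever was not used. Below is a simplified userspace analogue of that control flow; a plain integer "budget" stands in for the res_counter and every name is hypothetical.

/*
 * Simplified analogue of the precharge scheme used for charge moving.
 * Not the kernel code: just the reserve/consume/return control flow.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static size_t budget = 1000;	/* units the "counter" will still accept */
static size_t precharge;	/* mirrors mc.precharge */

static bool reserve_one(void)
{
	if (!budget)
		return false;
	budget--;
	return true;
}

static void unreserve(size_t n)
{
	budget += n;
}

/* Phase 1: like mem_cgroup_do_precharge(), but always one by one. */
static int do_precharge(size_t count)
{
	while (count--) {
		if (!reserve_one())
			return -1;	/* caller cleans up via clear_reserve() */
		precharge++;
	}
	return 0;
}

/* Phase 2: consume the reserve; on exhaustion, add one unit and retry. */
static size_t scan_and_move(size_t items)
{
	size_t moved = 0;

retry:
	while (moved < items) {
		if (!precharge)
			break;
		precharge--;		/* "move" one item's charge */
		moved++;
	}
	if (moved < items && !do_precharge(1))
		goto retry;
	return moved;
}

/* Phase 3: like the plain-precharge part of mem_cgroup_clear_mc(). */
static void clear_reserve(void)
{
	unreserve(precharge);
	precharge = 0;
}

int main(void)
{
	(void)do_precharge(16);			/* up-front estimate */
	printf("moved %zu items\n", scan_and_move(20));
	clear_reserve();			/* return the unused part */
	return 0;
}

Refilling one charge at a time mirrors the comment in the retry path above: once the up-front batch has been consumed, the code deliberately avoids charging another large batch to mc.to after a failure.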
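
From userspace, all of the can_attach()/attach() machinery is driven by two writes: set memory.move_charge_at_immigrate on the destination group (the flag is read from mc.to, i.e. the group being attached to), then move the task. The sketch below assumes the controller is mounted at /cgroup/memory and a destination group "B" exists (both hypothetical); writing 1 sets bit 0, the only charge type this series defines (anonymous pages), and charges are moved only when the task being attached owns the mm.

#include <stdio.h>

/* Hypothetical path: v1 memory controller mounted at /cgroup/memory. */
#define DST	"/cgroup/memory/B"

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}
	/* Bit 0: move charges of anonymous pages along with the task. */
	if (write_str(DST "/memory.move_charge_at_immigrate", "1"))
		return 1;
	/*
	 * Attaching the task triggers can_attach() -> precharge, then
	 * attach() -> mem_cgroup_move_charge(), or cancel_attach() if
	 * the migration is aborted.
	 */
	return write_str(DST "/tasks", argv[1]) ? 1 : 0;
}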