#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
+#include <linux/limits.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/swap.h>
#define MEM_CGROUP_RECLAIM_RETRIES 5
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
-/* Turned on only when memory cgroup is enabled && really_do_swap_account = 0 */
+/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;
static int really_do_swap_account __initdata = 1; /* for remember boot option*/
#else
* For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
*/
MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
- MEM_CGROUP_STAT_RSS, /* # of pages charged as rss */
+ MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
+ MEM_CGROUP_STAT_MAPPED_FILE, /* # of pages charged as file rss */
MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */
MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */
return ret;
}
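+/*
+ * Local (non-hierarchical) usage of this memcg, in pages:
+ * page cache plus anonymous rss.
+ */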
+static s64 mem_cgroup_local_usage(struct mem_cgroup_stat *stat)
+{
+ s64 ret;
+
+ ret = mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_CACHE);
+ ret += mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_RSS);
+ return ret;
+}
+
/*
* per-zone information in memory controller.
*/
/*
* While reclaiming in a hiearchy, we cache the last child we
- * reclaimed from. Protected by hierarchy_mutex
+ * reclaimed from.
*/
- struct mem_cgroup *last_scanned_child;
+ int last_scanned_child;
/*
* Should the accounting and control be hierarchical, per subtree?
*/
unsigned int swappiness;
+ /* set when res.limit == memsw.limit */
+ bool memsw_is_minimum;
+
/*
* statistics. This must be placed at the end of memcg.
*/
MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */
MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */
MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
+ MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
NR_CHARGE_TYPE,
};
static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);
+static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
struct page_cgroup *pc,
return mem_cgroup_zoneinfo(mem, nid, zid);
}
-static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
+static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
enum lru_list idx)
{
int nid, zid;
static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
struct mem_cgroup *mem = NULL;
+
+ if (!mm)
+ return NULL;
/*
* Because we have no locks, mm->owner's may be being moved to other
* cgroup. We use css_tryget() here even if this looks
return mem;
}
-static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem)
+/*
+ * Call the callback function against all cgroups under the hierarchy tree.
+ */
+static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
+ int (*func)(struct mem_cgroup *, void *))
{
- if (!mem)
- return true;
- return css_is_removed(&mem->css);
+ int found, ret, nextid;
+ struct cgroup_subsys_state *css;
+ struct mem_cgroup *mem;
+
+ if (!root->use_hierarchy)
+ return (*func)(root, data);
+
+ nextid = 1;
+ do {
+ ret = 0;
+ mem = NULL;
+
+ rcu_read_lock();
+ css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
+ &found);
+ if (css && css_tryget(css))
+ mem = container_of(css, struct mem_cgroup, css);
+ rcu_read_unlock();
+
+ if (mem) {
+ ret = (*func)(mem, data);
+ css_put(&mem->css);
+ }
+ nextid = found + 1;
+ } while (!ret && css);
+
+ return ret;
}
/*
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
int ret;
+ struct mem_cgroup *curr = NULL;
task_lock(task);
- ret = task->mm && mm_match_cgroup(task->mm, mem);
+ rcu_read_lock();
+ curr = try_get_mem_cgroup_from_mm(task->mm);
+ rcu_read_unlock();
task_unlock(task);
+ if (!curr)
+ return 0;
+ if (curr->use_hierarchy)
+ ret = css_is_ancestor(&curr->css, &mem->css);
+ else
+ ret = (curr == mem);
+ css_put(&curr->css);
return ret;
}
/*
- * Calculate mapped_ratio under memory controller. This will be used in
- * vmscan.c for deteremining we have to reclaim mapped pages.
- */
-int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
-{
- long total, rss;
-
- /*
- * usage is recorded in bytes. But, here, we assume the number of
- * physical pages can be represented by "long" on any arch.
- */
- total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
- rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
- return (int)((rss * 100L) / total);
-}
-
-/*
* prev_priority control...this will be used in memory reclaim path.
*/
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
unsigned long gb;
unsigned long inactive_ratio;
- inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON);
- active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON);
+ inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
+ active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
gb = (inactive + active) >> (30 - PAGE_SHIFT);
if (gb)
return 0;
}
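+/*
+ * Returns true when this memcg's file LRU has more active than inactive
+ * pages, i.e. the inactive file list is considered low.
+ */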
+int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
+{
+ unsigned long active;
+ unsigned long inactive;
+
+ inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
+ active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
+
+ return (active > inactive);
+}
+
unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
struct zone *zone,
enum lru_list lru)
int zid = zone_idx(z);
struct mem_cgroup_per_zone *mz;
int lru = LRU_FILE * !!file + !!active;
+ int ret;
BUG_ON(!mem_cont);
mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
continue;
scan++;
- if (__isolate_lru_page(page, mode, file) == 0) {
+ ret = __isolate_lru_page(page, mode, file);
+ switch (ret) {
+ case 0:
list_move(&page->lru, dst);
+ mem_cgroup_del_lru(page);
nr_taken++;
+ break;
+ case -EBUSY:
+ /* we don't affect global LRU but rotate in our LRU */
+ mem_cgroup_rotate_lru_list(page, page_lru(page));
+ break;
+ default:
+ break;
}
}
#define mem_cgroup_from_res_counter(counter, member) \
container_of(counter, struct mem_cgroup, member)
-/*
- * This routine finds the DFS walk successor. This routine should be
- * called with hierarchy_mutex held
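+/*
+ * Returns true when usage is under the memory limit and, if swap
+ * accounting is enabled, under the mem+swap limit as well.
+ */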
+static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
+{
+ if (do_swap_account) {
+ if (res_counter_check_under_limit(&mem->res) &&
+ res_counter_check_under_limit(&mem->memsw))
+ return true;
+ } else
+ if (res_counter_check_under_limit(&mem->res))
+ return true;
+ return false;
+}
+
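+/*
+ * Effective swappiness for this memcg; the root cgroup uses the global
+ * vm_swappiness.
+ */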
+static unsigned int get_swappiness(struct mem_cgroup *memcg)
+{
+ struct cgroup *cgrp = memcg->css.cgroup;
+ unsigned int swappiness;
+
+ /* root ? */
+ if (cgrp->parent == NULL)
+ return vm_swappiness;
+
+ spin_lock(&memcg->reclaim_param_lock);
+ swappiness = memcg->swappiness;
+ spin_unlock(&memcg->reclaim_param_lock);
+
+ return swappiness;
+}
+
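+/* mem_cgroup_walk_tree() callback: count each memcg visited. */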
+static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
+{
+ int *val = data;
+ (*val)++;
+ return 0;
+}
+
+/**
+ * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
+ * @memcg: The memory cgroup that went over its limit
+ * @p: Task that is going to be killed
+ *
+ * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
+ * enabled
*/
-static struct mem_cgroup *
-mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
+void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
- struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
+ struct cgroup *task_cgrp;
+ struct cgroup *mem_cgrp;
+ /*
+ * Need a buffer in BSS, can't rely on allocations. The code relies
+ * on the assumption that OOM is serialized for the memory controller.
+ * If this assumption is broken, revisit this code.
+ */
+ static char memcg_name[PATH_MAX];
+ int ret;
+
+ if (!memcg)
+ return;
- curr_cgroup = curr->css.cgroup;
- root_cgroup = root_mem->css.cgroup;
- if (!list_empty(&curr_cgroup->children)) {
+ rcu_read_lock();
+
+ mem_cgrp = memcg->css.cgroup;
+ task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
+
+ ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
+ if (ret < 0) {
/*
- * Walk down to children
+ * Unfortunately, we are unable to convert to a useful name,
+ * but we'll still print out the usage information
*/
- mem_cgroup_put(curr);
- cgroup = list_entry(curr_cgroup->children.next,
- struct cgroup, sibling);
- curr = mem_cgroup_from_cont(cgroup);
- mem_cgroup_get(curr);
+ rcu_read_unlock();
goto done;
}
+ rcu_read_unlock();
-visit_parent:
- if (curr_cgroup == root_cgroup) {
- mem_cgroup_put(curr);
- curr = root_mem;
- mem_cgroup_get(curr);
- goto done;
- }
+ printk(KERN_INFO "Task in %s killed", memcg_name);
- /*
- * Goto next sibling
- */
- if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
- mem_cgroup_put(curr);
- cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
- sibling);
- curr = mem_cgroup_from_cont(cgroup);
- mem_cgroup_get(curr);
+ rcu_read_lock();
+ ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
+ if (ret < 0) {
+ rcu_read_unlock();
goto done;
}
+ rcu_read_unlock();
/*
- * Go up to next parent and next parent's sibling if need be
+ * Continues from above, so we don't need a KERN_ level
*/
- curr_cgroup = curr_cgroup->parent;
- goto visit_parent;
-
+ printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
done:
- root_mem->last_scanned_child = curr;
- return curr;
+
+ printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
+ res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
+ res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
+ res_counter_read_u64(&memcg->res, RES_FAILCNT));
+ printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
+ "failcnt %llu\n",
+ res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
+ res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
+ res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
+}
+
+/*
+ * This function returns the number of memcgs under the hierarchy tree.
+ * Returns 1 (self count) if there are no children.
+ */
+static int mem_cgroup_count_children(struct mem_cgroup *mem)
+{
+ int num = 0;
+ mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
+ return num;
}
/*
* that to reclaim free pages from.
*/
static struct mem_cgroup *
-mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
+mem_cgroup_select_victim(struct mem_cgroup *root_mem)
{
- struct cgroup *cgroup;
- struct mem_cgroup *ret;
- bool obsolete;
-
- obsolete = mem_cgroup_is_obsolete(root_mem->last_scanned_child);
+ struct mem_cgroup *ret = NULL;
+ struct cgroup_subsys_state *css;
+ int nextid, found;
- /*
- * Scan all children under the mem_cgroup mem
- */
- mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
- if (list_empty(&root_mem->css.cgroup->children)) {
+ if (!root_mem->use_hierarchy) {
+ css_get(&root_mem->css);
ret = root_mem;
- goto done;
}
- if (!root_mem->last_scanned_child || obsolete) {
-
- if (obsolete && root_mem->last_scanned_child)
- mem_cgroup_put(root_mem->last_scanned_child);
-
- cgroup = list_first_entry(&root_mem->css.cgroup->children,
- struct cgroup, sibling);
- ret = mem_cgroup_from_cont(cgroup);
- mem_cgroup_get(ret);
- } else
- ret = mem_cgroup_get_next_node(root_mem->last_scanned_child,
- root_mem);
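+ /* Walk css IDs round-robin under root_mem, resuming after last_scanned_child. */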
+ while (!ret) {
+ rcu_read_lock();
+ nextid = root_mem->last_scanned_child + 1;
+ css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
+ &found);
+ if (css && css_tryget(css))
+ ret = container_of(css, struct mem_cgroup, css);
+
+ rcu_read_unlock();
+ /* Updates scanning parameter */
+ spin_lock(&root_mem->reclaim_param_lock);
+ if (!css) {
+ /* this means start scan from ID:1 */
+ root_mem->last_scanned_child = 0;
+ } else
+ root_mem->last_scanned_child = found;
+ spin_unlock(&root_mem->reclaim_param_lock);
+ }
-done:
- root_mem->last_scanned_child = ret;
- mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
return ret;
}
-static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
-{
- if (do_swap_account) {
- if (res_counter_check_under_limit(&mem->res) &&
- res_counter_check_under_limit(&mem->memsw))
- return true;
- } else
- if (res_counter_check_under_limit(&mem->res))
- return true;
- return false;
-}
-
-static unsigned int get_swappiness(struct mem_cgroup *memcg)
-{
- struct cgroup *cgrp = memcg->css.cgroup;
- unsigned int swappiness;
-
- /* root ? */
- if (cgrp->parent == NULL)
- return vm_swappiness;
-
- spin_lock(&memcg->reclaim_param_lock);
- swappiness = memcg->swappiness;
- spin_unlock(&memcg->reclaim_param_lock);
-
- return swappiness;
-}
-
/*
- * Dance down the hierarchy if needed to reclaim memory. We remember the
- * last child we reclaimed from, so that we don't end up penalizing
- * one child extensively based on its position in the children list.
+ * Scan the hierarchy if needed to reclaim memory. We remember the last child
+ * we reclaimed from, so that we don't end up penalizing one child extensively
+ * based on its position in the children list.
*
* root_mem is the original ancestor that we've been reclaim from.
+ *
+ * We give up and return to the caller when we visit root_mem twice.
+ * (other groups can be removed while we're walking....)
+ *
+ * If shrink==true, this returns immediately to avoid freeing too much.
*/
static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
- gfp_t gfp_mask, bool noswap)
-{
- struct mem_cgroup *next_mem;
- int ret = 0;
-
- /*
- * Reclaim unconditionally and don't check for return value.
- * We need to reclaim in the current group and down the tree.
- * One might think about checking for children before reclaiming,
- * but there might be left over accounting, even after children
- * have left.
- */
- ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap,
- get_swappiness(root_mem));
- if (mem_cgroup_check_under_limit(root_mem))
- return 0;
- if (!root_mem->use_hierarchy)
- return ret;
-
- next_mem = mem_cgroup_get_first_node(root_mem);
-
- while (next_mem != root_mem) {
- if (mem_cgroup_is_obsolete(next_mem)) {
- mem_cgroup_put(next_mem);
- next_mem = mem_cgroup_get_first_node(root_mem);
+ gfp_t gfp_mask, bool noswap, bool shrink)
+{
+ struct mem_cgroup *victim;
+ int ret, total = 0;
+ int loop = 0;
+
+ /* If memsw_is_minimum==1, swap-out is of no use. */
+ if (root_mem->memsw_is_minimum)
+ noswap = true;
+
+ while (loop < 2) {
+ victim = mem_cgroup_select_victim(root_mem);
+ if (victim == root_mem)
+ loop++;
+ if (!mem_cgroup_local_usage(&victim->stat)) {
+ /* this cgroup's local usage == 0 */
+ css_put(&victim->css);
continue;
}
- ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
- get_swappiness(next_mem));
+ /* we use swappiness of local cgroup */
+ ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
+ get_swappiness(victim));
+ css_put(&victim->css);
+ /*
+ * When shrinking usage, we can't tell here whether we should stop
+ * or reclaim more; that depends on the caller. last_scanned_child
+ * is enough to keep fairness across the tree.
+ */
+ if (shrink)
+ return ret;
+ total += ret;
if (mem_cgroup_check_under_limit(root_mem))
- return 0;
- mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
- next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
- mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
+ return 1 + total;
}
- return ret;
+ return total;
}
bool mem_cgroup_oom_called(struct task_struct *task)
rcu_read_unlock();
return ret;
}
+
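+/* Record jiffies of the last OOM for every memcg under the hierarchy. */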
+static int record_last_oom_cb(struct mem_cgroup *mem, void *data)
+{
+ mem->last_oom_jiffies = jiffies;
+ return 0;
+}
+
+static void record_last_oom(struct mem_cgroup *mem)
+{
+ mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
+}
+
+/*
+ * Currently used to update mapped file statistics, but the routine can be
+ * generalized to update other statistics as well.
+ */
+void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
+{
+ struct mem_cgroup *mem;
+ struct mem_cgroup_stat *stat;
+ struct mem_cgroup_stat_cpu *cpustat;
+ int cpu;
+ struct page_cgroup *pc;
+
+ if (!page_is_file_cache(page))
+ return;
+
+ pc = lookup_page_cgroup(page);
+ if (unlikely(!pc))
+ return;
+
+ lock_page_cgroup(pc);
+ mem = pc->mem_cgroup;
+ if (!mem)
+ goto done;
+
+ if (!PageCgroupUsed(pc))
+ goto done;
+
+ /*
+ * Preemption is already disabled, so we don't need get_cpu()
+ */
+ cpu = smp_processor_id();
+ stat = &mem->stat;
+ cpustat = &stat->cpustat[cpu];
+
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, val);
+done:
+ unlock_page_cgroup(pc);
+}
+
/*
* Unlike exported interface, "oom" parameter is added. if oom==true,
* oom-killer can be invoked.
if (unlikely(!mem))
return 0;
- VM_BUG_ON(mem_cgroup_is_obsolete(mem));
+ VM_BUG_ON(css_is_removed(&mem->css));
while (1) {
int ret;
goto nomem;
ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
- noswap);
+ noswap, false);
+ if (ret)
+ continue;
/*
* try_to_free_mem_cgroup_pages() might not give us a full
mutex_lock(&memcg_tasklist);
mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
mutex_unlock(&memcg_tasklist);
- mem_over_limit->last_oom_jiffies = jiffies;
+ record_last_oom(mem_over_limit);
}
goto nomem;
}
return -ENOMEM;
}
+
+/*
+ * A helper function to get a mem_cgroup from an ID. Must be called under
+ * rcu_read_lock(). The caller must check css_is_removed() or similar if
+ * that is a concern. (Dropping a refcnt from swap can be called against a
+ * removed memcg.)
+ */
+static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
+{
+ struct cgroup_subsys_state *css;
+
+ /* ID 0 is unused ID */
+ if (!id)
+ return NULL;
+ css = css_lookup(&mem_cgroup_subsys, id);
+ if (!css)
+ return NULL;
+ return container_of(css, struct mem_cgroup, css);
+}
+
static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
{
struct mem_cgroup *mem;
+ struct page_cgroup *pc;
+ unsigned short id;
swp_entry_t ent;
+ VM_BUG_ON(!PageLocked(page));
+
if (!PageSwapCache(page))
return NULL;
- ent.val = page_private(page);
- mem = lookup_swap_cgroup(ent);
- if (!mem)
- return NULL;
- if (!css_tryget(&mem->css))
- return NULL;
+ pc = lookup_page_cgroup(page);
+ lock_page_cgroup(pc);
+ if (PageCgroupUsed(pc)) {
+ mem = pc->mem_cgroup;
+ if (mem && !css_tryget(&mem->css))
+ mem = NULL;
+ } else {
+ ent.val = page_private(page);
+ id = lookup_swap_cgroup(ent);
+ rcu_read_lock();
+ mem = mem_cgroup_lookup(id);
+ if (mem && !css_tryget(&mem->css))
+ mem = NULL;
+ rcu_read_unlock();
+ }
+ unlock_page_cgroup(pc);
return mem;
}
struct mem_cgroup_per_zone *from_mz, *to_mz;
int nid, zid;
int ret = -EBUSY;
+ struct page *page;
+ int cpu;
+ struct mem_cgroup_stat *stat;
+ struct mem_cgroup_stat_cpu *cpustat;
VM_BUG_ON(from == to);
VM_BUG_ON(PageLRU(pc->page));
res_counter_uncharge(&from->res, PAGE_SIZE);
mem_cgroup_charge_statistics(from, pc, false);
+
+ page = pc->page;
+ if (page_is_file_cache(page) && page_mapped(page)) {
+ cpu = smp_processor_id();
+ /* Update mapped_file data for mem_cgroup "from" */
+ stat = &from->stat;
+ cpustat = &stat->cpustat[cpu];
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+ -1);
+
+ /* Update mapped_file data for mem_cgroup "to" */
+ stat = &to->stat;
+ cpustat = &stat->cpustat[cpu];
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+ 1);
+ }
+
if (do_swap_account)
res_counter_uncharge(&from->memsw, PAGE_SIZE);
css_put(&from->css);
ret = 0;
out:
unlock_page_cgroup(pc);
+ /*
+ * We charge against "to", which may not have any tasks, so "to" can
+ * be under rmdir(). But in the current implementation the only caller
+ * of this function is force_empty() and it is guaranteed that "to"
+ * is never removed. So, we don't check rmdir status here.
+ */
return ret;
}
MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
}
+static void
+__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
+ enum charge_type ctype);
+
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask)
{
unlock_page_cgroup(pc);
}
- if (do_swap_account && PageSwapCache(page)) {
- mem = try_get_mem_cgroup_from_swapcache(page);
- if (mem)
- mm = NULL;
- else
- mem = NULL;
- /* SwapCache may be still linked to LRU now. */
- mem_cgroup_lru_del_before_commit_swapcache(page);
- }
-
if (unlikely(!mm && !mem))
mm = &init_mm;
return mem_cgroup_charge_common(page, mm, gfp_mask,
MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
- ret = mem_cgroup_charge_common(page, mm, gfp_mask,
- MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
- if (mem)
- css_put(&mem->css);
- if (PageSwapCache(page))
- mem_cgroup_lru_add_after_commit_swapcache(page);
+ /* shmem */
+ if (PageSwapCache(page)) {
+ ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
+ if (!ret)
+ __mem_cgroup_commit_charge_swapin(page, mem,
+ MEM_CGROUP_CHARGE_TYPE_SHMEM);
+ } else
+ ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+ MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
- if (do_swap_account && !ret && PageSwapCache(page)) {
- swp_entry_t ent = {.val = page_private(page)};
- /* avoid double counting */
- mem = swap_cgroup_record(ent, NULL);
- if (mem) {
- res_counter_uncharge(&mem->memsw, PAGE_SIZE);
- mem_cgroup_put(mem);
- }
- }
return ret;
}
return __mem_cgroup_try_charge(mm, mask, ptr, true);
}
-void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+static void
+__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
+ enum charge_type ctype)
{
struct page_cgroup *pc;
return;
if (!ptr)
return;
+ cgroup_exclude_rmdir(&ptr->css);
pc = lookup_page_cgroup(page);
mem_cgroup_lru_del_before_commit_swapcache(page);
- __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
+ __mem_cgroup_commit_charge(ptr, pc, ctype);
mem_cgroup_lru_add_after_commit_swapcache(page);
/*
* Now swap is on-memory. This means this page may be
*/
if (do_swap_account && PageSwapCache(page)) {
swp_entry_t ent = {.val = page_private(page)};
+ unsigned short id;
struct mem_cgroup *memcg;
- memcg = swap_cgroup_record(ent, NULL);
+
+ id = swap_cgroup_record(ent, 0);
+ rcu_read_lock();
+ memcg = mem_cgroup_lookup(id);
if (memcg) {
+ /*
+ * The recorded memcg may be an obsolete one, so avoid
+ * calling css_tryget.
+ */
res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
mem_cgroup_put(memcg);
}
-
+ rcu_read_unlock();
}
- /* add this page(page_cgroup) to the LRU we want. */
+ /*
+ * At swapin, we may charge against a cgroup which has no tasks, so
+ * rmdir()->pre_destroy() can be called while we do this charge.
+ * In that case, we need to call pre_destroy() again. Check it here.
+ */
+ cgroup_release_and_wakeup_rmdir(&ptr->css);
+}
+void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+{
+ __mem_cgroup_commit_charge_swapin(page, ptr,
+ MEM_CGROUP_CHARGE_TYPE_MAPPED);
}
void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
switch (ctype) {
case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+ case MEM_CGROUP_CHARGE_TYPE_DROP:
if (page_mapped(page))
goto unlock_out;
break;
res_counter_uncharge(&mem->res, PAGE_SIZE);
if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
res_counter_uncharge(&mem->memsw, PAGE_SIZE);
-
mem_cgroup_charge_statistics(mem, pc, false);
+
ClearPageCgroupUsed(pc);
/*
* pc->mem_cgroup is not cleared here. It will be accessed when it's
__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}
+#ifdef CONFIG_SWAP
/*
- * called from __delete_from_swap_cache() and drop "page" account.
+ * called after __delete_from_swap_cache(); drops the "page" account.
* memcg information is recorded to swap_cgroup of "ent"
*/
-void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
+void
+mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
struct mem_cgroup *memcg;
+ int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
+
+ if (!swapout) /* this was a swap cache but the swap is unused ! */
+ ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
+
+ memcg = __mem_cgroup_uncharge_common(page, ctype);
- memcg = __mem_cgroup_uncharge_common(page,
- MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
/* record memcg information */
- if (do_swap_account && memcg) {
- swap_cgroup_record(ent, memcg);
+ if (do_swap_account && swapout && memcg) {
+ swap_cgroup_record(ent, css_id(&memcg->css));
mem_cgroup_get(memcg);
}
- if (memcg)
+ if (swapout && memcg)
css_put(&memcg->css);
}
+#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/*
void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
struct mem_cgroup *memcg;
+ unsigned short id;
if (!do_swap_account)
return;
- memcg = swap_cgroup_record(ent, NULL);
+ id = swap_cgroup_record(ent, 0);
+ rcu_read_lock();
+ memcg = mem_cgroup_lookup(id);
if (memcg) {
+ /*
+ * We uncharge this because the swap entry is freed. This memcg
+ * may be an obsolete one, so we avoid calling css_tryget.
+ */
res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
mem_cgroup_put(memcg);
}
+ rcu_read_unlock();
}
#endif
if (!mem)
return;
-
+ cgroup_exclude_rmdir(&mem->css);
/* at migration success, oldpage->mapping is NULL. */
if (oldpage->mapping) {
target = oldpage;
*/
if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
mem_cgroup_uncharge_page(target);
+ /*
+ * At migration, we may charge against a cgroup which has no tasks, so
+ * rmdir()->pre_destroy() can be called while we do this charge.
+ * In that case, we need to call pre_destroy() again. Check it here.
+ */
+ cgroup_release_and_wakeup_rmdir(&mem->css);
}
/*
- * A call to try to shrink memory usage under specified resource controller.
- * This is typically used for page reclaiming for shmem for reducing side
- * effect of page allocation from shmem, which is used by some mem_cgroup.
+ * A call to try to shrink memory usage on charge failure at shmem's swapin.
+ * Calling hierarchical_reclaim is not enough because we should update
+ * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
+ * Moreover, considering hierarchy, we should reclaim from mem_over_limit,
+ * not from the memcg which this page would be charged to.
+ * try_charge_swapin does all of this properly.
*/
-int mem_cgroup_shrink_usage(struct page *page,
+int mem_cgroup_shmem_charge_fallback(struct page *page,
struct mm_struct *mm,
gfp_t gfp_mask)
{
struct mem_cgroup *mem = NULL;
- int progress = 0;
- int retry = MEM_CGROUP_RECLAIM_RETRIES;
+ int ret;
if (mem_cgroup_disabled())
return 0;
- if (page)
- mem = try_get_mem_cgroup_from_swapcache(page);
- if (!mem && mm)
- mem = try_get_mem_cgroup_from_mm(mm);
- if (unlikely(!mem))
- return 0;
- do {
- progress = mem_cgroup_hierarchical_reclaim(mem, gfp_mask, true);
- progress += mem_cgroup_check_under_limit(mem);
- } while (!progress && --retry);
+ ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
+ if (!ret)
+ mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
- css_put(&mem->css);
- if (!retry)
- return -ENOMEM;
- return 0;
+ return ret;
}
static DEFINE_MUTEX(set_limit_mutex);
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
unsigned long long val)
{
-
- int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+ int retry_count;
int progress;
u64 memswlimit;
int ret = 0;
+ int children = mem_cgroup_count_children(memcg);
+ u64 curusage, oldusage;
+
+ /*
+ * To keep hierarchical_reclaim simple, how long we should retry
+ * depends on the caller. We set our retry count to be a function
+ * of the number of children we should visit in this loop.
+ */
+ retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
+
+ oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
while (retry_count) {
if (signal_pending(current)) {
break;
}
ret = res_counter_set_limit(&memcg->res, val);
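+ /* If the new memory limit equals the memsw limit, swap-out cannot reduce memsw usage. */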
+ if (!ret) {
+ if (memswlimit == val)
+ memcg->memsw_is_minimum = true;
+ else
+ memcg->memsw_is_minimum = false;
+ }
mutex_unlock(&set_limit_mutex);
if (!ret)
break;
progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
- false);
- if (!progress) retry_count--;
+ false, true);
+ curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
+ /* Usage is reduced ? */
+ if (curusage >= oldusage)
+ retry_count--;
+ else
+ oldusage = curusage;
}
return ret;
}
-int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
- unsigned long long val)
+static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
+ unsigned long long val)
{
- int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+ int retry_count;
u64 memlimit, oldusage, curusage;
- int ret;
-
- if (!do_swap_account)
- return -EINVAL;
+ int children = mem_cgroup_count_children(memcg);
+ int ret = -EBUSY;
+ /* see mem_cgroup_resize_limit */
+ retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
+ oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
while (retry_count) {
if (signal_pending(current)) {
ret = -EINTR;
break;
}
ret = res_counter_set_limit(&memcg->memsw, val);
+ if (!ret) {
+ if (memlimit == val)
+ memcg->memsw_is_minimum = true;
+ else
+ memcg->memsw_is_minimum = false;
+ }
mutex_unlock(&set_limit_mutex);
if (!ret)
break;
- oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
- mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true);
+ mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true, true);
curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+ /* Usage is reduced ? */
if (curusage >= oldusage)
retry_count--;
+ else
+ oldusage = curusage;
}
return ret;
}
/* This is for making all *used* pages to be on LRU. */
lru_add_drain_all();
ret = 0;
- for_each_node_state(node, N_POSSIBLE) {
+ for_each_node_state(node, N_HIGH_MEMORY) {
for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
enum lru_list l;
for_each_lru(l) {
if (!progress) {
nr_retries--;
/* maybe some writeback is necessary */
- congestion_wait(WRITE, HZ/10);
+ congestion_wait(BLK_RW_ASYNC, HZ/10);
}
}
val = res_counter_read_u64(&mem->res, name);
break;
case _MEMSWAP:
- if (do_swap_account)
- val = res_counter_read_u64(&mem->memsw, name);
+ val = res_counter_read_u64(&mem->memsw, name);
break;
default:
BUG();
return 0;
}
-static const struct mem_cgroup_stat_desc {
- const char *msg;
- u64 unit;
-} mem_cgroup_stat_desc[] = {
- [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
- [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
- [MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
- [MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
+
+/* For read statistics */
+enum {
+ MCS_CACHE,
+ MCS_RSS,
+ MCS_MAPPED_FILE,
+ MCS_PGPGIN,
+ MCS_PGPGOUT,
+ MCS_INACTIVE_ANON,
+ MCS_ACTIVE_ANON,
+ MCS_INACTIVE_FILE,
+ MCS_ACTIVE_FILE,
+ MCS_UNEVICTABLE,
+ NR_MCS_STAT,
};
+struct mcs_total_stat {
+ s64 stat[NR_MCS_STAT];
+};
+
+struct {
+ char *local_name;
+ char *total_name;
+} memcg_stat_strings[NR_MCS_STAT] = {
+ {"cache", "total_cache"},
+ {"rss", "total_rss"},
+ {"mapped_file", "total_mapped_file"},
+ {"pgpgin", "total_pgpgin"},
+ {"pgpgout", "total_pgpgout"},
+ {"inactive_anon", "total_inactive_anon"},
+ {"active_anon", "total_active_anon"},
+ {"inactive_file", "total_inactive_file"},
+ {"active_file", "total_active_file"},
+ {"unevictable", "total_unevictable"}
+};
+
+
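+/*
+ * mem_cgroup_walk_tree() callback: accumulate this memcg's local counters
+ * into an mcs_total_stat (bytes for sizes, raw counts for events).
+ */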
+static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
+{
+ struct mcs_total_stat *s = data;
+ s64 val;
+
+ /* per cpu stat */
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE);
+ s->stat[MCS_CACHE] += val * PAGE_SIZE;
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
+ s->stat[MCS_RSS] += val * PAGE_SIZE;
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE);
+ s->stat[MCS_MAPPED_FILE] += val * PAGE_SIZE;
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
+ s->stat[MCS_PGPGIN] += val;
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
+ s->stat[MCS_PGPGOUT] += val;
+
+ /* per zone stat */
+ val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
+ s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
+ val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
+ s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
+ val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
+ s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
+ val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
+ s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
+ val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
+ s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
+ return 0;
+}
+
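+/* Sum statistics over the whole hierarchy rooted at "mem". */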
+static void
+mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
+{
+ mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
+}
+
static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
struct cgroup_map_cb *cb)
{
struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
- struct mem_cgroup_stat *stat = &mem_cont->stat;
+ struct mcs_total_stat mystat;
int i;
- for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
- s64 val;
+ memset(&mystat, 0, sizeof(mystat));
+ mem_cgroup_get_local_stat(mem_cont, &mystat);
- val = mem_cgroup_read_stat(stat, i);
- val *= mem_cgroup_stat_desc[i].unit;
- cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
- }
- /* showing # of active pages */
- {
- unsigned long active_anon, inactive_anon;
- unsigned long active_file, inactive_file;
- unsigned long unevictable;
-
- inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
- LRU_INACTIVE_ANON);
- active_anon = mem_cgroup_get_all_zonestat(mem_cont,
- LRU_ACTIVE_ANON);
- inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
- LRU_INACTIVE_FILE);
- active_file = mem_cgroup_get_all_zonestat(mem_cont,
- LRU_ACTIVE_FILE);
- unevictable = mem_cgroup_get_all_zonestat(mem_cont,
- LRU_UNEVICTABLE);
-
- cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
- cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
- cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
- cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
- cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
+ for (i = 0; i < NR_MCS_STAT; i++)
+ cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
- }
+ /* Hierarchical information */
{
unsigned long long limit, memsw_limit;
memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
}
+ memset(&mystat, 0, sizeof(mystat));
+ mem_cgroup_get_total_stat(mem_cont, &mystat);
+ for (i = 0; i < NR_MCS_STAT; i++)
+ cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
+
+
#ifdef CONFIG_DEBUG_VM
cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup *parent;
+
if (val > 100)
return -EINVAL;
return -EINVAL;
parent = mem_cgroup_from_cont(cgrp->parent);
+
+ cgroup_lock();
+
/* If under hierarchy, only empty-root can set this value */
if ((parent->use_hierarchy) ||
- (memcg->use_hierarchy && !list_empty(&cgrp->children)))
+ (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
+ cgroup_unlock();
return -EINVAL;
+ }
spin_lock(&memcg->reclaim_param_lock);
memcg->swappiness = val;
spin_unlock(&memcg->reclaim_param_lock);
+ cgroup_unlock();
+
return 0;
}
{
int node;
+ free_css_id(&mem_cgroup_subsys, &mem->css);
+
for_each_node_state(node, N_POSSIBLE)
free_mem_cgroup_per_zone_info(mem, node);
static void mem_cgroup_put(struct mem_cgroup *mem)
{
- if (atomic_dec_and_test(&mem->refcnt))
+ if (atomic_dec_and_test(&mem->refcnt)) {
+ struct mem_cgroup *parent = parent_mem_cgroup(mem);
__mem_cgroup_free(mem);
+ if (parent)
+ mem_cgroup_put(parent);
+ }
}
+/*
+ * Returns the parent mem_cgroup in the memcg hierarchy when hierarchy is enabled.
+ */
+static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
+{
+ if (!mem->res.parent)
+ return NULL;
+ return mem_cgroup_from_res_counter(mem->res.parent, res);
+}
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static void __init enable_swap_cgroup(void)
}
#endif
-static struct cgroup_subsys_state *
+static struct cgroup_subsys_state * __ref
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
struct mem_cgroup *mem, *parent;
+ long error = -ENOMEM;
int node;
mem = mem_cgroup_alloc();
if (!mem)
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(error);
for_each_node_state(node, N_POSSIBLE)
if (alloc_mem_cgroup_per_zone_info(mem, node))
if (parent && parent->use_hierarchy) {
res_counter_init(&mem->res, &parent->res);
res_counter_init(&mem->memsw, &parent->memsw);
+ /*
+ * We increment refcnt of the parent to ensure that we can
+ * safely access it on res_counter_charge/uncharge.
+ * This refcnt will be decremented when freeing this
+ * mem_cgroup (see mem_cgroup_put).
+ */
+ mem_cgroup_get(parent);
} else {
res_counter_init(&mem->res, NULL);
res_counter_init(&mem->memsw, NULL);
}
- mem->last_scanned_child = NULL;
+ mem->last_scanned_child = 0;
spin_lock_init(&mem->reclaim_param_lock);
if (parent)
return &mem->css;
free_out:
__mem_cgroup_free(mem);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(error);
}
-static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
+static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
struct cgroup *cont)
{
struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
- mem_cgroup_force_empty(mem, false);
+
+ return mem_cgroup_force_empty(mem, false);
}
static void mem_cgroup_destroy(struct cgroup_subsys *ss,
struct cgroup *cont)
{
- mem_cgroup_put(mem_cgroup_from_cont(cont));
+ struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
+
+ mem_cgroup_put(mem);
}
static int mem_cgroup_populate(struct cgroup_subsys *ss,
.populate = mem_cgroup_populate,
.attach = mem_cgroup_move_task,
.early_init = 0,
+ .use_id = 1,
};
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP