X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=mm%2Fmemcontrol.c;h=36896f3eb7f5e5c2e4c3803cc8125fe461a71669;hb=f0c929251e01a7a86b6254c775cb6b65c6457f10;hp=10833d969e3fa0c5c276b65e7d86bdfd6bc6f042;hpb=8697d33194faae6fdd6b2e799f6308aa00cfdf67;p=safe%2Fjmp%2Flinux-2.6 diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 10833d9..36896f3 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -21,18 +21,97 @@ #include #include #include +#include #include #include #include #include +#include #include #include #include +#include +#include #include -struct cgroup_subsys mem_cgroup_subsys; -static const int MEM_CGROUP_RECLAIM_RETRIES = 5; +struct cgroup_subsys mem_cgroup_subsys __read_mostly; +static struct kmem_cache *page_cgroup_cache __read_mostly; +#define MEM_CGROUP_RECLAIM_RETRIES 5 + +/* + * Statistics for memory cgroup. + */ +enum mem_cgroup_stat_index { + /* + * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss. + */ + MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */ + MEM_CGROUP_STAT_RSS, /* # of pages charged as rss */ + MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */ + MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */ + + MEM_CGROUP_STAT_NSTATS, +}; + +struct mem_cgroup_stat_cpu { + s64 count[MEM_CGROUP_STAT_NSTATS]; +} ____cacheline_aligned_in_smp; + +struct mem_cgroup_stat { + struct mem_cgroup_stat_cpu cpustat[NR_CPUS]; +}; + +/* + * For accounting under irq disable, no need for increment preempt count. + */ +static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat, + enum mem_cgroup_stat_index idx, int val) +{ + int cpu = smp_processor_id(); + stat->cpustat[cpu].count[idx] += val; +} + +static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat, + enum mem_cgroup_stat_index idx) +{ + int cpu; + s64 ret = 0; + for_each_possible_cpu(cpu) + ret += stat->cpustat[cpu].count[idx]; + return ret; +} + +/* + * per-zone information in memory controller. + */ + +enum mem_cgroup_zstat_index { + MEM_CGROUP_ZSTAT_ACTIVE, + MEM_CGROUP_ZSTAT_INACTIVE, + + NR_MEM_CGROUP_ZSTAT, +}; + +struct mem_cgroup_per_zone { + /* + * spin_lock to protect the per cgroup LRU + */ + spinlock_t lru_lock; + struct list_head active_list; + struct list_head inactive_list; + unsigned long count[NR_MEM_CGROUP_ZSTAT]; +}; +/* Macro for accessing counter */ +#define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)]) + +struct mem_cgroup_per_node { + struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; +}; + +struct mem_cgroup_lru_info { + struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES]; +}; /* * The memory controller data structure. The memory controller controls both @@ -54,24 +133,30 @@ struct mem_cgroup { /* * Per cgroup active and inactive list, similar to the * per zone LRU lists. - * TODO: Consider making these lists per zone */ - struct list_head active_list; - struct list_head inactive_list; + struct mem_cgroup_lru_info info; + + int prev_priority; /* for recording reclaim priority */ /* - * spin_lock to protect the per cgroup LRU + * statistics. */ - spinlock_t lru_lock; - unsigned long control_type; /* control RSS or RSS+Pagecache */ + struct mem_cgroup_stat stat; }; +static struct mem_cgroup init_mem_cgroup; /* * We use the lower bit of the page->page_cgroup pointer as a bit spin - * lock. We need to ensure that page->page_cgroup is atleast two - * byte aligned (based on comments from Nick Piggin) + * lock. We need to ensure that page->page_cgroup is at least two + * byte aligned (based on comments from Nick Piggin). 
But since + * bit_spin_lock doesn't actually set that lock bit in a non-debug + * uniprocessor kernel, we should avoid setting it here too. */ #define PAGE_CGROUP_LOCK_BIT 0x0 -#define PAGE_CGROUP_LOCK (1 << PAGE_CGROUP_LOCK_BIT) +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) +#define PAGE_CGROUP_LOCK (1 << PAGE_CGROUP_LOCK_BIT) +#else +#define PAGE_CGROUP_LOCK 0x0 +#endif /* * A page_cgroup page is associated with every page descriptor. The @@ -81,109 +166,306 @@ struct page_cgroup { struct list_head lru; /* per cgroup LRU list */ struct page *page; struct mem_cgroup *mem_cgroup; - atomic_t ref_cnt; /* Helpful when pages move b/w */ - /* mapped and cached states */ + int flags; }; +#define PAGE_CGROUP_FLAG_CACHE (0x1) /* charged as cache */ +#define PAGE_CGROUP_FLAG_ACTIVE (0x2) /* page is active in this cgroup */ -enum { - MEM_CGROUP_TYPE_UNSPEC = 0, - MEM_CGROUP_TYPE_MAPPED, - MEM_CGROUP_TYPE_CACHED, - MEM_CGROUP_TYPE_ALL, - MEM_CGROUP_TYPE_MAX, +static int page_cgroup_nid(struct page_cgroup *pc) +{ + return page_to_nid(pc->page); +} + +static enum zone_type page_cgroup_zid(struct page_cgroup *pc) +{ + return page_zonenum(pc->page); +} + +enum charge_type { + MEM_CGROUP_CHARGE_TYPE_CACHE = 0, + MEM_CGROUP_CHARGE_TYPE_MAPPED, + MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */ }; -static struct mem_cgroup init_mem_cgroup; +/* + * Always modified under lru lock. Then, not necessary to preempt_disable() + */ +static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags, + bool charge) +{ + int val = (charge)? 1 : -1; + struct mem_cgroup_stat *stat = &mem->stat; + + VM_BUG_ON(!irqs_disabled()); + if (flags & PAGE_CGROUP_FLAG_CACHE) + __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val); + else + __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val); -static inline -struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) + if (charge) + __mem_cgroup_stat_add_safe(stat, + MEM_CGROUP_STAT_PGPGIN_COUNT, 1); + else + __mem_cgroup_stat_add_safe(stat, + MEM_CGROUP_STAT_PGPGOUT_COUNT, 1); +} + +static struct mem_cgroup_per_zone * +mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid) +{ + return &mem->info.nodeinfo[nid]->zoneinfo[zid]; +} + +static struct mem_cgroup_per_zone * +page_cgroup_zoneinfo(struct page_cgroup *pc) +{ + struct mem_cgroup *mem = pc->mem_cgroup; + int nid = page_cgroup_nid(pc); + int zid = page_cgroup_zid(pc); + + return mem_cgroup_zoneinfo(mem, nid, zid); +} + +static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem, + enum mem_cgroup_zstat_index idx) +{ + int nid, zid; + struct mem_cgroup_per_zone *mz; + u64 total = 0; + + for_each_online_node(nid) + for (zid = 0; zid < MAX_NR_ZONES; zid++) { + mz = mem_cgroup_zoneinfo(mem, nid, zid); + total += MEM_CGROUP_ZSTAT(mz, idx); + } + return total; +} + +static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) { return container_of(cgroup_subsys_state(cont, mem_cgroup_subsys_id), struct mem_cgroup, css); } -static inline struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) { + /* + * mm_update_next_owner() may clear mm->owner to NULL + * if it races with swapoff, page migration, etc. + * So this can be called with p == NULL. 
+ */ + if (unlikely(!p)) + return NULL; + return container_of(task_subsys_state(p, mem_cgroup_subsys_id), struct mem_cgroup, css); } -void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p) +static inline int page_cgroup_locked(struct page *page) { - struct mem_cgroup *mem; - - mem = mem_cgroup_from_task(p); - css_get(&mem->css); - mm->mem_cgroup = mem; + return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); } -void mm_free_cgroup(struct mm_struct *mm) +static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc) { - css_put(&mm->mem_cgroup->css); + VM_BUG_ON(!page_cgroup_locked(page)); + page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK); } -static inline int page_cgroup_locked(struct page *page) +struct page_cgroup *page_get_page_cgroup(struct page *page) { - return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, - &page->page_cgroup); + return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK); } -void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc) +static void lock_page_cgroup(struct page *page) { - int locked; + bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); +} - /* - * While resetting the page_cgroup we might not hold the - * page_cgroup lock. free_hot_cold_page() is an example - * of such a scenario - */ - if (pc) - VM_BUG_ON(!page_cgroup_locked(page)); - locked = (page->page_cgroup & PAGE_CGROUP_LOCK); - page->page_cgroup = ((unsigned long)pc | locked); +static int try_lock_page_cgroup(struct page *page) +{ + return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); } -struct page_cgroup *page_get_page_cgroup(struct page *page) +static void unlock_page_cgroup(struct page *page) { - return (struct page_cgroup *) - (page->page_cgroup & ~PAGE_CGROUP_LOCK); + bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); } -static void __always_inline lock_page_cgroup(struct page *page) +static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz, + struct page_cgroup *pc) { - bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); - VM_BUG_ON(!page_cgroup_locked(page)); + int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE; + + if (from) + MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1; + else + MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1; + + mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false); + list_del(&pc->lru); } -static void __always_inline unlock_page_cgroup(struct page *page) +static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz, + struct page_cgroup *pc) { - bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); + int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE; + + if (!to) { + MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1; + list_add(&pc->lru, &mz->inactive_list); + } else { + MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1; + list_add(&pc->lru, &mz->active_list); + } + mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true); } static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active) { - if (active) - list_move(&pc->lru, &pc->mem_cgroup->active_list); + int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE; + struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc); + + if (from) + MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1; else - list_move(&pc->lru, &pc->mem_cgroup->inactive_list); + MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1; + + if (active) { + MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1; + pc->flags |= PAGE_CGROUP_FLAG_ACTIVE; + list_move(&pc->lru, &mz->active_list); + } else { + 
MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
+		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
+		list_move(&pc->lru, &mz->inactive_list);
+	}
+}
+
+int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
+{
+	int ret;
+
+	task_lock(task);
+	ret = task->mm && mm_match_cgroup(task->mm, mem);
+	task_unlock(task);
+	return ret;
+}
 
 /*
  * This routine assumes that the appropriate zone's lru lock is already held
  */
-void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
+void mem_cgroup_move_lists(struct page *page, bool active)
 {
-	struct mem_cgroup *mem;
-	if (!pc)
+	struct page_cgroup *pc;
+	struct mem_cgroup_per_zone *mz;
+	unsigned long flags;
+
+	if (mem_cgroup_subsys.disabled)
 		return;
-	mem = pc->mem_cgroup;
 
+	/*
+	 * We cannot lock_page_cgroup while holding zone's lru_lock,
+	 * because other holders of lock_page_cgroup can be interrupted
+	 * with an attempt to rotate_reclaimable_page. But we cannot
+	 * safely get to page_cgroup without it, so just try_lock it:
+	 * mem_cgroup_isolate_pages allows for page left on wrong list.
+	 */
+	if (!try_lock_page_cgroup(page))
+		return;
+
+	pc = page_get_page_cgroup(page);
+	if (pc) {
+		mz = page_cgroup_zoneinfo(pc);
+		spin_lock_irqsave(&mz->lru_lock, flags);
+		__mem_cgroup_move_lists(pc, active);
+		spin_unlock_irqrestore(&mz->lru_lock, flags);
+	}
+	unlock_page_cgroup(page);
+}
+
+/*
+ * Calculate mapped_ratio under the memory controller. This will be used in
+ * vmscan.c for determining whether we have to reclaim mapped pages.
+ */
+int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
+{
+	long total, rss;
+
+	/*
+	 * usage is recorded in bytes. But, here, we assume the number of
+	 * physical pages can be represented by "long" on any arch.
+	 */
+	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
+	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
+	return (int)((rss * 100L) / total);
+}
+
+/*
+ * This function is called from vmscan.c. In the page reclaiming loop the
+ * balance between the active and inactive lists is calculated. For memory
+ * controller page reclaiming, we should use the mem_cgroup's imbalance
+ * rather than the zone's global LRU imbalance.
+ */
+long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
+{
+	unsigned long active, inactive;
+	/* active and inactive are the number of pages. 'long' is ok. */
+	active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
+	inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
+	return (long) (active / (inactive + 1));
+}
+
+/*
+ * prev_priority control... this will be used in the memory reclaim path.
+ */
+int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
+{
+	return mem->prev_priority;
+}
+
+void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
+{
+	if (priority < mem->prev_priority)
+		mem->prev_priority = priority;
+}
+
+void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
+{
+	mem->prev_priority = priority;
+}
+
+/*
+ * Calculate # of pages to be scanned in this priority/zone.
+ * See also vmscan.c
+ *
+ * priority starts from "DEF_PRIORITY" and is decremented in each loop.
+ * (see include/linux/mmzone.h) + */ + +long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem, + struct zone *zone, int priority) +{ + long nr_active; + int nid = zone->zone_pgdat->node_id; + int zid = zone_idx(zone); + struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid); + + nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE); + return (nr_active >> priority); +} + +long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem, + struct zone *zone, int priority) +{ + long nr_inactive; + int nid = zone->zone_pgdat->node_id; + int zid = zone_idx(zone); + struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid); - spin_lock(&mem->lru_lock); - __mem_cgroup_move_lists(pc, active); - spin_unlock(&mem->lru_lock); + nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE); + return (nr_inactive >> priority); } unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, @@ -198,44 +480,40 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, unsigned long scan; LIST_HEAD(pc_list); struct list_head *src; - struct page_cgroup *pc; + struct page_cgroup *pc, *tmp; + int nid = z->zone_pgdat->node_id; + int zid = zone_idx(z); + struct mem_cgroup_per_zone *mz; + BUG_ON(!mem_cont); + mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); if (active) - src = &mem_cont->active_list; + src = &mz->active_list; else - src = &mem_cont->inactive_list; + src = &mz->inactive_list; + - spin_lock(&mem_cont->lru_lock); - for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) { - pc = list_entry(src->prev, struct page_cgroup, lru); + spin_lock(&mz->lru_lock); + scan = 0; + list_for_each_entry_safe_reverse(pc, tmp, src, lru) { + if (scan >= nr_to_scan) + break; page = pc->page; - VM_BUG_ON(!pc); + + if (unlikely(!PageLRU(page))) + continue; if (PageActive(page) && !active) { __mem_cgroup_move_lists(pc, true); - scan--; continue; } if (!PageActive(page) && active) { __mem_cgroup_move_lists(pc, false); - scan--; continue; } - /* - * Reclaim, per zone - * TODO: make the active/inactive lists per zone - */ - if (page_zone(page) != z) - continue; - - /* - * Check if the meta page went away from under us - */ - if (!list_empty(&pc->lru)) - list_move(&pc->lru, &pc_list); - else - continue; + scan++; + list_move(&pc->lru, &pc_list); if (__isolate_lru_page(page, mode) == 0) { list_move(&page->lru, dst); @@ -244,7 +522,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, } list_splice(&pc_list, src); - spin_unlock(&mem_cont->lru_lock); + spin_unlock(&mz->lru_lock); *scanned = scan; return nr_taken; @@ -256,313 +534,636 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, * 0 if the charge was successful * < 0 if the cgroup is over its limit */ -int mem_cgroup_charge(struct page *page, struct mm_struct *mm) +static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, + gfp_t gfp_mask, enum charge_type ctype, + struct mem_cgroup *memcg) { struct mem_cgroup *mem; - struct page_cgroup *pc, *race_pc; + struct page_cgroup *pc; unsigned long flags; unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES; + struct mem_cgroup_per_zone *mz; - /* - * Should page_cgroup's go to their own slab? 
- * One could optimize the performance of the charging routine - * by saving a bit in the page_flags and using it as a lock - * to see if the cgroup page already has a page_cgroup associated - * with it - */ -retry: - lock_page_cgroup(page); - pc = page_get_page_cgroup(page); - /* - * The page_cgroup exists and the page has already been accounted - */ - if (pc) { - if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) { - /* this page is under being uncharged ? */ - unlock_page_cgroup(page); - cpu_relax(); - goto retry; - } else - goto done; - } - - unlock_page_cgroup(page); - - pc = kzalloc(sizeof(struct page_cgroup), GFP_KERNEL); - if (pc == NULL) + pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask); + if (unlikely(pc == NULL)) goto err; - rcu_read_lock(); /* - * We always charge the cgroup the mm_struct belongs to - * the mm_struct's mem_cgroup changes on task migration if the + * We always charge the cgroup the mm_struct belongs to. + * The mm_struct's mem_cgroup changes on task migration if the * thread group leader migrates. It's possible that mm is not * set, if so charge the init_mm (happens for pagecache usage). */ - if (!mm) - mm = &init_mm; - - mem = rcu_dereference(mm->mem_cgroup); - /* - * For every charge from the cgroup, increment reference - * count - */ - css_get(&mem->css); - rcu_read_unlock(); + if (likely(!memcg)) { + rcu_read_lock(); + mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); + if (unlikely(!mem)) { + rcu_read_unlock(); + kmem_cache_free(page_cgroup_cache, pc); + return 0; + } + /* + * For every charge from the cgroup, increment reference count + */ + css_get(&mem->css); + rcu_read_unlock(); + } else { + mem = memcg; + css_get(&memcg->css); + } - /* - * If we created the page_cgroup, we should free it on exceeding - * the cgroup limit. - */ while (res_counter_charge(&mem->res, PAGE_SIZE)) { - if (try_to_free_mem_cgroup_pages(mem)) + if (!(gfp_mask & __GFP_WAIT)) + goto out; + + if (try_to_free_mem_cgroup_pages(mem, gfp_mask)) continue; /* - * try_to_free_mem_cgroup_pages() might not give us a full - * picture of reclaim. Some pages are reclaimed and might be - * moved to swap cache or just unmapped from the cgroup. - * Check the limit again to see if the reclaim reduced the - * current usage of the cgroup before giving up - */ + * try_to_free_mem_cgroup_pages() might not give us a full + * picture of reclaim. Some pages are reclaimed and might be + * moved to swap cache or just unmapped from the cgroup. + * Check the limit again to see if the reclaim reduced the + * current usage of the cgroup before giving up + */ if (res_counter_check_under_limit(&mem->res)) continue; - /* - * Since we control both RSS and cache, we end up with a - * very interesting scenario where we end up reclaiming - * memory (essentially RSS), since the memory is pushed - * to swap cache, we eventually end up adding those - * pages back to our list. Hence we give ourselves a - * few chances before we fail - */ - else if (nr_retries--) { - congestion_wait(WRITE, HZ/10); - continue; - } - css_put(&mem->css); - mem_cgroup_out_of_memory(mem, GFP_KERNEL); - goto free_pc; + if (!nr_retries--) { + mem_cgroup_out_of_memory(mem, gfp_mask); + goto out; + } } - lock_page_cgroup(page); + pc->mem_cgroup = mem; + pc->page = page; /* - * Check if somebody else beat us to allocating the page_cgroup + * If a page is accounted as a page cache, insert to inactive list. + * If anon, insert to active list. 
 	 */
-	race_pc = page_get_page_cgroup(page);
-	if (race_pc) {
-		kfree(pc);
-		pc = race_pc;
-		atomic_inc(&pc->ref_cnt);
+	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
+		pc->flags = PAGE_CGROUP_FLAG_CACHE;
+	else
+		pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
+
+	lock_page_cgroup(page);
+	if (unlikely(page_get_page_cgroup(page))) {
+		unlock_page_cgroup(page);
 		res_counter_uncharge(&mem->res, PAGE_SIZE);
 		css_put(&mem->css);
+		kmem_cache_free(page_cgroup_cache, pc);
 		goto done;
 	}
-
-	atomic_set(&pc->ref_cnt, 1);
-	pc->mem_cgroup = mem;
-	pc->page = page;
 	page_assign_page_cgroup(page, pc);
 
-	spin_lock_irqsave(&mem->lru_lock, flags);
-	list_add(&pc->lru, &mem->active_list);
-	spin_unlock_irqrestore(&mem->lru_lock, flags);
+	mz = page_cgroup_zoneinfo(pc);
+	spin_lock_irqsave(&mz->lru_lock, flags);
+	__mem_cgroup_add_list(mz, pc);
+	spin_unlock_irqrestore(&mz->lru_lock, flags);
 
-done:
 	unlock_page_cgroup(page);
+done:
 	return 0;
-free_pc:
-	kfree(pc);
+out:
+	css_put(&mem->css);
+	kmem_cache_free(page_cgroup_cache, pc);
 err:
 	return -ENOMEM;
 }
 
-/*
- * See if the cached pages should be charged at all?
- */
-int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm)
+int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 {
-	struct mem_cgroup *mem;
-	if (!mm)
+	if (mem_cgroup_subsys.disabled)
+		return 0;
+
+	/*
+	 * If already mapped, we don't have to account.
+	 * If page cache, page->mapping has address_space.
+	 * But page->mapping may hold a stale anon_vma pointer;
+	 * detect that case with a PageAnon() check: a newly mapped
+	 * anonymous page's page->mapping is NULL.
+	 */
+	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
+		return 0;
+	if (unlikely(!mm))
 		mm = &init_mm;
+	return mem_cgroup_charge_common(page, mm, gfp_mask,
+				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
+}
 
-	mem = rcu_dereference(mm->mem_cgroup);
-	if (mem->control_type == MEM_CGROUP_TYPE_ALL)
-		return mem_cgroup_charge(page, mm);
-	else
+int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
+				gfp_t gfp_mask)
+{
+	if (mem_cgroup_subsys.disabled)
 		return 0;
+
+	/*
+	 * Corner case handling. This is usually called from
+	 * add_to_page_cache(). But some filesystems (shmem) precharge the
+	 * page before calling it and call add_to_page_cache() with
+	 * GFP_NOWAIT.
+	 *
+	 * For the GFP_NOWAIT case, the page may be pre-charged before
+	 * calling add_to_page_cache(). (See shmem.c.) Check for that here
+	 * to avoid charging twice. (It works, but pays a slightly larger
+	 * cost.)
+	 */
+	if (!(gfp_mask & __GFP_WAIT)) {
+		struct page_cgroup *pc;
+
+		lock_page_cgroup(page);
+		pc = page_get_page_cgroup(page);
+		if (pc) {
+			VM_BUG_ON(pc->page != page);
+			VM_BUG_ON(!pc->mem_cgroup);
+			unlock_page_cgroup(page);
+			return 0;
+		}
+		unlock_page_cgroup(page);
+	}
+
+	if (unlikely(!mm))
+		mm = &init_mm;
+
+	return mem_cgroup_charge_common(page, mm, gfp_mask,
+				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
 }
 
 /*
- * Uncharging is always a welcome operation, we never complain, simply
- * uncharge.
+ * uncharge if !page_mapped(page)
  */
-void mem_cgroup_uncharge(struct page_cgroup *pc)
+static void
+__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 {
+	struct page_cgroup *pc;
 	struct mem_cgroup *mem;
-	struct page *page;
+	struct mem_cgroup_per_zone *mz;
 	unsigned long flags;
 
+	if (mem_cgroup_subsys.disabled)
+		return;
+
 	/*
-	 * This can handle cases when a page is not charged at all and we
-	 * are switching between handling the control_type.
+	 * Check if our page_cgroup is valid
 	 */
-	if (!pc)
-		return;
+	lock_page_cgroup(page);
+	pc = page_get_page_cgroup(page);
+	if (unlikely(!pc))
+		goto unlock;
 
-	if (atomic_dec_and_test(&pc->ref_cnt)) {
-		page = pc->page;
-		lock_page_cgroup(page);
+	VM_BUG_ON(pc->page != page);
+
+	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
+	    && ((pc->flags & PAGE_CGROUP_FLAG_CACHE)
+		|| page_mapped(page)))
+		goto unlock;
+
+	mz = page_cgroup_zoneinfo(pc);
+	spin_lock_irqsave(&mz->lru_lock, flags);
+	__mem_cgroup_remove_list(mz, pc);
+	spin_unlock_irqrestore(&mz->lru_lock, flags);
+
+	page_assign_page_cgroup(page, NULL);
+	unlock_page_cgroup(page);
+
+	mem = pc->mem_cgroup;
+	res_counter_uncharge(&mem->res, PAGE_SIZE);
+	css_put(&mem->css);
+
+	kmem_cache_free(page_cgroup_cache, pc);
+	return;
+unlock:
+	unlock_page_cgroup(page);
+}
+
+void mem_cgroup_uncharge_page(struct page *page)
+{
+	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
+}
+
+void mem_cgroup_uncharge_cache_page(struct page *page)
+{
+	VM_BUG_ON(page_mapped(page));
+	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
+}
+
+/*
+ * Before starting migration, account against the new page.
+ */
+int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
+{
+	struct page_cgroup *pc;
+	struct mem_cgroup *mem = NULL;
+	enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
+	int ret = 0;
+
+	if (mem_cgroup_subsys.disabled)
+		return 0;
+
+	lock_page_cgroup(page);
+	pc = page_get_page_cgroup(page);
+	if (pc) {
 		mem = pc->mem_cgroup;
+		css_get(&mem->css);
+		if (pc->flags & PAGE_CGROUP_FLAG_CACHE)
+			ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
+	}
+	unlock_page_cgroup(page);
+	if (mem) {
+		ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
+			ctype, mem);
 		css_put(&mem->css);
-		page_assign_page_cgroup(page, NULL);
-		unlock_page_cgroup(page);
-		res_counter_uncharge(&mem->res, PAGE_SIZE);
-
-		spin_lock_irqsave(&mem->lru_lock, flags);
-		list_del_init(&pc->lru);
-		spin_unlock_irqrestore(&mem->lru_lock, flags);
-		kfree(pc);
 	}
+	return ret;
 }
 
-int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
+/* remove the redundant charge if migration failed */
+void mem_cgroup_end_migration(struct page *newpage)
 {
-	*tmp = memparse(buf, &buf);
-	if (*buf != '\0')
-		return -EINVAL;
-
 	/*
-	 * Round up the value to the closest page size
+	 * On success, page->mapping is not NULL.
+	 * Special rollback care is necessary when
+	 * 1. migration fails (newpage->mapping is cleared in this case);
+	 * 2. the newpage was moved but not remapped again because the task
+	 *    exits and the newpage is obsolete. In this case, the new page
+	 *    may be a swapcache, so we always call
+	 *    mem_cgroup_uncharge_page() to avoid a mess; the page_cgroup
+	 *    is removed only if it is unnecessary. File cache pages are
+	 *    still on the radix-tree and need no special care.
 	 */
-	*tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
-	return 0;
+	if (!newpage->mapping)
+		__mem_cgroup_uncharge_common(newpage,
+					 MEM_CGROUP_CHARGE_TYPE_FORCE);
+	else if (PageAnon(newpage))
+		mem_cgroup_uncharge_page(newpage);
 }
 
-static ssize_t mem_cgroup_read(struct cgroup *cont,
-			struct cftype *cft, struct file *file,
-			char __user *userbuf, size_t nbytes, loff_t *ppos)
+/*
+ * Try to shrink memory usage under the specified resource controller.
+ * This is typically used to reclaim shmem pages, to reduce the side
+ * effect of page allocation from shmem, which some mem_cgroups use.
+ */
+int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
 {
-	return res_counter_read(&mem_cgroup_from_cont(cont)->res,
-				cft->private, userbuf, nbytes, ppos,
-				NULL);
+	struct mem_cgroup *mem;
+	int progress = 0;
+	int retry = MEM_CGROUP_RECLAIM_RETRIES;
+
+	if (mem_cgroup_subsys.disabled)
+		return 0;
+	if (!mm)
+		return 0;
+
+	rcu_read_lock();
+	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+	if (unlikely(!mem)) {
+		rcu_read_unlock();
+		return 0;
+	}
+	css_get(&mem->css);
+	rcu_read_unlock();
+
+	do {
+		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
+		progress += res_counter_check_under_limit(&mem->res);
+	} while (!progress && --retry);
+
+	css_put(&mem->css);
+	if (!retry)
+		return -ENOMEM;
+	return 0;
 }
 
-static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
-				struct file *file, const char __user *userbuf,
-				size_t nbytes, loff_t *ppos)
+int mem_cgroup_resize_limit(struct mem_cgroup *memcg, unsigned long long val)
 {
-	return res_counter_write(&mem_cgroup_from_cont(cont)->res,
-				cft->private, userbuf, nbytes, ppos,
-				mem_cgroup_write_strategy);
+
+	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+	int progress;
+	int ret = 0;
+
+	while (res_counter_set_limit(&memcg->res, val)) {
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+		if (!retry_count) {
+			ret = -EBUSY;
+			break;
+		}
+		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL);
+		if (!progress)
+			retry_count--;
+	}
+	return ret;
 }
 
-static ssize_t mem_control_type_write(struct cgroup *cont,
-					struct cftype *cft, struct file *file,
-					const char __user *userbuf,
-					size_t nbytes, loff_t *pos)
-{
-	int ret;
-	char *buf, *end;
-	unsigned long tmp;
-	struct mem_cgroup *mem;
 
-	mem = mem_cgroup_from_cont(cont);
-	buf = kmalloc(nbytes + 1, GFP_KERNEL);
-	ret = -ENOMEM;
-	if (buf == NULL)
-		goto out;
+/*
+ * This routine traverses the page_cgroups on the given list and drops them
+ * all. *And* it doesn't reclaim the pages themselves, it just removes the
+ * page_cgroups.
+ */
+#define FORCE_UNCHARGE_BATCH	(128)
+static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
+			    struct mem_cgroup_per_zone *mz,
+			    int active)
+{
+	struct page_cgroup *pc;
+	struct page *page;
+	int count = FORCE_UNCHARGE_BATCH;
+	unsigned long flags;
+	struct list_head *list;
 
-	buf[nbytes] = 0;
-	ret = -EFAULT;
-	if (copy_from_user(buf, userbuf, nbytes))
-		goto out_free;
+	if (active)
+		list = &mz->active_list;
+	else
+		list = &mz->inactive_list;
 
-	ret = -EINVAL;
-	tmp = simple_strtoul(buf, &end, 10);
-	if (*end != '\0')
-		goto out_free;
+	spin_lock_irqsave(&mz->lru_lock, flags);
+	while (!list_empty(list)) {
+		pc = list_entry(list->prev, struct page_cgroup, lru);
+		page = pc->page;
+		get_page(page);
+		spin_unlock_irqrestore(&mz->lru_lock, flags);
+		/*
+		 * Check if this page is on the LRU. A !LRU page can be
+		 * found if it's under page migration.
+		 */
+		if (PageLRU(page)) {
+			__mem_cgroup_uncharge_common(page,
+					MEM_CGROUP_CHARGE_TYPE_FORCE);
+			put_page(page);
+			if (--count <= 0) {
+				count = FORCE_UNCHARGE_BATCH;
+				cond_resched();
+			}
+		} else
+			cond_resched();
+		spin_lock_irqsave(&mz->lru_lock, flags);
+	}
+	spin_unlock_irqrestore(&mz->lru_lock, flags);
+}
 
-	if (tmp <= MEM_CGROUP_TYPE_UNSPEC || tmp >= MEM_CGROUP_TYPE_MAX)
-		goto out_free;
+/*
+ * Make the mem_cgroup's charge 0 if there is no task.
+ * This enables deleting this mem_cgroup.
+ */ +static int mem_cgroup_force_empty(struct mem_cgroup *mem) +{ + int ret = -EBUSY; + int node, zid; - mem->control_type = tmp; - ret = nbytes; -out_free: - kfree(buf); + css_get(&mem->css); + /* + * page reclaim code (kswapd etc..) will move pages between + * active_list <-> inactive_list while we don't take a lock. + * So, we have to do loop here until all lists are empty. + */ + while (mem->res.usage > 0) { + if (atomic_read(&mem->css.cgroup->count) > 0) + goto out; + for_each_node_state(node, N_POSSIBLE) + for (zid = 0; zid < MAX_NR_ZONES; zid++) { + struct mem_cgroup_per_zone *mz; + mz = mem_cgroup_zoneinfo(mem, node, zid); + /* drop all page_cgroup in active_list */ + mem_cgroup_force_empty_list(mem, mz, 1); + /* drop all page_cgroup in inactive_list */ + mem_cgroup_force_empty_list(mem, mz, 0); + } + } + ret = 0; out: + css_put(&mem->css); + return ret; +} + +static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) +{ + return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res, + cft->private); +} +/* + * The user of this function is... + * RES_LIMIT. + */ +static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft, + const char *buffer) +{ + struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); + unsigned long long val; + int ret; + + switch (cft->private) { + case RES_LIMIT: + /* This function does all necessary parse...reuse it */ + ret = res_counter_memparse_write_strategy(buffer, &val); + if (!ret) + ret = mem_cgroup_resize_limit(memcg, val); + break; + default: + ret = -EINVAL; /* should be BUG() ? */ + break; + } return ret; } -static ssize_t mem_control_type_read(struct cgroup *cont, - struct cftype *cft, - struct file *file, char __user *userbuf, - size_t nbytes, loff_t *ppos) +static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) { - unsigned long val; - char buf[64], *s; struct mem_cgroup *mem; mem = mem_cgroup_from_cont(cont); - s = buf; - val = mem->control_type; - s += sprintf(s, "%lu\n", val); - return simple_read_from_buffer((void __user *)userbuf, nbytes, - ppos, buf, s - buf); + switch (event) { + case RES_MAX_USAGE: + res_counter_reset_max(&mem->res); + break; + case RES_FAILCNT: + res_counter_reset_failcnt(&mem->res); + break; + } + return 0; +} + +static int mem_force_empty_write(struct cgroup *cont, unsigned int event) +{ + return mem_cgroup_force_empty(mem_cgroup_from_cont(cont)); +} + +static const struct mem_cgroup_stat_desc { + const char *msg; + u64 unit; +} mem_cgroup_stat_desc[] = { + [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, }, + [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, }, + [MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, }, + [MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, }, +}; + +static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, + struct cgroup_map_cb *cb) +{ + struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont); + struct mem_cgroup_stat *stat = &mem_cont->stat; + int i; + + for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) { + s64 val; + + val = mem_cgroup_read_stat(stat, i); + val *= mem_cgroup_stat_desc[i].unit; + cb->fill(cb, mem_cgroup_stat_desc[i].msg, val); + } + /* showing # of active pages */ + { + unsigned long active, inactive; + + inactive = mem_cgroup_get_all_zonestat(mem_cont, + MEM_CGROUP_ZSTAT_INACTIVE); + active = mem_cgroup_get_all_zonestat(mem_cont, + MEM_CGROUP_ZSTAT_ACTIVE); + cb->fill(cb, "active", (active) * PAGE_SIZE); + cb->fill(cb, "inactive", (inactive) * PAGE_SIZE); + } + return 0; } static struct cftype mem_cgroup_files[] = { { .name = 
"usage_in_bytes", .private = RES_USAGE, - .read = mem_cgroup_read, + .read_u64 = mem_cgroup_read, + }, + { + .name = "max_usage_in_bytes", + .private = RES_MAX_USAGE, + .trigger = mem_cgroup_reset, + .read_u64 = mem_cgroup_read, }, { .name = "limit_in_bytes", .private = RES_LIMIT, - .write = mem_cgroup_write, - .read = mem_cgroup_read, + .write_string = mem_cgroup_write, + .read_u64 = mem_cgroup_read, }, { .name = "failcnt", .private = RES_FAILCNT, - .read = mem_cgroup_read, + .trigger = mem_cgroup_reset, + .read_u64 = mem_cgroup_read, }, { - .name = "control_type", - .write = mem_control_type_write, - .read = mem_control_type_read, + .name = "force_empty", + .trigger = mem_force_empty_write, + }, + { + .name = "stat", + .read_map = mem_control_stat_show, }, }; -static struct mem_cgroup init_mem_cgroup; +static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) +{ + struct mem_cgroup_per_node *pn; + struct mem_cgroup_per_zone *mz; + int zone, tmp = node; + /* + * This routine is called against possible nodes. + * But it's BUG to call kmalloc() against offline node. + * + * TODO: this routine can waste much memory for nodes which will + * never be onlined. It's better to use memory hotplug callback + * function. + */ + if (!node_state(node, N_NORMAL_MEMORY)) + tmp = -1; + pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp); + if (!pn) + return 1; + + mem->info.nodeinfo[node] = pn; + memset(pn, 0, sizeof(*pn)); + + for (zone = 0; zone < MAX_NR_ZONES; zone++) { + mz = &pn->zoneinfo[zone]; + INIT_LIST_HEAD(&mz->active_list); + INIT_LIST_HEAD(&mz->inactive_list); + spin_lock_init(&mz->lru_lock); + } + return 0; +} + +static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) +{ + kfree(mem->info.nodeinfo[node]); +} + +static struct mem_cgroup *mem_cgroup_alloc(void) +{ + struct mem_cgroup *mem; + + if (sizeof(*mem) < PAGE_SIZE) + mem = kmalloc(sizeof(*mem), GFP_KERNEL); + else + mem = vmalloc(sizeof(*mem)); + + if (mem) + memset(mem, 0, sizeof(*mem)); + return mem; +} + +static void mem_cgroup_free(struct mem_cgroup *mem) +{ + if (sizeof(*mem) < PAGE_SIZE) + kfree(mem); + else + vfree(mem); +} + static struct cgroup_subsys_state * mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) { struct mem_cgroup *mem; + int node; if (unlikely((cont->parent) == NULL)) { mem = &init_mem_cgroup; - init_mm.mem_cgroup = mem; - } else - mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL); - - if (mem == NULL) - return NULL; + page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC); + } else { + mem = mem_cgroup_alloc(); + if (!mem) + return ERR_PTR(-ENOMEM); + } res_counter_init(&mem->res); - INIT_LIST_HEAD(&mem->active_list); - INIT_LIST_HEAD(&mem->inactive_list); - spin_lock_init(&mem->lru_lock); - mem->control_type = MEM_CGROUP_TYPE_ALL; + + for_each_node_state(node, N_POSSIBLE) + if (alloc_mem_cgroup_per_zone_info(mem, node)) + goto free_out; + return &mem->css; +free_out: + for_each_node_state(node, N_POSSIBLE) + free_mem_cgroup_per_zone_info(mem, node); + if (cont->parent != NULL) + mem_cgroup_free(mem); + return ERR_PTR(-ENOMEM); +} + +static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss, + struct cgroup *cont) +{ + struct mem_cgroup *mem = mem_cgroup_from_cont(cont); + mem_cgroup_force_empty(mem); } static void mem_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cont) { - kfree(mem_cgroup_from_cont(cont)); + int node; + struct mem_cgroup *mem = mem_cgroup_from_cont(cont); + + for_each_node_state(node, N_POSSIBLE) + 
free_mem_cgroup_per_zone_info(mem, node); + + mem_cgroup_free(mem_cgroup_from_cont(cont)); } static int mem_cgroup_populate(struct cgroup_subsys *ss, @@ -587,31 +1188,24 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss, mem = mem_cgroup_from_cont(cont); old_mem = mem_cgroup_from_cont(old_cont); - if (mem == old_mem) - goto out; - /* * Only thread group leaders are allowed to migrate, the mm_struct is * in effect owned by the leader */ - if (p->tgid != p->pid) + if (!thread_group_leader(p)) goto out; - css_get(&mem->css); - rcu_assign_pointer(mm->mem_cgroup, mem); - css_put(&old_mem->css); - out: mmput(mm); - return; } struct cgroup_subsys mem_cgroup_subsys = { .name = "memory", .subsys_id = mem_cgroup_subsys_id, .create = mem_cgroup_create, + .pre_destroy = mem_cgroup_pre_destroy, .destroy = mem_cgroup_destroy, .populate = mem_cgroup_populate, .attach = mem_cgroup_move_task, - .early_init = 1, + .early_init = 0, };
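
Two of the mechanisms in this patch are easiest to see in isolation. First, the page->page_cgroup word: it packs a pointer and a lock into one field, with bit 0 (PAGE_CGROUP_LOCK_BIT) serving as a bit-spinlock, which is why the page_cgroup must be at least two-byte aligned, and why the patch only ORs the lock bit into the stored word on configurations (SMP or DEBUG_SPINLOCK) where bit_spin_lock() really sets it. The sketch below is a minimal userspace model of that encoding, assuming the SMP-style behaviour where the bit is always set while locked; the names mirror the patch, but it uses C11 atomics as a stand-in for the kernel's bit_spin_lock machinery, so it is an illustration, not kernel code.

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_CGROUP_LOCK 0x1UL	/* models 1 << PAGE_CGROUP_LOCK_BIT */

struct page_cgroup { int flags; };
struct page { _Atomic uintptr_t page_cgroup; };

static void lock_page_cgroup(struct page *page)
{
	uintptr_t old;

	/* spin until we observe the lock bit clear and manage to set it */
	do {
		old = atomic_load(&page->page_cgroup) & ~PAGE_CGROUP_LOCK;
	} while (!atomic_compare_exchange_weak(&page->page_cgroup, &old,
					       old | PAGE_CGROUP_LOCK));
}

static void unlock_page_cgroup(struct page *page)
{
	atomic_fetch_and(&page->page_cgroup, ~PAGE_CGROUP_LOCK);
}

/* caller holds the lock, as the patch's page_assign_page_cgroup() requires */
static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	atomic_store(&page->page_cgroup, (uintptr_t)pc | PAGE_CGROUP_LOCK);
}

static struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	/* mask off the lock bit to recover the real pointer */
	return (struct page_cgroup *)
		(atomic_load(&page->page_cgroup) & ~PAGE_CGROUP_LOCK);
}

int main(void)
{
	static struct page_cgroup pc;	/* statics are suitably aligned */
	struct page page = { 0 };

	/* the encoding only works if bit 0 of the pointer is free */
	assert(((uintptr_t)&pc & PAGE_CGROUP_LOCK) == 0);

	lock_page_cgroup(&page);
	page_assign_page_cgroup(&page, &pc);
	unlock_page_cgroup(&page);

	printf("recovered ok: %d\n", page_get_page_cgroup(&page) == &pc);
	return 0;
}

The uniprocessor non-debug case in the patch simply defines PAGE_CGROUP_LOCK as 0, so the stored word is just the bare pointer and the same masking code still works.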
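Second, the statistics: mem_cgroup_charge_statistics() bumps a slot belonging to the current CPU (called with interrupts disabled, hence the "safe" add with no preempt accounting), and mem_cgroup_read_stat() folds every CPU's slot at read time. That trades a slower, racy-but-approximate read for contention-free writes. A compact model of that split, assuming a fixed NR_CPUS of 4 and an explicit cpu argument in place of smp_processor_id() (both are stand-ins):

#include <stdio.h>

#define NR_CPUS 4	/* stand-in; the kernel sizes this at build time */

enum mcg_stat_index {
	MCG_STAT_CACHE, MCG_STAT_RSS, MCG_STAT_PGPGIN, MCG_STAT_PGPGOUT,
	MCG_NSTATS,
};

/* the patch pads each of these to a cacheline to avoid false sharing */
struct mcg_stat_cpu { long long count[MCG_NSTATS]; };
struct mcg_stat { struct mcg_stat_cpu cpustat[NR_CPUS]; };

/* writer: touches only its own CPU's slot, so no shared lock is needed */
static void mcg_stat_add(struct mcg_stat *s, int cpu,
			 enum mcg_stat_index idx, int val)
{
	s->cpustat[cpu].count[idx] += val;
}

/* reader: folds every per-CPU slot, like mem_cgroup_read_stat() */
static long long mcg_stat_read(const struct mcg_stat *s,
			       enum mcg_stat_index idx)
{
	long long ret = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		ret += s->cpustat[cpu].count[idx];
	return ret;
}

int main(void)
{
	struct mcg_stat s = { 0 };

	/* pretend three pages were charged as RSS on different CPUs */
	mcg_stat_add(&s, 0, MCG_STAT_RSS, 1);
	mcg_stat_add(&s, 1, MCG_STAT_RSS, 1);
	mcg_stat_add(&s, 3, MCG_STAT_RSS, 1);
	mcg_stat_add(&s, 0, MCG_STAT_PGPGIN, 3);

	printf("rss=%lld pgpgin=%lld\n",
	       mcg_stat_read(&s, MCG_STAT_RSS),
	       mcg_stat_read(&s, MCG_STAT_PGPGIN));
	return 0;
}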
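Finally, the charging policy of mem_cgroup_charge_common(): charge the counter; if that fails, reclaim and re-check, and only burn one of the MEM_CGROUP_RECLAIM_RETRIES attempts when a round makes no progress, ending in a cgroup-local OOM. The toy below reproduces just that control flow; the res_counter struct and reclaim() are simplified stand-ins for the kernel's res_counter API and try_to_free_mem_cgroup_pages(), and the patch's extra !__GFP_WAIT bail-out and second-chance res_counter_check_under_limit() test are omitted.

#include <errno.h>
#include <stdio.h>

#define MEM_CGROUP_RECLAIM_RETRIES 5

/* toy counter in pages; the kernel's res_counter tracks bytes under a lock */
struct res_counter { long usage, limit; };

static int res_counter_charge(struct res_counter *c, long n)
{
	if (c->usage + n > c->limit)
		return -ENOMEM;	/* over limit: caller must reclaim and retry */
	c->usage += n;
	return 0;
}

/* stand-in for try_to_free_mem_cgroup_pages(): frees a page every other call */
static int reclaim(struct res_counter *c)
{
	static int tick;

	if (tick++ & 1)
		return 0;
	if (c->usage > 0)
		c->usage--;
	return 1;
}

static int charge_one_page(struct res_counter *c)
{
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;

	while (res_counter_charge(c, 1)) {
		if (reclaim(c))
			continue;	/* progress: retry without burning an attempt */
		if (!nr_retries--)
			return -ENOMEM;	/* where the patch calls mem_cgroup_out_of_memory() */
	}
	return 0;
}

int main(void)
{
	struct res_counter c = { .usage = 4, .limit = 4 };

	printf("charge: %d (usage=%ld/%ld)\n",
	       charge_one_page(&c), c.usage, c.limit);
	return 0;
}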