RPC/RDMA: maintain the RPC task bytes-sent statistic.
[safe/jmp/linux-2.6] mm/memcontrol.c
index f46b861..36896f3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -250,6 +250,14 @@ static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
 
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 {
+       /*
+        * mm_update_next_owner() may clear mm->owner to NULL
+        * if it races with swapoff, page migration, etc.
+        * So this can be called with p == NULL.
+        */
+       if (unlikely(!p))
+               return NULL;
+
        return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
                                struct mem_cgroup, css);
 }
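
With mm->owner able to become NULL, every caller that maps an mm to its mem_cgroup must now tolerate a NULL result. A minimal sketch of the caller pattern the rest of this diff applies (compare the charge and shrink_usage hunks below); treating "no owning group" as success via return 0 is the illustrative choice here:

	struct mem_cgroup *mem;

	rcu_read_lock();
	/* mm->owner may be cleared by mm_update_next_owner() */
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!mem)) {
		rcu_read_unlock();
		return 0;		/* nothing to account against */
	}
	css_get(&mem->css);		/* pin the group before leaving RCU */
	rcu_read_unlock();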
@@ -354,6 +362,9 @@ void mem_cgroup_move_lists(struct page *page, bool active)
        struct mem_cgroup_per_zone *mz;
        unsigned long flags;
 
+       if (mem_cgroup_subsys.disabled)
+               return;
+
        /*
         * We cannot lock_page_cgroup while holding zone's lru_lock,
         * because other holders of lock_page_cgroup can be interrupted
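
mem_cgroup_subsys.disabled is set at boot by the cgroup_disable=memory parameter. The convention this diff converges on: hooks invoked directly from generic mm code (mem_cgroup_move_lists() here, mem_cgroup_charge() and friends below) test the flag themselves, while callbacks invoked by the cgroup core (populate, attach, force_empty) drop the test, since the core never calls into a disabled subsystem; the later hunks remove exactly those now-redundant checks. A hedged sketch of the entry-point shape (the hook name is illustrative, not a real function):

	/* illustrative hook called from generic mm code */
	void mem_cgroup_some_mm_hook(struct page *page)
	{
		if (mem_cgroup_subsys.disabled)	/* cgroup_disable=memory */
			return;
		/* ... accounting work ... */
	}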
@@ -533,33 +544,8 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
        unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
        struct mem_cgroup_per_zone *mz;
 
-       if (mem_cgroup_subsys.disabled)
-               return 0;
-
-       /*
-        * Should page_cgroup's go to their own slab?
-        * One could optimize the performance of the charging routine
-        * by saving a bit in the page_flags and using it as a lock
-        * to see if the cgroup page already has a page_cgroup associated
-        * with it
-        */
-retry:
-       lock_page_cgroup(page);
-       pc = page_get_page_cgroup(page);
-       /*
-        * The page_cgroup exists and
-        * the page has already been accounted.
-        */
-       if (pc) {
-               VM_BUG_ON(pc->page != page);
-               VM_BUG_ON(!pc->mem_cgroup);
-               unlock_page_cgroup(page);
-               goto done;
-       }
-       unlock_page_cgroup(page);
-
        pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask);
-       if (pc == NULL)
+       if (unlikely(pc == NULL))
                goto err;
 
        /*
@@ -571,6 +557,11 @@ retry:
        if (likely(!memcg)) {
                rcu_read_lock();
                mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+               if (unlikely(!mem)) {
+                       rcu_read_unlock();
+                       kmem_cache_free(page_cgroup_cache, pc);
+                       return 0;
+               }
                /*
                 * For every charge from the cgroup, increment reference count
                 */
@@ -616,17 +607,12 @@ retry:
                pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
 
        lock_page_cgroup(page);
-       if (page_get_page_cgroup(page)) {
+       if (unlikely(page_get_page_cgroup(page))) {
                unlock_page_cgroup(page);
-               /*
-                * Another charge has been added to this page already.
-                * We take lock_page_cgroup(page) again and read
-                * page->cgroup, increment refcnt.... just retry is OK.
-                */
                res_counter_uncharge(&mem->res, PAGE_SIZE);
                css_put(&mem->css);
                kmem_cache_free(page_cgroup_cache, pc);
-               goto retry;
+               goto done;
        }
        page_assign_page_cgroup(page, pc);
 
@@ -647,6 +633,9 @@ err:
 
 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 {
+       if (mem_cgroup_subsys.disabled)
+               return 0;
+
        /*
         * If already mapped, we don't have to account.
         * If page cache, page->mapping has address_space.
@@ -665,8 +654,35 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
 {
+       if (mem_cgroup_subsys.disabled)
+               return 0;
+
+       /*
+        * Corner case handling. This is usually called from
+        * add_to_page_cache(), but some filesystems (shmem) pre-charge
+        * the page before calling it and then invoke add_to_page_cache()
+        * with GFP_NOWAIT.
+        *
+        * In that case the page may already be charged; check for that
+        * here and avoid charging twice. (Works, but costs a bit more.)
+        */
+       if (!(gfp_mask & __GFP_WAIT)) {
+               struct page_cgroup *pc;
+
+               lock_page_cgroup(page);
+               pc = page_get_page_cgroup(page);
+               if (pc) {
+                       VM_BUG_ON(pc->page != page);
+                       VM_BUG_ON(!pc->mem_cgroup);
+                       unlock_page_cgroup(page);
+                       return 0;
+               }
+               unlock_page_cgroup(page);
+       }
+
        if (unlikely(!mm))
                mm = &init_mm;
+
        return mem_cgroup_charge_common(page, mm, gfp_mask,
                                MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
 }
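
A hedged sketch of the shmem-style sequence the new GFP_NOWAIT check defends against (error handling trimmed; mapping and index are placeholders):

	/* 1. charge up front, from a context that may sleep */
	if (mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * 2. insert later under lock, so it must not sleep: GFP_NOWAIT.
	 *    add_to_page_cache() charges internally, and the pre-charged
	 *    check above turns that second charge into a no-op.
	 */
	err = add_to_page_cache(page, mapping, index, GFP_NOWAIT);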
@@ -690,7 +706,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
         */
        lock_page_cgroup(page);
        pc = page_get_page_cgroup(page);
-       if (!pc)
+       if (unlikely(!pc))
                goto unlock;
 
        VM_BUG_ON(pc->page != page);
@@ -791,13 +807,23 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
        int progress = 0;
        int retry = MEM_CGROUP_RECLAIM_RETRIES;
 
+       if (mem_cgroup_subsys.disabled)
+               return 0;
+       if (!mm)
+               return 0;
+
        rcu_read_lock();
        mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+       if (unlikely(!mem)) {
+               rcu_read_unlock();
+               return 0;
+       }
        css_get(&mem->css);
        rcu_read_unlock();
 
        do {
                progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
+               progress += res_counter_check_under_limit(&mem->res);
        } while (!progress && --retry);
 
        css_put(&mem->css);
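
The extra term in the loop matters when a reclaim pass frees nothing but the group is nonetheless under its limit (for example because another task uncharged pages concurrently): res_counter_check_under_limit() then supplies the "progress" that ends the loop instead of burning all retries. Roughly (an illustrative approximation, not the actual kernel code, which takes the counter lock):

	static inline int res_counter_check_under_limit(struct res_counter *cnt)
	{
		return cnt->usage < cnt->limit;
	}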
@@ -806,6 +832,30 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
        return 0;
 }
 
+int mem_cgroup_resize_limit(struct mem_cgroup *memcg, unsigned long long val)
+{
+       int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+       int progress;
+       int ret = 0;
+
+       while (res_counter_set_limit(&memcg->res, val)) {
+               if (signal_pending(current)) {
+                       ret = -EINTR;
+                       break;
+               }
+               if (!retry_count) {
+                       ret = -EBUSY;
+                       break;
+               }
+               progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL);
+               if (!progress)
+                       retry_count--;
+       }
+       return ret;
+}
+
 /*
  * This routine traverse page_cgroup in given list and drop them all.
  * *And* this routine doesn't reclaim page itself, just removes page_cgroup.
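
mem_cgroup_resize_limit() leans on res_counter_set_limit() refusing to lower the limit below current usage; each refusal triggers one reclaim pass, and only fruitless passes consume retries. A sketch of the counter helper it pairs with (reconstructed from memory, assuming the usual usage/limit/lock fields of struct res_counter):

	static inline int res_counter_set_limit(struct res_counter *cnt,
			unsigned long long limit)
	{
		unsigned long flags;
		int ret = -EBUSY;

		spin_lock_irqsave(&cnt->lock, flags);
		if (cnt->usage <= limit) {	/* fails while usage exceeds the new limit */
			cnt->limit = limit;
			ret = 0;
		}
		spin_unlock_irqrestore(&cnt->lock, flags);
		return ret;
	}

Writing to memory.limit_in_bytes lands here via mem_cgroup_write(), shown in the hunk further down.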
@@ -860,9 +910,6 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem)
        int ret = -EBUSY;
        int node, zid;
 
-       if (mem_cgroup_subsys.disabled)
-               return 0;
-
        css_get(&mem->css);
        /*
         * page reclaim code (kswapd etc..) will move pages between
@@ -893,13 +940,29 @@ static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
        return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
                                    cft->private);
 }
-
+/*
+ * The only user of this function so far is
+ * RES_LIMIT.
+ */
 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
                            const char *buffer)
 {
-       return res_counter_write(&mem_cgroup_from_cont(cont)->res,
-                                cft->private, buffer,
-                                res_counter_memparse_write_strategy);
+       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       unsigned long long val;
+       int ret;
+
+       switch (cft->private) {
+       case RES_LIMIT:
+               /* this helper does all the necessary parsing; reuse it */
+               ret = res_counter_memparse_write_strategy(buffer, &val);
+               if (!ret)
+                       ret = mem_cgroup_resize_limit(memcg, val);
+               break;
+       default:
+               ret = -EINVAL; /* should be BUG() ? */
+               break;
+       }
+       return ret;
 }
 
 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
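
res_counter_memparse_write_strategy() is built on memparse(), so the usual K/M/G suffixes work when writing memory.limit_in_bytes. For example:

	unsigned long long val;
	int ret;

	/* "4M" parses to 4 * 1024 * 1024 */
	ret = res_counter_memparse_write_strategy("4M", &val);
	/* ret == 0, val == 4194304; mem_cgroup_resize_limit(memcg, val) follows */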
@@ -1106,8 +1169,6 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
 static int mem_cgroup_populate(struct cgroup_subsys *ss,
                                struct cgroup *cont)
 {
-       if (mem_cgroup_subsys.disabled)
-               return 0;
        return cgroup_add_files(cont, ss, mem_cgroup_files,
                                        ARRAY_SIZE(mem_cgroup_files));
 }
@@ -1120,9 +1181,6 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
        struct mm_struct *mm;
        struct mem_cgroup *mem, *old_mem;
 
-       if (mem_cgroup_subsys.disabled)
-               return;
-
        mm = get_task_mm(p);
        if (mm == NULL)
                return;
@@ -1130,9 +1188,6 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
        mem = mem_cgroup_from_cont(cont);
        old_mem = mem_cgroup_from_cont(old_cont);
 
-       if (mem == old_mem)
-               goto out;
-
        /*
         * Only thread group leaders are allowed to migrate, the mm_struct is
         * in effect owned by the leader
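
The mem == old_mem test removed here, like the disabled-subsystem tests removed from the other cgroup callbacks above, appears redundant because the cgroup core already filters these cases: cgroup_attach_task() returns early when the task is not actually changing groups, and callbacks of a disabled subsystem are never invoked at all.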