diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a83e039..fd4529d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -177,6 +177,9 @@ struct mem_cgroup {
 
        unsigned int    swappiness;
 
+       /* set when res.limit == memsw.limit */
+       bool            memsw_is_minimum;
+
        /*
         * statistics. This must be placed at the end of memcg.
         */
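
The new field is only a cached hint: it records whether the "mem" and "mem+swap" counters are capped at the same value, in which case swapping anonymous pages out merely moves their charge from res to memsw without creating any headroom. A minimal sketch of the condition the flag caches; the helper name is hypothetical, while res_counter_read_u64() and RES_LIMIT are the existing res_counter API:

/*
 * Hypothetical helper, not part of the patch: the condition that
 * memsw_is_minimum caches.  Swap-out moves a page's charge from res
 * (memory) to memsw (memory+swap); when both limits are equal that
 * move frees no headroom, so reclaim can skip swap-out entirely.
 */
static inline bool mem_and_memsw_limits_equal(struct mem_cgroup *memcg)
{
        return res_counter_read_u64(&memcg->res, RES_LIMIT) ==
               res_counter_read_u64(&memcg->memsw, RES_LIMIT);
}
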
@@ -646,6 +649,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
        int zid = zone_idx(z);
        struct mem_cgroup_per_zone *mz;
        int lru = LRU_FILE * !!file + !!active;
+       int ret;
 
        BUG_ON(!mem_cont);
        mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
@@ -663,9 +667,19 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                        continue;
 
                scan++;
-               if (__isolate_lru_page(page, mode, file) == 0) {
+               ret = __isolate_lru_page(page, mode, file);
+               switch (ret) {
+               case 0:
                        list_move(&page->lru, dst);
+                       mem_cgroup_del_lru(page);
                        nr_taken++;
+                       break;
+               case -EBUSY:
+                       /* leave the global LRU alone; just rotate it on our memcg LRU */
+                       mem_cgroup_rotate_lru_list(page, page_lru(page));
+                       break;
+               default:
+                       break;
                }
        }
 
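
The switch relies on the return convention of __isolate_lru_page() in this kernel generation's mm/vmscan.c: 0 when the page was pinned and taken by the caller, -EBUSY when it could not be pinned right now, and -EINVAL when it does not match the requested isolation mode. A hedged caller-side sketch of the same bookkeeping, factored into a hypothetical helper (only __isolate_lru_page(), list_move(), page_lru() and the two mem_cgroup_* calls are real interfaces):

/*
 * Sketch only, not from the patch.  On success the page moves to the
 * caller's private list, so it must leave the per-memcg LRU as well;
 * on -EBUSY it stays on the global LRU and is merely rotated within
 * the memcg LRU so it gets revisited later.
 */
static int memcg_isolate_one(struct page *page, int mode, int file,
                             struct list_head *dst)
{
        int ret = __isolate_lru_page(page, mode, file);

        if (!ret) {
                list_move(&page->lru, dst);
                mem_cgroup_del_lru(page);
        } else if (ret == -EBUSY) {
                mem_cgroup_rotate_lru_list(page, page_lru(page));
        }
        return ret;
}
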
@@ -847,6 +861,10 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
        int ret, total = 0;
        int loop = 0;
 
+       /* If memsw_is_minimum==1, swap-out cannot lower memsw usage. */
+       if (root_mem->memsw_is_minimum)
+               noswap = true;
+
        while (loop < 2) {
                victim = mem_cgroup_select_victim(root_mem);
                if (victim == root_mem)
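
The short-circuit follows directly from the flag's meaning. For example, with both memory.limit_in_bytes and memory.memsw.limit_in_bytes set to 100MB, swapping out 10MB of anonymous pages lowers res usage to 90MB but leaves memsw usage at 100MB, so reclaim that is trying to get under the mem+swap limit gains nothing from swapping and should run with noswap. A sketch of how the hint feeds the per-victim reclaim call, assuming this kernel's try_to_free_mem_cgroup_pages() signature:

/*
 * Sketch: once noswap is forced, every victim walked by the loop in
 * this hunk is reclaimed without touching anonymous memory.
 */
ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
                                   get_swappiness(victim));
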
@@ -1189,6 +1207,12 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
        ret = 0;
 out:
        unlock_page_cgroup(pc);
+       /*
+        * We charge against "to", which may not have any tasks. Then "to"
+        * can be under rmdir(). But in the current implementation, the only
+        * caller of this function is force_empty(), and it is guaranteed
+        * that "to" is never removed. So, we don't check rmdir status here.
+        */
        return ret;
 }
 
@@ -1410,6 +1434,7 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
                return;
        if (!ptr)
                return;
+       cgroup_exclude_rmdir(&ptr->css);
        pc = lookup_page_cgroup(page);
        mem_cgroup_lru_del_before_commit_swapcache(page);
        __mem_cgroup_commit_charge(ptr, pc, ctype);
@@ -1439,8 +1464,12 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
                }
                rcu_read_unlock();
        }
-       /* add this page(page_cgroup) to the LRU we want. */
-
+       /*
+        * At swapin, we may charge against a cgroup which has no tasks, so
+        * rmdir()->pre_destroy() can be called while we do this charge. In
+        * that case, pre_destroy() needs to be called again; check it here.
+        */
+       cgroup_release_and_wakeup_rmdir(&ptr->css);
 }
 
 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
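
Both new calls in this function follow the one pattern the patch introduces around charges made against a possibly task-less cgroup: block a concurrent rmdir() while the charge is committed, then wake the rmdir() waiter so it can run pre_destroy() again and see the pages added in the meantime. The same pairing appears in mem_cgroup_end_migration() below. A condensed sketch of the pattern, where "mem" stands for whichever memcg is being charged (ptr at swapin, mem at migration) and the charge body is a placeholder:

cgroup_exclude_rmdir(&mem->css);        /* a concurrent rmdir() now waits */

/*
 * ... commit the charge; the cgroup may gain pages here even though it
 * has no tasks, which would otherwise race with pre_destroy() ...
 */

cgroup_release_and_wakeup_rmdir(&mem->css);     /* let rmdir() retry pre_destroy() */
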
@@ -1646,7 +1675,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem,
 
        if (!mem)
                return;
-
+       cgroup_exclude_rmdir(&mem->css);
        /* at migration success, oldpage->mapping is NULL. */
        if (oldpage->mapping) {
                target = oldpage;
@@ -1686,6 +1715,12 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem,
         */
        if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
                mem_cgroup_uncharge_page(target);
+       /*
+        * At migration, we may charge against a cgroup which has no tasks,
+        * so rmdir()->pre_destroy() can be called while we do this charge.
+        * In that case, pre_destroy() needs to be called again; check it here.
+        */
+       cgroup_release_and_wakeup_rmdir(&mem->css);
 }
 
 /*
@@ -1752,6 +1787,12 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
                        break;
                }
                ret = res_counter_set_limit(&memcg->res, val);
+               if (!ret) {
+                       if (memswlimit == val)
+                               memcg->memsw_is_minimum = true;
+                       else
+                               memcg->memsw_is_minimum = false;
+               }
                mutex_unlock(&set_limit_mutex);
 
                if (!ret)
@@ -1799,6 +1840,12 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
                        break;
                }
                ret = res_counter_set_limit(&memcg->memsw, val);
+               if (!ret) {
+                       if (memlimit == val)
+                               memcg->memsw_is_minimum = true;
+                       else
+                               memcg->memsw_is_minimum = false;
+               }
                mutex_unlock(&set_limit_mutex);
 
                if (!ret)
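
Both resize paths recompute the hint the same way after a successful res_counter_set_limit(), while still holding set_limit_mutex so the two limits cannot change underneath the comparison. A hypothetical refactoring, not part of the patch, that expresses the shared update:

/*
 * Hypothetical helper: called with set_limit_mutex held after one of
 * the two limits has been successfully updated.
 */
static void mem_cgroup_update_memsw_is_minimum(struct mem_cgroup *memcg,
                                               u64 memlimit, u64 memswlimit)
{
        memcg->memsw_is_minimum = (memlimit == memswlimit);
}

In mem_cgroup_resize_limit() this would be invoked as (memcg, val, memswlimit), and in mem_cgroup_resize_memsw_limit() as (memcg, memlimit, val).
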
@@ -1943,7 +1990,7 @@ try_to_free:
                if (!progress) {
                        nr_retries--;
                        /* maybe some writeback is necessary */
-                       congestion_wait(WRITE, HZ/10);
+                       congestion_wait(BLK_RW_ASYNC, HZ/10);
                }
 
        }
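
The last hunk adapts to the congestion_wait() rework: the first argument no longer names a request direction (READ/WRITE) but which congestion wait queue to sleep on, BLK_RW_SYNC or BLK_RW_ASYNC, and "wait for writeback to make some progress" is the async queue. A hedged standalone sketch of the retry shape used above; congestion_wait(), BLK_RW_ASYNC and MEM_CGROUP_RECLAIM_RETRIES are existing kernel names, do_reclaim_pass() is hypothetical:

int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;

while (nr_retries && !do_reclaim_pass()) {      /* hypothetical reclaim step */
        nr_retries--;
        /* sleep up to HZ/10 jiffies (~100ms), or until async writeback
         * congestion clears on some backing device */
        congestion_wait(BLK_RW_ASYNC, HZ/10);
}
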