memcg: mem_cgroup_charge never NULL
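mem_cgroup_charge_common() is only ever called with a non-NULL page, so the !page special cases can go: the page_cgroup_assign_new_page_cgroup() helper is deleted and its lock/check/assign sequence open-coded at the call site.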
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9e170d3..83ba13a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -301,25 +301,6 @@ static void __always_inline unlock_page_cgroup(struct page *page)
 }
 
 /*
- * Tie new page_cgroup to struct page under lock_page_cgroup()
- * This can fail if the page has been tied to a page_cgroup.
- * If success, returns 0.
- */
-static int page_cgroup_assign_new_page_cgroup(struct page *page,
-                                               struct page_cgroup *pc)
-{
-       int ret = 0;
-
-       lock_page_cgroup(page);
-       if (!page_get_page_cgroup(page))
-               page_assign_page_cgroup(page, pc);
-       else /* A page is tied to other pc. */
-               ret = 1;
-       unlock_page_cgroup(page);
-       return ret;
-}
-
-/*
  * Clear page->page_cgroup member under lock_page_cgroup().
  * If given "pc" value is different from one page->page_cgroup,
  * page->cgroup is not cleared.
@@ -585,26 +566,24 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
         * with it
         */
 retry:
-       if (page) {
-               lock_page_cgroup(page);
-               pc = page_get_page_cgroup(page);
-               /*
-                * The page_cgroup exists and
-                * the page has already been accounted.
-                */
-               if (pc) {
-                       if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
-                               /* this page is under being uncharged ? */
-                               unlock_page_cgroup(page);
-                               cpu_relax();
-                               goto retry;
-                       } else {
-                               unlock_page_cgroup(page);
-                               goto done;
-                       }
+       lock_page_cgroup(page);
+       pc = page_get_page_cgroup(page);
+       /*
+        * The page_cgroup exists and
+        * the page has already been accounted.
+        */
+       if (pc) {
+               if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
+                       /* this page is being uncharged? retry */
+                       unlock_page_cgroup(page);
+                       cpu_relax();
+                       goto retry;
+               } else {
+                       unlock_page_cgroup(page);
+                       goto done;
                }
-               unlock_page_cgroup(page);
        }
+       unlock_page_cgroup(page);
 
        pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
        if (pc == NULL)
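
Aside on the retry above: atomic_inc_not_zero() takes a reference only while the count is still positive, so a failure means this page_cgroup is mid-uncharge and the charger must drop the lock, spin, and re-read page->page_cgroup. A minimal userspace model of that primitive in C11 atomics (ref_get_unless_zero is an illustrative name, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>

/* Model of the kernel's atomic_inc_not_zero(): atomically take a
 * reference, but only if at least one reference still exists.  A
 * false return means the count already hit zero, i.e. the object is
 * being torn down, so the caller must back off and retry. */
static bool ref_get_unless_zero(atomic_int *refcnt)
{
	int old = atomic_load_explicit(refcnt, memory_order_relaxed);

	while (old != 0) {
		if (atomic_compare_exchange_weak_explicit(refcnt, &old,
				old + 1, memory_order_acquire,
				memory_order_relaxed))
			return true;	/* got a reference */
	}
	return false;	/* count was zero: object mid-teardown */
}

The compare-exchange loop is what makes "increment unless zero" atomic: a plain load-test-increment would race with the final decrement performed by uncharge.
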
@@ -663,7 +642,9 @@ retry:
        if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
                pc->flags |= PAGE_CGROUP_FLAG_CACHE;
 
-       if (!page || page_cgroup_assign_new_page_cgroup(page, pc)) {
+       lock_page_cgroup(page);
+       if (page_get_page_cgroup(page)) {
+               unlock_page_cgroup(page);
                /*
                 * Another charge has been added to this page already.
                 * We take lock_page_cgroup(page) again and read
@@ -672,10 +653,10 @@ retry:
                res_counter_uncharge(&mem->res, PAGE_SIZE);
                css_put(&mem->css);
                kfree(pc);
-               if (!page)
-                       goto done;
                goto retry;
        }
+       page_assign_page_cgroup(page, pc);
+       unlock_page_cgroup(page);
 
        mz = page_cgroup_zoneinfo(pc);
        spin_lock_irqsave(&mz->lru_lock, flags);
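
Taken together, the last two hunks open-code the deleted helper at its call site: allocate and charge outside the lock, then take lock_page_cgroup(), recheck for a racing charge, and publish only if the page is still unclaimed. A standalone pthread sketch of that shape (struct and function names here are illustrative, not kernel API):

#include <pthread.h>
#include <stddef.h>

struct page_stub {
	pthread_mutex_t lock;	/* stands in for lock_page_cgroup() */
	void *page_cgroup;	/* stands in for page->page_cgroup */
};

/* Publish pc as the page's charge unless another thread won the race
 * while we were allocating.  Returns 0 on success; 1 tells the caller
 * to undo its charge, free pc, and retry, mirroring the open-coded
 * sequence in mem_cgroup_charge_common(). */
static int assign_unless_raced(struct page_stub *page, void *pc)
{
	pthread_mutex_lock(&page->lock);
	if (page->page_cgroup != NULL) {
		pthread_mutex_unlock(&page->lock);
		return 1;	/* lost the race */
	}
	page->page_cgroup = pc;
	pthread_mutex_unlock(&page->lock);
	return 0;
}

On the failure path the caller unwinds exactly as the patch does: res_counter_uncharge(), css_put(), kfree(pc), then goto retry.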