diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 00dda35..c8569bc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1359,16 +1359,19 @@ void mem_cgroup_update_file_mapped(struct page *page, int val)
 
        lock_page_cgroup(pc);
        mem = pc->mem_cgroup;
-       if (!mem)
-               goto done;
-
-       if (!PageCgroupUsed(pc))
+       if (!mem || !PageCgroupUsed(pc))
                goto done;
 
        /*
         * Preemption is already disabled. We can use __this_cpu_xxx
         */
-       __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], val);
+       if (val > 0) {
+               __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
+               SetPageCgroupFileMapped(pc);
+       } else {
+               __this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
+               ClearPageCgroupFileMapped(pc);
+       }
 
 done:
        unlock_page_cgroup(pc);
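
Note on the hunk above: the callers pass val = +1/-1, so replacing the single __this_cpu_add() with explicit inc/dec branches loses nothing; what it gains is the per-page record kept in the new FileMapped page_cgroup flag. Because this function runs under lock_page_cgroup(), that flag becomes the memcg's own authoritative answer to "did we count this page as FILE_MAPPED?", which the __mem_cgroup_move_account() hunk further down relies on. The Set/Clear helpers are generated by the existing PCG flag macros in include/linux/page_cgroup.h; the companion change there is outside this blobdiff, but it looks roughly like:

	/* sketch of the include/linux/page_cgroup.h side (not in this diff) */
	enum {
		/* ... existing PCG_* bits ... */
		PCG_FILE_MAPPED, /* page is accounted as file-mapped */
	};

	TESTPCGFLAG(FileMapped, FILE_MAPPED)
	SETPCGFLAG(FileMapped, FILE_MAPPED)
	CLEARPCGFLAG(FileMapped, FILE_MAPPED)
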
@@ -1435,7 +1438,7 @@ static void drain_local_stock(struct work_struct *dummy)
 
 /*
  * Cache charges(val) which is from res_counter, to local per_cpu area.
- * This will be consumed by consumt_stock() function, later.
+ * This will be consumed by consume_stock() function, later.
  */
 static void refill_stock(struct mem_cgroup *mem, int val)
 {
@@ -1598,7 +1601,6 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                         * There is a small race that "from" or "to" can be
                         * freed by rmdir, so we use css_tryget().
                         */
-                       rcu_read_lock();
                        from = mc.from;
                        to = mc.to;
                        if (from && css_tryget(&from->css)) {
@@ -1619,7 +1621,6 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                                        do_continue = (to == mem_over_limit);
                                css_put(&to->css);
                        }
-                       rcu_read_unlock();
                        if (do_continue) {
                                DEFINE_WAIT(wait);
                                prepare_to_wait(&mc.waitq, &wait,
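
These two hunks drop the rcu_read_lock()/rcu_read_unlock() pair that bracketed the css_tryget() calls. The retained comment already states that css_tryget() is what closes the small race with rmdir, and mc.from/mc.to stay stable while a charge move is in flight, so the extra RCU critical section appears to have been redundant (rationale inferred from the visible context, not from a commit message). Stitched together from the fragments shown, the post-patch flow reads roughly:

	/* sketch reconstructed from the context lines above */
	from = mc.from;
	to = mc.to;
	if (from && css_tryget(&from->css)) {
		/* ... compare from against mem_over_limit, set do_continue ... */
		css_put(&from->css);
	}
	/* second hunk: the same tryget/compare/put sequence for "to" */
	if (do_continue) {
		DEFINE_WAIT(wait);
		prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
		/* ... wait for the move to finish, then retry the charge ... */
	}
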
@@ -1801,16 +1802,13 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
 static void __mem_cgroup_move_account(struct page_cgroup *pc,
        struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
 {
-       struct page *page;
-
        VM_BUG_ON(from == to);
        VM_BUG_ON(PageLRU(pc->page));
        VM_BUG_ON(!PageCgroupLocked(pc));
        VM_BUG_ON(!PageCgroupUsed(pc));
        VM_BUG_ON(pc->mem_cgroup != from);
 
-       page = pc->page;
-       if (page_mapped(page) && !PageAnon(page)) {
+       if (PageCgroupFileMapped(pc)) {
                /* Update mapped_file data for mem_cgroup */
                preempt_disable();
                __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
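
Here the decision to transfer the FILE_MAPPED count keys off the flag maintained in the first hunk instead of re-deriving it from page_mapped(page) && !PageAnon(page). A page's mapcount can change between the accounting site and this move, so recomputing it here could disagree with what was actually counted and leave the from/to per-cpu counters imbalanced; the flag, only ever touched under lock_page_cgroup(), cannot. The hunk is cut off above; the block it edits continues with the matching increment on the destination group, roughly:

	if (PageCgroupFileMapped(pc)) {
		/* Update mapped_file data for mem_cgroup */
		preempt_disable();
		__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
		preempt_enable();
	}
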
@@ -2429,11 +2427,11 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
        }
        unlock_page_cgroup(pc);
 
+       *ptr = mem;
        if (mem) {
-               ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+               ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
                css_put(&mem->css);
        }
-       *ptr = mem;
        return ret;
 }
 
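
The point of publishing *ptr before the charge, and of charging through ptr rather than a local copy, is that __mem_cgroup_try_charge()'s memcg argument is in/out: its bypass exit writes NULL through the pointer and still returns 0. With the old ordering that NULL was clobbered by the trailing *ptr = mem, so mem_cgroup_end_migration() could later uncharge a charge that was never made. A condensed sketch of the bypass exit inside __mem_cgroup_try_charge() (from the source of this era, not visible in this diff):

	bypass:
		*memcg = NULL;	/* tell the caller nothing was charged */
		return 0;

The css_put(&mem->css) above remains correct even when *ptr has been cleared, since it only drops the reference this function took earlier.
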
 
@@ -3691,8 +3689,10 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
        else
                mem = vmalloc(size);
 
-       if (mem)
-               memset(mem, 0, size);
+       if (!mem)
+               return NULL;
+
+       memset(mem, 0, size);
        mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
        if (!mem->stat) {
                if (size < PAGE_SIZE)
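
Beyond tidying, the early return closes a latent NULL dereference: the old code guarded only the memset() with if (mem) and then dereferenced mem->stat unconditionally, so a failed kmalloc()/vmalloc() would oops instead of making mem_cgroup_alloc() return NULL. The hunk is truncated above; judging from the kernel source of this era, the error path continues by freeing with the allocator matching the size, roughly:

	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
	if (!mem->stat) {
		/* free with the allocator that produced the buffer */
		if (size < PAGE_SIZE)
			kfree(mem);
		else
			vfree(mem);
		mem = NULL;
	}
	return mem;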