MM: Pass a PTE pointer to update_mmu_cache() rather than the PTE itself
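
The change below switches the third argument of update_mmu_cache() from a
pte_t value to a pte_t pointer, so each caller hands the architecture hook
the location of the entry it just wrote instead of a copy of it. As a rough
sketch of the interface change (the prototypes follow the generic form; the
body shown is a hypothetical illustration, not any particular architecture's
implementation):

    /* old interface: the hook only sees a snapshot of the entry */
    void update_mmu_cache(struct vm_area_struct *vma,
                          unsigned long address, pte_t pte);

    /* new interface: the hook is given the pte's location in the page table */
    void update_mmu_cache(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep);

    /* hypothetical arch-side body: with the pointer, the hook can re-read
     * (or, where the architecture needs to, adjust) the entry the generic
     * code just set, rather than working from a possibly stale copy.
     */
    void update_mmu_cache(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep)
    {
        pte_t pte = *ptep;          /* current value, read in place */

        if (!pte_present(pte))
            return;

        /* ... arch-specific cache/TLB maintenance for 'pte' ... */
    }

Accordingly, every update_mmu_cache() call site in mm/memory.c below now
passes the page-table pointer (page_table or pte) rather than the pte_t
value (entry or pte) it passed before.
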
diff --git a/mm/memory.c b/mm/memory.c
index 543c446..72fb5f3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -956,6 +956,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
                details = NULL;
 
        BUG_ON(addr >= end);
+       mem_cgroup_uncharge_start();
        tlb_start_vma(tlb, vma);
        pgd = pgd_offset(vma->vm_mm, addr);
        do {
@@ -968,6 +969,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
                                                zap_work, details);
        } while (pgd++, addr = next, (addr != end && *zap_work > 0));
        tlb_end_vma(tlb, vma);
+       mem_cgroup_uncharge_end();
 
        return addr;
 }
@@ -1591,7 +1593,7 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
        /* Ok, finally just insert the thing.. */
        entry = pte_mkspecial(pfn_pte(pfn, prot));
        set_pte_at(mm, addr, pte, entry);
-       update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
+       update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
 
        retval = 0;
 out_unlock:
@@ -2114,7 +2116,7 @@ reuse:
                entry = pte_mkyoung(orig_pte);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                if (ptep_set_access_flags(vma, address, page_table, entry,1))
-                       update_mmu_cache(vma, address, entry);
+                       update_mmu_cache(vma, address, page_table);
                ret |= VM_FAULT_WRITE;
                goto unlock;
        }
@@ -2183,7 +2185,7 @@ gotten:
                 * new page to be mapped directly into the secondary page table.
                 */
                set_pte_at_notify(mm, address, page_table, entry);
-               update_mmu_cache(vma, address, entry);
+               update_mmu_cache(vma, address, page_table);
                if (old_page) {
                        /*
                         * Only after switching the pte to the new page may
@@ -2527,7 +2529,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        ret = VM_FAULT_HWPOISON;
                } else {
                        print_bad_pte(vma, address, orig_pte, NULL);
-                       ret = VM_FAULT_OOM;
+                       ret = VM_FAULT_SIGBUS;
                }
                goto out;
        }
@@ -2553,6 +2555,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                ret = VM_FAULT_MAJOR;
                count_vm_event(PGMAJFAULT);
        } else if (PageHWPoison(page)) {
+               /*
+                * hwpoisoned dirty swapcache pages are kept for killing
+                * owner processes (which may be unknown at hwpoison time)
+                */
                ret = VM_FAULT_HWPOISON;
                delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
                goto out_release;
@@ -2561,6 +2567,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        lock_page(page);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
+       page = ksm_might_need_to_copy(page, vma, address);
+       if (!page) {
+               ret = VM_FAULT_OOM;
+               goto out;
+       }
+
        if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
                ret = VM_FAULT_OOM;
                goto out_page;
@@ -2617,7 +2629,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        /* No need to invalidate - it was non-present before */
-       update_mmu_cache(vma, address, pte);
+       update_mmu_cache(vma, address, page_table);
 unlock:
        pte_unmap_unlock(page_table, ptl);
 out:
@@ -2682,7 +2694,7 @@ setpte:
        set_pte_at(mm, address, page_table, entry);
 
        /* No need to invalidate - it was non-present before */
-       update_mmu_cache(vma, address, entry);
+       update_mmu_cache(vma, address, page_table);
 unlock:
        pte_unmap_unlock(page_table, ptl);
        return 0;
@@ -2843,7 +2855,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                set_pte_at(mm, address, page_table, entry);
 
                /* no need to invalidate: a not-present page won't be cached */
-               update_mmu_cache(vma, address, entry);
+               update_mmu_cache(vma, address, page_table);
        } else {
                if (charged)
                        mem_cgroup_uncharge_page(page);
@@ -2923,7 +2935,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                 * Page table corrupted: show pte and kill process.
                 */
                print_bad_pte(vma, address, orig_pte, NULL);
-               return VM_FAULT_OOM;
+               return VM_FAULT_SIGBUS;
        }
 
        pgoff = pte_to_pgoff(orig_pte);
@@ -2980,7 +2992,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
        }
        entry = pte_mkyoung(entry);
        if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
-               update_mmu_cache(vma, address, entry);
+               update_mmu_cache(vma, address, pte);
        } else {
                /*
                 * This is needed only for protection faults but the arch code