mm/memory.c: propagate swap_duplicate() failure, batch memcg uncharges, fix fault-path error returns

The diff below touches fork-time pte copying, unmap-time memcg accounting, and the swapin and nonlinear fault paths.

diff --git a/mm/memory.c b/mm/memory.c
index 60ea601..09e4b1b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -572,7 +572,7 @@ out:
  * covered by this vma.
  */
 
-static inline void
+static inline unsigned long
 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
                unsigned long addr, int *rss)
@@ -586,7 +586,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                if (!pte_file(pte)) {
                        swp_entry_t entry = pte_to_swp_entry(pte);
 
-                       swap_duplicate(entry);
+                       if (swap_duplicate(entry) < 0)
+                               return entry.val;
+
                        /* make sure dst_mm is on swapoff's mmlist. */
                        if (unlikely(list_empty(&dst_mm->mmlist))) {
                                spin_lock(&mmlist_lock);
@@ -635,6 +637,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 
 out_set_pte:
        set_pte_at(dst_mm, addr, dst_pte, pte);
+       return 0;
 }
 
 static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
@@ -646,6 +649,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        spinlock_t *src_ptl, *dst_ptl;
        int progress = 0;
        int rss[2];
+       swp_entry_t entry = (swp_entry_t){0};
 
 again:
        rss[1] = rss[0] = 0;
@@ -674,7 +678,10 @@ again:
                        progress++;
                        continue;
                }
-               copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
+               entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
+                                                       vma, addr, rss);
+               if (entry.val)
+                       break;
                progress += 8;
        } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
 
@@ -684,6 +691,12 @@ again:
        add_mm_rss(dst_mm, rss[0], rss[1]);
        pte_unmap_unlock(orig_dst_pte, dst_ptl);
        cond_resched();
+
+       if (entry.val) {
+               if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
+                       return -ENOMEM;
+               progress = 0;
+       }
        if (addr != end)
                goto again;
        return 0;
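
The first two hunks work together: copy_one_pte() runs with the page-table spinlocks held, where a sleeping allocation is forbidden, so when swap_duplicate() fails for want of a swap-count continuation it now returns the offending entry instead of void. copy_pte_range() breaks out of its loop, drops the locks, allocates the continuation with GFP_KERNEL via add_swap_count_continuation(), and jumps back to "again" to resume the copy where it stopped. A minimal userspace sketch of that fail-unlock-allocate-retry shape, with hypothetical names standing in for the kernel ones:

/*
 * Minimal userspace sketch (hypothetical names, not kernel code):
 * copy_one() can fail where allocation is forbidden (under the lock),
 * so the caller records the failure, drops the lock, allocates, and
 * retries from the item that failed.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;
static int reserve;			/* stands in for the continuation page */

/* Fails at item 3 until the reserve has been allocated. */
static int copy_one(int i)
{
	if (i == 3 && !reserve)
		return -1;
	printf("copied %d\n", i);
	return 0;
}

int main(void)
{
	int i = 0, failed_at;

again:
	failed_at = -1;
	pthread_mutex_lock(&ptl);
	for (; i < 8; i++) {
		if (copy_one(i) < 0) {
			failed_at = i;	/* remember it, don't allocate here */
			break;
		}
	}
	pthread_mutex_unlock(&ptl);

	if (failed_at >= 0) {
		reserve = 1;		/* sleeping allocation, locks dropped */
		goto again;		/* resume at the failed item */
	}
	return 0;
}

Resetting progress to 0 after the allocation keeps the loop's periodic need_resched() break from firing immediately on the retried batch.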
@@ -943,6 +956,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
                details = NULL;
 
        BUG_ON(addr >= end);
+       mem_cgroup_uncharge_start();
        tlb_start_vma(tlb, vma);
        pgd = pgd_offset(vma->vm_mm, addr);
        do {
@@ -955,6 +969,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
                                                zap_work, details);
        } while (pgd++, addr = next, (addr != end && *zap_work > 0));
        tlb_end_vma(tlb, vma);
+       mem_cgroup_uncharge_end();
 
        return addr;
 }
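
mem_cgroup_uncharge_start() and mem_cgroup_uncharge_end() bracket the whole per-vma unmap so the many page uncharges issued inside can be coalesced into one accounting update instead of being paid per page. A minimal userspace sketch of that start/end batching idiom (names hypothetical):

/*
 * Minimal userspace sketch (hypothetical names) of the start/end
 * batching idiom: calls between start and end accumulate into one
 * flush rather than paying the accounting cost per page.
 */
#include <stdio.h>

static int batching;
static unsigned long pending;

static void flush(void)
{
	if (pending)
		printf("uncharged %lu pages in one go\n", pending);
	pending = 0;
}

static void uncharge_start(void) { batching = 1; }

static void uncharge_page(void)
{
	pending++;
	if (!batching)		/* unbatched callers still flush per page */
		flush();
}

static void uncharge_end(void)
{
	batching = 0;
	flush();		/* one flush for the whole unmap */
}

int main(void)
{
	uncharge_start();
	for (int i = 0; i < 512; i++)
		uncharge_page();
	uncharge_end();
	return 0;
}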
@@ -2514,7 +2529,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        ret = VM_FAULT_HWPOISON;
                } else {
                        print_bad_pte(vma, address, orig_pte, NULL);
-                       ret = VM_FAULT_OOM;
+                       ret = VM_FAULT_SIGBUS;
                }
                goto out;
        }
@@ -2540,14 +2555,24 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                ret = VM_FAULT_MAJOR;
                count_vm_event(PGMAJFAULT);
        } else if (PageHWPoison(page)) {
+               /*
+                * hwpoisoned dirty swapcache pages are kept for killing
+                * owner processes (which may be unknown at hwpoison time)
+                */
                ret = VM_FAULT_HWPOISON;
                delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
-               goto out;
+               goto out_release;
        }
 
        lock_page(page);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
+       page = ksm_might_need_to_copy(page, vma, address);
+       if (!page) {
+               ret = VM_FAULT_OOM;
+               goto out;
+       }
+
        if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
                ret = VM_FAULT_OOM;
                goto out_page;
@@ -2614,6 +2639,7 @@ out_nomap:
        pte_unmap_unlock(page_table, ptl);
 out_page:
        unlock_page(page);
+out_release:
        page_cache_release(page);
        return ret;
 }
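
This group of hunks fixes a page-count leak in the hwpoison path: it holds a swapcache reference but has not yet locked the page, so bailing out through "out" leaked the reference, while "out_page" would unlock a page that was never locked. The new out_release label slots into the existing stack of error labels, each of which undoes exactly one more acquisition. The ksm_might_need_to_copy() call added above is the KSM counterpart on the same path: swapin may need to substitute a fresh copy of the page before mapping it, and a failed copy is reported as VM_FAULT_OOM. A minimal sketch of the stacked-label unwind idiom (names hypothetical; the success path shares the unwind here only for brevity):

/*
 * Minimal sketch (hypothetical names) of stacked error labels: an
 * error path jumps to the label matching how far setup got, and
 * each label undoes one more acquisition on the way out.
 */
#include <stdio.h>
#include <stdlib.h>

struct page { int locked; int refs; };

static int fault(int poisoned, int charge_fails)
{
	int ret = -1;
	struct page *page = calloc(1, sizeof(*page));

	if (!page)
		goto out;
	page->refs = 1;			/* reference taken on swapin */

	if (poisoned)
		goto out_release;	/* ref held, page never locked */

	page->locked = 1;
	if (charge_fails)
		goto out_page;		/* locked: unlock before release */

	ret = 0;
out_page:
	page->locked = 0;
out_release:
	page->refs--;
	free(page);
out:
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", fault(0, 0), fault(1, 0), fault(0, 1));
	return 0;
}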
@@ -2909,7 +2935,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                 * Page table corrupted: show pte and kill process.
                 */
                print_bad_pte(vma, address, orig_pte, NULL);
-               return VM_FAULT_OOM;
+               return VM_FAULT_SIGBUS;
        }
 
        pgoff = pte_to_pgoff(orig_pte);
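
Finally, the two VM_FAULT_OOM to VM_FAULT_SIGBUS conversions (here and in do_swap_page above) change who pays for a corrupted page table. print_bad_pte() means this task's own mappings are broken; returning VM_FAULT_OOM would wake the OOM killer, which may kill some unrelated process to free memory, whereas VM_FAULT_SIGBUS delivers a fatal signal to the faulting task itself. A toy userspace illustration of the distinction (hypothetical names):

/*
 * Toy userspace sketch (hypothetical names): a corrupt pte blames
 * only the faulting task, so it alone gets SIGBUS; nothing here
 * models the system-wide OOM killer that VM_FAULT_OOM would invoke.
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void on_sigbus(int sig)
{
	(void)sig;
	printf("faulting task killed with SIGBUS; no OOM kill\n");
	exit(0);
}

int main(void)
{
	signal(SIGBUS, on_sigbus);

	int pte_corrupt = 1;		/* the print_bad_pte() case above */
	if (pte_corrupt)
		raise(SIGBUS);		/* analogue of VM_FAULT_SIGBUS */
	return 1;			/* not reached */
}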