diff --git a/mm/mlock.c b/mm/mlock.c
index e13918d..2b8335a 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -88,25 +88,22 @@ void mlock_vma_page(struct page *page)
        }
 }
 
-/*
- * called from munlock()/munmap() path with page supposedly on the LRU.
+/**
+ * munlock_vma_page - munlock a vma page
+ * @page: page to be unlocked
  *
- * Note:  unlike mlock_vma_page(), we can't just clear the PageMlocked
- * [in try_to_munlock()] and then attempt to isolate the page.  We must
- * isolate the page to keep others from messing with its unevictable
- * and mlocked state while trying to munlock.  However, we pre-clear the
- * mlocked state anyway as we might lose the isolation race and we might
- * not get another chance to clear PageMlocked.  If we successfully
- * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
- * mapping the page, it will restore the PageMlocked state, unless the page
- * is mapped in a non-linear vma.  So, we go ahead and SetPageMlocked(),
- * perhaps redundantly.
- * If we lose the isolation race, and the page is mapped by other VM_LOCKED
- * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
- * either of which will restore the PageMlocked state by calling
- * mlock_vma_page() above, if it can grab the vma's mmap sem.
+ * Called from munlock()/munmap() path with page supposedly on the LRU.
+ * When we munlock a page, because the vma where we found the page is being
+ * munlock()ed or munmap()ed, we want to check whether other vmas hold the
+ * page locked so that we can leave it on the unevictable lru list and not
+ * bother vmscan with it.  However, to walk the page's rmap list in
+ * try_to_munlock() we must isolate the page from the LRU.  If some other
+ * task has removed the page from the LRU, we won't be able to do that.
+ * So we clear the PageMlocked flag as we might not get another chance.  If we
+ * can't isolate the page, we leave it for putback_lru_page() and vmscan
+ * [page_referenced()/try_to_unmap()] to deal with.
  */
-static void munlock_vma_page(struct page *page)
+void munlock_vma_page(struct page *page)
 {
        BUG_ON(!PageLocked(page));
 
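
For readers who want to see this accounting from the other side, here is a
minimal userspace sketch (illustrative only, not part of the patch; it uses
nothing beyond mlock(2)/munlock(2) and /proc/self/status) showing VmLck rise
when the pages are mlocked and fall again once the munlock path, including
munlock_vma_page(), has run for each page:

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	static void print_vmlck(const char *when)
	{
		char line[256];
		FILE *f = fopen("/proc/self/status", "r");

		while (f && fgets(line, sizeof(line), f))
			if (!strncmp(line, "VmLck:", 6))
				printf("%s: %s", when, line);
		if (f)
			fclose(f);
	}

	int main(void)
	{
		size_t len = 4 * sysconf(_SC_PAGESIZE);
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;
		print_vmlck("before mlock");
		if (mlock(p, len))	/* faults pages in and marks them mlocked */
			perror("mlock");
		print_vmlck("after mlock");
		if (munlock(p, len))	/* munlock_vma_page() runs per page */
			perror("munlock");
		print_vmlck("after munlock");
		munmap(p, len);
		return 0;
	}
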
@@ -117,18 +114,18 @@ static void munlock_vma_page(struct page *page)
                        /*
                         * did try_to_munlock() succeed or punt?
                         */
-                       if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
+                       if (ret != SWAP_MLOCK)
                                count_vm_event(UNEVICTABLE_PGMUNLOCKED);
 
                        putback_lru_page(page);
                } else {
                        /*
-                        * We lost the race.  let try_to_unmap() deal
-                        * with it.  At least we get the page state and
-                        * mlock stats right.  However, page is still on
-                        * the noreclaim list.  We'll fix that up when
-                        * the page is eventually freed or we scan the
-                        * noreclaim list.
+                        * Some other task has removed the page from the LRU.
+                        * putback_lru_page() will take care of removing the
+                        * page from the unevictable list, if necessary.
+                        * vmscan [page_referenced()] will move the page back
+                        * to the unevictable list if some other vma has it
+                        * mlocked.
                         */
                        if (PageUnevictable(page))
                                count_vm_event(UNEVICTABLE_PGSTRANDED);
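
The two vm_events in this hunk are visible at runtime through /proc/vmstat,
as unevictable_pgs_munlocked and unevictable_pgs_stranded (assuming event
counters are compiled in).  A tiny reader, for illustration only:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/proc/vmstat", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			if (strstr(line, "unevictable_pgs_munlocked") ||
			    strstr(line, "unevictable_pgs_stranded"))
				fputs(line, stdout);
		fclose(f);
		return 0;
	}
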
@@ -166,9 +163,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
        VM_BUG_ON(end   > vma->vm_end);
        VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-       gup_flags = 0;
+       gup_flags = FOLL_TOUCH | FOLL_GET;
        if (vma->vm_flags & VM_WRITE)
-               gup_flags = GUP_FLAGS_WRITE;
+               gup_flags |= FOLL_WRITE;
 
        while (nr_pages > 0) {
                int i;
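
The effect of passing FOLL_TOUCH|FOLL_GET (plus FOLL_WRITE for writable vmas)
is that get_user_pages() faults every page of the range into place.  From
userspace that is observable with mincore(2) after an mlock() call; a sketch
under the assumption of a 4-page anonymous mapping, purely for illustration:

	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long psz = sysconf(_SC_PAGESIZE);
		size_t len = 4 * psz;
		unsigned char vec[4];
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;
		mlock(p, len);	/* __mlock_vma_pages_range() faults in each page */
		if (mincore(p, len, vec) == 0)
			for (int i = 0; i < 4; i++)
				printf("page %d resident: %d\n", i, vec[i] & 1);
		munlock(p, len);
		munmap(p, len);
		return 0;
	}
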
@@ -198,17 +195,26 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
                for (i = 0; i < ret; i++) {
                        struct page *page = pages[i];
 
-                       lock_page(page);
-                       /*
-                        * Because we lock page here and migration is blocked
-                        * by the elevated reference, we need only check for
-                        * file-cache page truncation.  This page->mapping
-                        * check also neatly skips over the ZERO_PAGE(),
-                        * though if that's common we'd prefer not to lock it.
-                        */
-                       if (page->mapping)
-                               mlock_vma_page(page);
-                       unlock_page(page);
+                       if (page->mapping) {
+                               /*
+                                * That preliminary check is mainly to avoid
+                                * the pointless overhead of lock_page on the
+                                * ZERO_PAGE: which might bounce very badly if
+                                * there is contention.  However, we're still
+                                * dirtying its cacheline with get/put_page:
+                                * we'll add another __get_user_pages flag to
+                                * avoid it if that case turns out to matter.
+                                */
+                               lock_page(page);
+                               /*
+                                * Because we lock page here and migration is
+                                * blocked by the elevated reference, we need
+                                * only check for file-cache page truncation.
+                                */
+                               if (page->mapping)
+                                       mlock_vma_page(page);
+                               unlock_page(page);
+                       }
                        put_page(page); /* ref from get_user_pages() */
                }
 
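
The shape of the new block is the classic check / lock / re-check idiom: a
cheap unlocked test skips the ZERO_PAGE, and the same test is repeated under
the page lock because truncation can clear page->mapping in between.  A
generic pthread sketch of that idiom (names are illustrative; 'mapping'
stands in for the page field, not any real API):

	#include <pthread.h>

	struct obj {
		pthread_mutex_t lock;
		void *mapping;		/* NULL once "truncated" */
	};

	static void maybe_process(struct obj *o)
	{
		if (!o->mapping)	/* preliminary check: skip lock overhead */
			return;
		pthread_mutex_lock(&o->lock);
		if (o->mapping)		/* re-check: may have changed meanwhile */
			;		/* ... the mlock_vma_page() equivalent ... */
		pthread_mutex_unlock(&o->lock);
	}

	int main(void)
	{
		struct obj o = { PTHREAD_MUTEX_INITIALIZER, &o };

		maybe_process(&o);
		return 0;
	}
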
@@ -309,9 +315,23 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
        vma->vm_flags &= ~VM_LOCKED;
 
        for (addr = start; addr < end; addr += PAGE_SIZE) {
-               struct page *page = follow_page(vma, addr, FOLL_GET);
-               if (page) {
+               struct page *page;
+               /*
+                * Although FOLL_DUMP is intended for get_dump_page(),
+                * it just so happens that its special treatment of the
+                * ZERO_PAGE (returning an error instead of doing get_page)
+                * suits munlock very well (and if somehow an abnormal page
+                * has sneaked into the range, we won't oops here: great).
+                */
+               page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
+               if (page && !IS_ERR(page)) {
                        lock_page(page);
+                       /*
+                        * Like in __mlock_vma_pages_range(),
+                        * because we lock page here and migration is
+                        * blocked by the elevated reference, we need
+                        * only check for file-cache page truncation.
+                        */
                        if (page->mapping)
                                munlock_vma_page(page);
                        unlock_page(page);
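
The "page && !IS_ERR(page)" test exists because follow_page() returns NULL
for a hole in the range but, with FOLL_DUMP, an ERR_PTR-encoded error for the
ZERO_PAGE.  The kernel's pointer-error encoding (include/linux/err.h) is
small enough to sketch standalone; lookup() below is a toy stand-in for
follow_page(), not the real interface:

	#include <stdio.h>
	#include <errno.h>

	#define MAX_ERRNO	4095

	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	static void *lookup(int kind)	/* 0: hole, 1: error, 2: object */
	{
		static int object;

		if (kind == 0)
			return NULL;			/* a hole in the range */
		if (kind == 1)
			return ERR_PTR(-ENOENT);	/* FOLL_DUMP on ZERO_PAGE */
		return &object;
	}

	int main(void)
	{
		for (int kind = 0; kind < 3; kind++) {
			void *p = lookup(kind);

			if (p && !IS_ERR(p))
				printf("kind %d: got object %p\n", kind, p);
			else if (IS_ERR(p))
				printf("kind %d: error %ld\n", kind, PTR_ERR(p));
			else
				printf("kind %d: hole\n", kind);
		}
		return 0;
	}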