}
EXPORT_SYMBOL(can_do_mlock);
-#ifdef CONFIG_UNEVICTABLE_LRU
/*
* Mlocked pages are marked with PageMlocked() flag for efficient testing
* in vmscan and, possibly, the fault path; and to support semi-accurate
return;
}
+ dec_zone_page_state(page, NR_MLOCK);
+ count_vm_event(UNEVICTABLE_PGCLEARED);
if (!isolate_lru_page(page)) {
putback_lru_page(page);
} else {
/*
- * Page not on the LRU yet. Flush all pagevecs and retry.
+ * We lost the race. The page has already moved to the evictable list.
*/
- lru_add_drain_all();
- if (!isolate_lru_page(page))
- putback_lru_page(page);
+ if (PageUnevictable(page))
+ count_vm_event(UNEVICTABLE_PGSTRANDED);
}
}
{
BUG_ON(!PageLocked(page));
- if (!TestSetPageMlocked(page) && !isolate_lru_page(page))
- putback_lru_page(page);
+ if (!TestSetPageMlocked(page)) {
+ inc_zone_page_state(page, NR_MLOCK);
+ count_vm_event(UNEVICTABLE_PGMLOCKED);
+ if (!isolate_lru_page(page))
+ putback_lru_page(page);
+ }
}
/*
{
BUG_ON(!PageLocked(page));
- if (TestClearPageMlocked(page) && !isolate_lru_page(page)) {
- try_to_munlock(page);
- putback_lru_page(page);
+ if (TestClearPageMlocked(page)) {
+ dec_zone_page_state(page, NR_MLOCK);
+ if (!isolate_lru_page(page)) {
+ int ret = try_to_munlock(page);
+ /*
+ * Did try_to_munlock() succeed or punt?
+ */
+ if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
+ count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+
+ putback_lru_page(page);
+ } else {
+ /*
+ * We lost the race. Let try_to_unmap() deal
+ * with it. At least we get the page state and
+ * mlock stats right. However, the page is still on
+ * the unevictable list. We'll fix that up when
+ * the page is eventually freed or we scan the
+ * unevictable list.
+ */
+ if (PageUnevictable(page))
+ count_vm_event(UNEVICTABLE_PGSTRANDED);
+ else
+ count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+ }
}
}
-/*
- * mlock a range of pages in the vma.
+/**
+ * __mlock_vma_pages_range() - mlock a range of pages in the vma.
+ * @vma: target vma
+ * @start: start address
+ * @end: end address
*
* This takes care of making the pages present too.
*
- * vma->vm_mm->mmap_sem must be held for write.
+ * Return 0 on success, or a negative error code on error.
+ *
+ * vma->vm_mm->mmap_sem must be held for at least read.
*/
-static int __mlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+static long __mlock_vma_pages_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long addr = start;
struct page *pages[16]; /* 16 gives a reasonable batch */
- int write = !!(vma->vm_flags & VM_WRITE);
int nr_pages = (end - start) / PAGE_SIZE;
- int ret;
+ int ret = 0;
+ int gup_flags;
- VM_BUG_ON(start & ~PAGE_MASK || end & ~PAGE_MASK);
- VM_BUG_ON(start < vma->vm_start || end > vma->vm_end);
- VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
+ VM_BUG_ON(start & ~PAGE_MASK);
+ VM_BUG_ON(end & ~PAGE_MASK);
+ VM_BUG_ON(start < vma->vm_start);
+ VM_BUG_ON(end > vma->vm_end);
+ VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
- lru_add_drain_all(); /* push cached pages to LRU */
+ gup_flags = FOLL_TOUCH | FOLL_GET;
+ if (vma->vm_flags & VM_WRITE)
+ gup_flags |= FOLL_WRITE;
while (nr_pages > 0) {
int i;
* disable migration of this page. However, page may
* still be truncated out from under us.
*/
- ret = get_user_pages(current, mm, addr,
+ ret = __get_user_pages(current, mm, addr,
min_t(int, nr_pages, ARRAY_SIZE(pages)),
- write, 0, pages, NULL);
+ gup_flags, pages, NULL);
/*
* This can happen for, e.g., VM_NONLINEAR regions before
* a page has been allocated and mapped at a given offset,
* or for addresses that map beyond end of a file.
- * We'll mlock the the pages if/when they get faulted in.
+ * We'll mlock the pages if/when they get faulted in.
*/
if (ret < 0)
break;
- if (ret == 0) {
- /*
- * We know the vma is there, so the only time
- * we cannot get a single page should be an
- * error (ret < 0) case.
- */
- WARN_ON(1);
- break;
- }
lru_add_drain(); /* push cached pages to LRU */
for (i = 0; i < ret; i++) {
struct page *page = pages[i];
- lock_page(page);
- /*
- * Because we lock page here and migration is blocked
- * by the elevated reference, we need only check for
- * page truncation (file-cache only).
- */
- if (page->mapping)
- mlock_vma_page(page);
- unlock_page(page);
- put_page(page); /* ref from get_user_pages() */
-
- /*
- * here we assume that get_user_pages() has given us
- * a list of virtually contiguous pages.
- */
- addr += PAGE_SIZE; /* for next get_user_pages() */
- nr_pages--;
- }
- }
-
- lru_add_drain_all(); /* to update stats */
-
- return 0; /* count entire vma as locked_vm */
-}
-
-/*
- * private structure for munlock page table walk
- */
-struct munlock_page_walk {
- struct vm_area_struct *vma;
- pmd_t *pmd; /* for migration_entry_wait() */
-};
-
-/*
- * munlock normal pages for present ptes
- */
-static int __munlock_pte_handler(pte_t *ptep, unsigned long addr,
- unsigned long end, struct mm_walk *walk)
-{
- struct munlock_page_walk *mpw = walk->private;
- swp_entry_t entry;
- struct page *page;
- pte_t pte;
-
-retry:
- pte = *ptep;
- /*
- * If it's a swap pte, we might be racing with page migration.
- */
- if (unlikely(!pte_present(pte))) {
- if (!is_swap_pte(pte))
- goto out;
- entry = pte_to_swp_entry(pte);
- if (is_migration_entry(entry)) {
- migration_entry_wait(mpw->vma->vm_mm, mpw->pmd, addr);
- goto retry;
+ if (page->mapping) {
+ /*
+ * That preliminary check is mainly to avoid
+ * the pointless overhead of lock_page on the
+ * ZERO_PAGE: which might bounce very badly if
+ * there is contention. However, we're still
+ * dirtying its cacheline with get/put_page:
+ * we'll add another __get_user_pages flag to
+ * avoid it if that case turns out to matter.
+ */
+ lock_page(page);
+ /*
+ * Because we lock page here and migration is
+ * blocked by the elevated reference, we need
+ * only check for file-cache page truncation.
+ */
+ if (page->mapping)
+ mlock_vma_page(page);
+ unlock_page(page);
+ }
+ put_page(page); /* ref from get_user_pages() */
}
- goto out;
- }
-
- page = vm_normal_page(mpw->vma, addr, pte);
- if (!page)
- goto out;
- lock_page(page);
- if (!page->mapping) {
- unlock_page(page);
- goto retry;
+ addr += ret * PAGE_SIZE;
+ nr_pages -= ret;
+ ret = 0;
}
- munlock_vma_page(page);
- unlock_page(page);
-out:
- return 0;
+ return ret; /* 0 or negative error code */
}
/*
- * Save pmd for pte handler for waiting on migration entries
+ * Convert get_user_pages() return value to POSIX mlock() error
*/
-static int __munlock_pmd_handler(pmd_t *pmd, unsigned long addr,
- unsigned long end, struct mm_walk *walk)
+static int __mlock_posix_error_return(long retval)
{
- struct munlock_page_walk *mpw = walk->private;
-
- mpw->pmd = pmd;
- return 0;
+ if (retval == -EFAULT)
+ retval = -ENOMEM;
+ else if (retval == -ENOMEM)
+ retval = -EAGAIN;
+ return retval;
}
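/*
 * Illustrative sketch (editorial addition, not part of this patch):
 * roughly how a caller such as mlock_fixup() is expected to use the
 * conversion above.  POSIX specifies ENOMEM when part of the range is
 * not mapped (which get_user_pages() reports as -EFAULT) and EAGAIN
 * when the pages could not be locked (e.g. an allocation failure,
 * reported as -ENOMEM).  The function name below is hypothetical.
 */
static int example_mlock_range(struct vm_area_struct *vma,
			       unsigned long start, unsigned long end)
{
	long nr = __mlock_vma_pages_range(vma, start, end);

	if (nr < 0)
		return __mlock_posix_error_return(nr); /* -ENOMEM or -EAGAIN */
	return 0;
}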
-
-/*
- * munlock a range of pages in the vma using standard page table walk.
+/**
+ * mlock_vma_pages_range() - mlock pages in specified vma range.
+ * @vma: the vma containing the specified address range
+ * @start: starting address in @vma to mlock
+ * @end: end address [+1] in @vma to mlock
*
- * vma->vm_mm->mmap_sem must be held for write.
- */
-static void __munlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- struct mm_struct *mm = vma->vm_mm;
- struct munlock_page_walk mpw = {
- .vma = vma,
- };
- struct mm_walk munlock_page_walk = {
- .pmd_entry = __munlock_pmd_handler,
- .pte_entry = __munlock_pte_handler,
- .private = &mpw,
- .mm = mm,
- };
-
- VM_BUG_ON(start & ~PAGE_MASK || end & ~PAGE_MASK);
- VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
- VM_BUG_ON(start < vma->vm_start);
- VM_BUG_ON(end > vma->vm_end);
-
- lru_add_drain_all(); /* push cached pages to LRU */
- walk_page_range(start, end, &munlock_page_walk);
- lru_add_drain_all(); /* to update stats */
-}
-
-#else /* CONFIG_UNEVICTABLE_LRU */
-
-/*
- * Just make pages present if VM_LOCKED. No-op if unlocking.
- */
-static int __mlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- if (vma->vm_flags & VM_LOCKED)
- make_pages_present(start, end);
- return 0;
-}
-
-/*
- * munlock a range of pages in the vma -- no-op.
- */
-static void __munlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
-}
-#endif /* CONFIG_UNEVICTABLE_LRU */
-
-/*
- * mlock all pages in this vma range. For mmap()/mremap()/...
+ * For mmap()/mremap()/expansion of mlocked vma.
+ *
+ * Return 0 on success for "normal" vmas.
+ *
+ * Return the number of pages [> 0] to be removed from locked_vm on
+ * success for "special" vmas.
*/
-int mlock_vma_pages_range(struct vm_area_struct *vma,
+long mlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
int nr_pages = (end - start) / PAGE_SIZE;
if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
is_vm_hugetlb_page(vma) ||
- vma == get_gate_vma(current)))
- return __mlock_vma_pages_range(vma, start, end);
+ vma == get_gate_vma(current))) {
+
+ __mlock_vma_pages_range(vma, start, end);
+
+ /* Hide errors from mmap() and other callers */
+ return 0;
+ }
/*
* User mapped kernel pages or huge pages:
no_mlock:
vma->vm_flags &= ~VM_LOCKED; /* and don't come back! */
- return nr_pages; /* pages NOT mlocked */
+ return nr_pages; /* error or pages NOT mlocked */
}
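/*
 * Illustrative sketch (editorial addition, not part of this patch):
 * how an mmap()-style caller consumes the return value above.  The
 * caller charges the whole range to locked_vm up front; for "special"
 * vmas mlock_vma_pages_range() returns the number of pages it refused
 * to mlock, which the caller subtracts again.  The function name below
 * is hypothetical.
 */
static void example_account_mlocked_vma(struct mm_struct *mm,
					struct vm_area_struct *vma)
{
	long nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	mm->locked_vm += nr_pages;		/* optimistic charge */
	nr_pages = mlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
	if (nr_pages > 0)			/* special vma: not mlocked */
		mm->locked_vm -= nr_pages;
}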
-
/*
- * munlock all pages in vma. For munmap() and exit().
+ * munlock_vma_pages_range() - munlock all pages in the vma range.
+ * @vma: vma containing the range to be munlock()ed.
+ * @start: start address in @vma of the range
+ * @end: end of range in @vma.
+ *
+ * For mremap(), munmap() and exit().
+ *
+ * Called with @vma VM_LOCKED.
+ *
+ * Returns with VM_LOCKED cleared. Callers must be prepared to
+ * deal with this.
+ *
+ * We don't save and restore VM_LOCKED here because pages are
+ * still on the LRU. In the unmap path, pages might be scanned by
+ * reclaim and re-mlocked by try_to_{munlock|unmap} before we
+ * unmap and free them; that would result in freeing mlocked
+ * pages.
*/
-void munlock_vma_pages_all(struct vm_area_struct *vma)
+void munlock_vma_pages_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
{
+ unsigned long addr;
+
+ lru_add_drain();
vma->vm_flags &= ~VM_LOCKED;
- __munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
+
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
+ struct page *page;
+ /*
+ * Although FOLL_DUMP is intended for get_dump_page(),
+ * it just so happens that its special treatment of the
+ * ZERO_PAGE (returning an error instead of doing get_page)
+ * suits munlock very well (and if somehow an abnormal page
+ * has sneaked into the range, we won't oops here: great).
+ */
+ page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
+ if (page && !IS_ERR(page)) {
+ lock_page(page);
+ /*
+ * Like in __mlock_vma_pages_range(),
+ * because we lock page here and migration is
+ * blocked by the elevated reference, we need
+ * only check for file-cache page truncation.
+ */
+ if (page->mapping)
+ munlock_vma_page(page);
+ unlock_page(page);
+ put_page(page);
+ }
+ cond_resched();
+ }
}
/*
* It's okay if try_to_unmap_one unmaps a page just after we
* set VM_LOCKED; __mlock_vma_pages_range() will bring it back.
*/
- vma->vm_flags = newflags;
if (lock) {
+ vma->vm_flags = newflags;
ret = __mlock_vma_pages_range(vma, start, end);
- if (ret > 0) {
- mm->locked_vm -= ret;
- ret = 0;
- }
- } else
- __munlock_vma_pages_range(vma, start, end);
+ if (ret < 0)
+ ret = __mlock_posix_error_return(ret);
+ } else {
+ munlock_vma_pages_range(vma, start, end);
+ }
out:
*prev = vma;
return error;
}
-asmlinkage long sys_mlock(unsigned long start, size_t len)
+SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
unsigned long locked;
unsigned long lock_limit;
if (!can_do_mlock())
return -EPERM;
+ lru_add_drain_all(); /* flush pagevec */
+
down_write(&current->mm->mmap_sem);
len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
start &= PAGE_MASK;
return error;
}
-asmlinkage long sys_munlock(unsigned long start, size_t len)
+SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
int ret;
return 0;
}
-asmlinkage long sys_mlockall(int flags)
+SYSCALL_DEFINE1(mlockall, int, flags)
{
unsigned long lock_limit;
int ret = -EINVAL;
if (!can_do_mlock())
goto out;
+ lru_add_drain_all(); /* flush pagevec */
+
down_write(&current->mm->mmap_sem);
lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
return ret;
}
-asmlinkage long sys_munlockall(void)
+SYSCALL_DEFINE0(munlockall)
{
int ret;
spin_unlock(&shmlock_user_lock);
free_uid(user);
}
+
+int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
+ size_t size)
+{
+ unsigned long lim, vm, pgsz;
+ int error = -ENOMEM;
+
+ pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ down_write(&mm->mmap_sem);
+
+ lim = rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+ vm = mm->total_vm + pgsz;
+ if (lim < vm)
+ goto out;
+
+ lim = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+ vm = mm->locked_vm + pgsz;
+ if (lim < vm)
+ goto out;
+
+ mm->total_vm += pgsz;
+ mm->locked_vm += pgsz;
+
+ error = 0;
+ out:
+ up_write(&mm->mmap_sem);
+ return error;
+}
+
+void refund_locked_memory(struct mm_struct *mm, size_t size)
+{
+ unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ down_write(&mm->mmap_sem);
+
+ mm->total_vm -= pgsz;
+ mm->locked_vm -= pgsz;
+
+ up_write(&mm->mmap_sem);
+}
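/*
 * Illustrative sketch (editorial addition, not part of this patch):
 * a hypothetical driver that pins a user buffer, charging it against
 * RLIMIT_AS/RLIMIT_MEMLOCK with account_locked_memory() and refunding
 * the charge on teardown with refund_locked_memory().  Both example
 * function names below are hypothetical; account_locked_memory()
 * takes mm->mmap_sem itself, so the caller must not hold it.
 */
static int example_pin_buffer(struct mm_struct *mm, size_t size)
{
	int err;

	/* charge PAGE_ALIGN(size) against total_vm and locked_vm */
	err = account_locked_memory(mm, current->signal->rlim, size);
	if (err)
		return err;	/* -ENOMEM if either rlimit would be exceeded */

	/* ... actually pin/map the buffer here ... */

	return 0;
}

static void example_unpin_buffer(struct mm_struct *mm, size_t size)
{
	/* ... unpin/unmap the buffer here ... */

	refund_locked_memory(mm, size);	/* give the accounting back */
}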