putback_lru_page(page);
} else {
/*
- * Page not on the LRU yet. Flush all pagevecs and retry.
+ * We lost the race; the page already moved to the evictable list.
*/
- lru_add_drain_all();
- if (!isolate_lru_page(page))
- putback_lru_page(page);
- else if (PageUnevictable(page))
+ if (PageUnevictable(page))
count_vm_event(UNEVICTABLE_PGSTRANDED);
-
}
}
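This first hunk appears to sit at the tail of __clear_page_mlock() in mm/mlock.c. For context, a reconstruction of the whole function after the change might look like the sketch below (an illustration inferred from the hunk, not part of the patch): on a lost isolation race the code no longer drains pagevecs and retries, it only accounts the page as stranded if it is still marked unevictable.

void __clear_page_mlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));

	dec_zone_page_state(page, NR_MLOCK);
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);		/* re-evaluates evictability */
	} else {
		/*
		 * We lost the race; the page already moved to the
		 * evictable list, so only account it if it is still
		 * flagged unevictable there.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}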
unsigned long addr = start;
struct page *pages[16]; /* 16 gives a reasonable batch */
int nr_pages = (end - start) / PAGE_SIZE;
- int ret;
+ int ret = 0;
int gup_flags = 0;
VM_BUG_ON(start & ~PAGE_MASK);
if (vma->vm_flags & VM_WRITE)
gup_flags |= GUP_FLAGS_WRITE;
- lru_add_drain_all(); /* push cached pages to LRU */
-
while (nr_pages > 0) {
int i;
addr += PAGE_SIZE; /* for next get_user_pages() */
nr_pages--;
}
+ ret = 0;
}
- lru_add_drain_all(); /* to update stats */
+ return ret; /* count entire vma as locked_vm */
+}
- return 0; /* count entire vma as locked_vm */
+/*
+ * convert a get_user_pages() return value to a POSIX mlock() error
+ */
+static int __mlock_posix_error_return(long retval)
+{
+ if (retval == -EFAULT)
+ retval = -ENOMEM;
+ else if (retval == -ENOMEM)
+ retval = -EAGAIN;
+ return retval;
}
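POSIX specifies that mlock() fail with ENOMEM when part of the range is not mapped and with EAGAIN when some or all of the memory could not be locked, while get_user_pages() reports those conditions as -EFAULT and -ENOMEM respectively. A minimal host-side sketch of the mapping (the helper is copied from the hunk above; errno.h supplies the same error constants outside the kernel):

#include <errno.h>
#include <stdio.h>

static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;	/* range not (fully) mapped */
	else if (retval == -ENOMEM)
		retval = -EAGAIN;	/* pages could not be faulted in */
	return retval;
}

int main(void)
{
	/* -EFAULT becomes -ENOMEM, -ENOMEM becomes -EAGAIN */
	printf("-EFAULT -> %d (-ENOMEM == %d)\n",
	       __mlock_posix_error_return(-EFAULT), -ENOMEM);
	printf("-ENOMEM -> %d (-EAGAIN == %d)\n",
	       __mlock_posix_error_return(-ENOMEM), -EAGAIN);
	return 0;
}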
#else /* CONFIG_UNEVICTABLE_LRU */
int mlock)
{
if (mlock && (vma->vm_flags & VM_LOCKED))
- make_pages_present(start, end);
+ return make_pages_present(start, end);
return 0;
}
+
+static inline int __mlock_posix_error_return(long retval)
+{
+ return 0;
+}
+
#endif /* CONFIG_UNEVICTABLE_LRU */
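The translation is visible from userspace on a kernel built with CONFIG_UNEVICTABLE_LRU and this patch. A hypothetical demo (illustrative only; exact errno values can vary with kernel version and RLIMIT_MEMLOCK): calling mlock() on a mapped but inaccessible PROT_NONE region makes get_user_pages() fail with -EFAULT inside the kernel, which now surfaces as the POSIX-mandated ENOMEM.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	/* mapped but inaccessible: faulting these pages fails */
	void *p = mmap(NULL, page, PROT_NONE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	if (mlock(p, page) == 0)
		puts("unexpected: locked a PROT_NONE page");
	else if (errno == ENOMEM)
		puts("ENOMEM: -EFAULT translated for POSIX mlock()");
	else
		printf("errno = %s\n", strerror(errno));
	munmap(p, page);
	return 0;
}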
/**
downgrade_write(&mm->mmap_sem);
ret = __mlock_vma_pages_range(vma, start, end, 1);
- if (ret > 0) {
- mm->locked_vm -= ret;
- ret = 0;
- }
+
/*
* Need to reacquire mmap sem in write mode, as our callers
* expect this. We have no support for atomically upgrading
/* non-NULL *prev must contain @start, but need to check @end */
if (!(*prev) || end > (*prev)->vm_end)
ret = -ENOMEM;
+ else if (ret > 0) {
+ mm->locked_vm -= ret;
+ ret = 0;
+ } else
+ ret = __mlock_posix_error_return(ret); /* translate if needed */
} else {
/*
* TODO: for unlocking, pages will already be resident, so
if (!can_do_mlock())
return -EPERM;
+ lru_add_drain_all(); /* flush pagevec */
+
down_write(&current->mm->mmap_sem);
len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
start &= PAGE_MASK;
if (!can_do_mlock())
goto out;
+ lru_add_drain_all(); /* flush pagevec */
+
down_write(&current->mm->mmap_sem);
lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
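Note the placement in these last two hunks: lru_add_drain_all() moves out of __mlock_vma_pages_range() and runs before mmap_sem is taken. lru_add_drain_all() schedules work on every CPU and sleeps until it completes, so calling it under mmap_sem would lengthen the hold time and risk stalling behind workqueue items. A simplified sketch of the resulting ordering in the syscall entry (names taken from the hunks; the elided body is an assumption):

asmlinkage long sys_mlock(unsigned long start, size_t len)
{
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevecs: sleeps and runs work
				 * on every CPU, so do it before taking
				 * mmap_sem */

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	/* ... RLIMIT_MEMLOCK check and do_mlock(), elided ... */
	up_write(&current->mm->mmap_sem);
	return error;
}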