memcg: fix reclaim result checks
diff --git a/mm/mlock.c b/mm/mlock.c
index bce1b22..e125156 100644
@@ -66,14 +66,10 @@ void __clear_page_mlock(struct page *page)
                putback_lru_page(page);
        } else {
                /*
-                * Page not on the LRU yet.  Flush all pagevecs and retry.
+                * We lost the race; the page has already moved to the evictable list.
                 */
-               lru_add_drain_all();
-               if (!isolate_lru_page(page))
-                       putback_lru_page(page);
-               else if (PageUnevictable(page))
+               if (PageUnevictable(page))
                        count_vm_event(UNEVICTABLE_PGSTRANDED);
-
        }
 }
 
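For reference, this is how the else branch of __clear_page_mlock() reads after the hunk, with the comment expanded; the expanded wording is an interpretation of the patch's one-line comment, not text from the patch:

        } else {
                /*
                 * We lost the race: the page could not be isolated
                 * because someone else is already moving it to an
                 * evictable list.  There is nothing left to put back;
                 * only account the page as stranded if it still looks
                 * unevictable.
                 */
                if (PageUnevictable(page))
                        count_vm_event(UNEVICTABLE_PGSTRANDED);
        }

The drain-and-retry fallback is gone entirely; as the sys_mlock()/sys_mlockall() hunks further down show, pagevecs are now drained once at syscall entry rather than on every page that fails isolation here.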
@@ -166,7 +162,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
        unsigned long addr = start;
        struct page *pages[16]; /* 16 gives a reasonable batch */
        int nr_pages = (end - start) / PAGE_SIZE;
-       int ret;
+       int ret = 0;
        int gup_flags = 0;
 
        VM_BUG_ON(start & ~PAGE_MASK);
@@ -177,18 +173,17 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
                  (atomic_read(&mm->mm_users) != 0));
 
        /*
-        * mlock:   don't page populate if page has PROT_NONE permission.
-        * munlock: the pages always do munlock althrough
-        *          its has PROT_NONE permission.
+        * mlock:   don't populate pages if the vma has PROT_NONE permission.
+        * munlock: always do munlock even if the vma has PROT_NONE
+        *          permission or SIGKILL is pending.
         */
        if (!mlock)
-               gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS;
+               gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS |
+                            GUP_FLAGS_IGNORE_SIGKILL;
 
        if (vma->vm_flags & VM_WRITE)
                gup_flags |= GUP_FLAGS_WRITE;
 
-       lru_add_drain_all();    /* push cached pages to LRU */
-
        while (nr_pages > 0) {
                int i;
 
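A sketch of the resulting gup_flags setup with expanded comments; the rationale wording is an interpretation of the patch's comment above (munlock must proceed for PROT_NONE vmas and for callers with a pending SIGKILL, e.g. a task munlocking on its way out), not text from the patch:

        int gup_flags = 0;

        if (!mlock)
                /*
                 * munlock path: pages must still be found and cleared
                 * when the vma is PROT_NONE, and even when the caller
                 * has SIGKILL pending, so keep get_user_pages() from
                 * bailing out early on either condition.
                 */
                gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS |
                             GUP_FLAGS_IGNORE_SIGKILL;

        if (vma->vm_flags & VM_WRITE)
                gup_flags |= GUP_FLAGS_WRITE;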
@@ -248,11 +243,22 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
                        addr += PAGE_SIZE;      /* for next get_user_pages() */
                        nr_pages--;
                }
+               ret = 0;
        }
 
-       lru_add_drain_all();    /* to update stats */
+       return ret;     /* 0 or negative error code */
+}
 
-       return 0;       /* count entire vma as locked_vm */
+/*
+ * Convert get_user_pages() return values to POSIX mlock() error codes.
+ */
+static int __mlock_posix_error_return(long retval)
+{
+       if (retval == -EFAULT)
+               retval = -ENOMEM;
+       else if (retval == -ENOMEM)
+               retval = -EAGAIN;
+       return retval;
 }
 
 #else /* CONFIG_UNEVICTABLE_LRU */
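__mlock_posix_error_return() maps get_user_pages() failures onto the errno values POSIX specifies for mlock(): -EFAULT (part of the range is not mapped at all) becomes -ENOMEM ('some or all of the address range does not exist'), and -ENOMEM (a page could not be faulted in) becomes -EAGAIN ('some or all of the memory could not be locked'). A condensed view of how the mlock_fixup() hunk below uses it:

        long ret = __mlock_vma_pages_range(vma, start, end, 1);
        if (ret < 0)
                ret = __mlock_posix_error_return(ret);  /* -EFAULT -> -ENOMEM, -ENOMEM -> -EAGAIN */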
@@ -265,9 +271,15 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
                                   int mlock)
 {
        if (mlock && (vma->vm_flags & VM_LOCKED))
-               make_pages_present(start, end);
+               return make_pages_present(start, end);
+       return 0;
+}
+
+static inline int __mlock_posix_error_return(long retval)
+{
        return 0;
 }
+
 #endif /* CONFIG_UNEVICTABLE_LRU */
 
 /**
@@ -434,10 +446,7 @@ success:
                downgrade_write(&mm->mmap_sem);
 
                ret = __mlock_vma_pages_range(vma, start, end, 1);
-               if (ret > 0) {
-                       mm->locked_vm -= ret;
-                       ret = 0;
-               }
+
                /*
                 * Need to reacquire mmap sem in write mode, as our callers
                 * expect this.  We have no support for atomically upgrading
@@ -451,6 +460,11 @@ success:
                /* non-NULL *prev must contain @start, but need to check @end */
                if (!(*prev) || end > (*prev)->vm_end)
                        ret = -ENOMEM;
+               else if (ret > 0) {
+                       mm->locked_vm -= ret;
+                       ret = 0;
+               } else
+                       ret = __mlock_posix_error_return(ret); /* translate if needed */
        } else {
                /*
                 * TODO:  for unlocking, pages will already be resident, so
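Putting the two mlock_fixup() hunks together, the success path now decides in this order (a condensed reconstruction; the elided middle is the existing lock re-acquisition code, untouched by this patch):

        ret = __mlock_vma_pages_range(vma, start, end, 1);

        /* ... reacquire mmap_sem for writing and look *prev up again ... */

        if (!(*prev) || end > (*prev)->vm_end)
                ret = -ENOMEM;          /* vma changed while mmap_sem was dropped */
        else if (ret > 0) {             /* pages that must not stay charged */
                mm->locked_vm -= ret;
                ret = 0;
        } else
                ret = __mlock_posix_error_return(ret);  /* 0 stays 0 */

The -ENOMEM check has to come first: if the vma was split or unmapped while mmap_sem was dropped, that failure must win over whatever __mlock_vma_pages_range() returned. And since the function now returns only 0 or a negative error, the ret > 0 branch looks like a defensive leftover from the old return convention.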
@@ -525,6 +539,8 @@ asmlinkage long sys_mlock(unsigned long start, size_t len)
        if (!can_do_mlock())
                return -EPERM;
 
+       lru_add_drain_all();    /* flush per-cpu pagevecs */
+
        down_write(&current->mm->mmap_sem);
        len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
        start &= PAGE_MASK;
@@ -591,6 +607,8 @@ asmlinkage long sys_mlockall(int flags)
        if (!can_do_mlock())
                goto out;
 
+       lru_add_drain_all();    /* flush per-cpu pagevecs */
+
        down_write(&current->mm->mmap_sem);
 
        lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
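Both syscall hunks hoist the pagevec drain out of the locked region: lru_add_drain_all() now runs once per syscall, before mmap_sem is taken, replacing the per-range calls deleted from __mlock_vma_pages_range() above. The resulting ordering, condensed from the sys_mlock() hunk:

        lru_add_drain_all();    /* push cached pages onto the LRU first */

        down_write(&current->mm->mmap_sem);
        /* ... align the range, check rlimits, do_mlock() ... */
        up_write(&current->mm->mmap_sem);

With this ordering the drain, which has to run on every CPU, no longer happens while mmap_sem is held; whether the motivation was cost or lock ordering is not stated in the diff.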
@@ -650,3 +668,48 @@ void user_shm_unlock(size_t size, struct user_struct *user)
        spin_unlock(&shmlock_user_lock);
        free_uid(user);
 }
+
+void *alloc_locked_buffer(size_t size)
+{
+       unsigned long rlim, vm, pgsz;
+       void *buffer = NULL;
+
+       pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+       down_write(&current->mm->mmap_sem);
+
+       rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+       vm   = current->mm->total_vm + pgsz;
+       if (rlim < vm)
+               goto out;
+
+       rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+       vm   = current->mm->locked_vm + pgsz;
+       if (rlim < vm)
+               goto out;
+
+       buffer = kzalloc(size, GFP_KERNEL);
+       if (!buffer)
+               goto out;
+
+       current->mm->total_vm  += pgsz;
+       current->mm->locked_vm += pgsz;
+
+ out:
+       up_write(&current->mm->mmap_sem);
+       return buffer;
+}
+
+void free_locked_buffer(void *buffer, size_t size)
+{
+       unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+       down_write(&current->mm->mmap_sem);
+
+       current->mm->total_vm  -= pgsz;
+       current->mm->locked_vm -= pgsz;
+
+       up_write(&current->mm->mmap_sem);
+
+       kfree(buffer);
+}
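alloc_locked_buffer() charges a kzalloc()'d buffer against the caller's rlimits: the size is rounded up to whole pages, the allocation is refused if it would push total_vm past RLIMIT_AS or locked_vm past RLIMIT_MEMLOCK, and on success both counters are bumped under the mmap_sem write lock; free_locked_buffer() reverses the accounting and frees the memory. A hypothetical caller, purely for illustration (the surrounding names are not from this patch):

        size_t size = 16 * PAGE_SIZE;
        void *buf;

        buf = alloc_locked_buffer(size);        /* NULL: over rlimit or kzalloc failed */
        if (!buf)
                return -ENOMEM;

        /* ... use buf as a kernel-side buffer accounted to the caller ... */

        free_locked_buffer(buf, size);          /* must pass the same size back */

Note that the buffer itself comes from kzalloc(), so it is unpageable regardless; the point of the accounting is to make this kernel-held memory visible to the calling task's RLIMIT_AS and RLIMIT_MEMLOCK limits.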