diff --git a/mm/mlock.c b/mm/mlock.c
index 8b47835..cbe9e05 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -60,15 +60,16 @@ void __clear_page_mlock(struct page *page)
                return;
        }
 
+       dec_zone_page_state(page, NR_MLOCK);
+       count_vm_event(UNEVICTABLE_PGCLEARED);
        if (!isolate_lru_page(page)) {
                putback_lru_page(page);
        } else {
                /*
-                * Page not on the LRU yet.  Flush all pagevecs and retry.
+                * We lost the race: the page has already been moved to the evictable list.
                 */
-               lru_add_drain_all();
-               if (!isolate_lru_page(page))
-                       putback_lru_page(page);
+               if (PageUnevictable(page))
+                       count_vm_event(UNEVICTABLE_PGSTRANDED);
        }
 }
 
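
The hunk above moves the NR_MLOCK and vm-event accounting ahead of the isolate/putback attempt, so the counters stay consistent even when the page cannot be isolated. Both are visible in /proc/vmstat on kernels built with CONFIG_UNEVICTABLE_LRU. A minimal userspace sketch to watch them move (counter names as exported by /proc/vmstat):

/* Observe the mlock counters this patch maintains: print the relevant
 * /proc/vmstat lines before and after mlock()/munlock(). */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static void dump_vmstat(const char *tag)
{
        char line[128];
        FILE *f = fopen("/proc/vmstat", "r");

        if (!f)
                return;
        while (fgets(line, sizeof(line), f))
                if (!strncmp(line, "nr_mlock", 8) ||
                    !strncmp(line, "unevictable_pgs_", 16))
                        printf("%-6s %s", tag, line);
        fclose(f);
}

int main(void)
{
        size_t len = 16 * 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        dump_vmstat("before");
        mlock(p, len);          /* NR_MLOCK and UNEVICTABLE_PGMLOCKED rise */
        dump_vmstat("locked");
        munlock(p, len);        /* NR_MLOCK drops, UNEVICTABLE_PGMUNLOCKED rises */
        dump_vmstat("after");
        munmap(p, len);
        return 0;
}
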
@@ -80,8 +81,12 @@ void mlock_vma_page(struct page *page)
 {
        BUG_ON(!PageLocked(page));
 
-       if (!TestSetPageMlocked(page) && !isolate_lru_page(page))
-               putback_lru_page(page);
+       if (!TestSetPageMlocked(page)) {
+               inc_zone_page_state(page, NR_MLOCK);
+               count_vm_event(UNEVICTABLE_PGMLOCKED);
+               if (!isolate_lru_page(page))
+                       putback_lru_page(page);
+       }
 }
 
 /*
@@ -106,9 +111,31 @@ static void munlock_vma_page(struct page *page)
 {
        BUG_ON(!PageLocked(page));
 
-       if (TestClearPageMlocked(page) && !isolate_lru_page(page)) {
-               try_to_munlock(page);
-               putback_lru_page(page);
+       if (TestClearPageMlocked(page)) {
+               dec_zone_page_state(page, NR_MLOCK);
+               if (!isolate_lru_page(page)) {
+                       int ret = try_to_munlock(page);
+                       /*
+                        * did try_to_munlock() succeed or punt?
+                        */
+                       if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
+                               count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+
+                       putback_lru_page(page);
+               } else {
+                       /*
+                        * We lost the race.  Let try_to_unmap() deal
+                        * with it.  At least we get the page state and
+                        * mlock stats right.  However, the page is still on
+                        * the unevictable list.  We'll fix that up when
+                        * the page is eventually freed or we scan the
+                        * unevictable list.
+                        */
+                       if (PageUnevictable(page))
+                               count_vm_event(UNEVICTABLE_PGSTRANDED);
+                       else
+                               count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+               }
        }
 }
 
@@ -135,7 +162,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
        unsigned long addr = start;
        struct page *pages[16]; /* 16 gives a reasonable batch */
        int nr_pages = (end - start) / PAGE_SIZE;
-       int ret;
+       int ret = 0;
        int gup_flags = 0;
 
        VM_BUG_ON(start & ~PAGE_MASK);
@@ -146,18 +173,17 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
                  (atomic_read(&mm->mm_users) != 0));
 
        /*
-        * mlock:   don't page populate if page has PROT_NONE permission.
-        * munlock: the pages always do munlock althrough
-        *          its has PROT_NONE permission.
+        * mlock:   don't populate pages if the vma has PROT_NONE permission.
+        * munlock: always munlock, even if the vma has PROT_NONE
+        *          permission or a SIGKILL is pending.
         */
        if (!mlock)
-               gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS;
+               gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS |
+                            GUP_FLAGS_IGNORE_SIGKILL;
 
        if (vma->vm_flags & VM_WRITE)
                gup_flags |= GUP_FLAGS_WRITE;
 
-       lru_add_drain_all();    /* push cached pages to LRU */
-
        while (nr_pages > 0) {
                int i;
 
@@ -217,11 +243,22 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
                        addr += PAGE_SIZE;      /* for next get_user_pages() */
                        nr_pages--;
                }
+               ret = 0;
        }
 
-       lru_add_drain_all();    /* to update stats */
+       return ret;     /* count entire vma as locked_vm */
+}
 
-       return 0;       /* count entire vma as locked_vm */
+/*
+ * Convert a get_user_pages() return value to a POSIX mlock() error
+ */
+static int __mlock_posix_error_return(long retval)
+{
+       if (retval == -EFAULT)
+               retval = -ENOMEM;
+       else if (retval == -ENOMEM)
+               retval = -EAGAIN;
+       return retval;
 }
 
 #else /* CONFIG_UNEVICTABLE_LRU */
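
The translation exists because get_user_pages() and POSIX disagree on error codes: POSIX wants ENOMEM when part of the range is not mapped (where get_user_pages() reports -EFAULT), and EAGAIN when the pages could not be locked right now (where the kernel reports -ENOMEM). The caller-visible result, sketched from userspace (exact behavior varies by kernel version):

/* mlock() on an unmapped range must fail with ENOMEM, not EFAULT. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        munmap(p, len);                 /* make the address range invalid */

        if (mlock(p, len) == -1)        /* expect ENOMEM per POSIX */
                printf("mlock: %s\n", strerror(errno));
        return 0;
}

Callers that treat EAGAIN as transient memory pressure can simply retry the mlock() call.
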
@@ -234,9 +271,15 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
                                   int mlock)
 {
        if (mlock && (vma->vm_flags & VM_LOCKED))
-               make_pages_present(start, end);
+               return make_pages_present(start, end);
        return 0;
 }
+
+static inline int __mlock_posix_error_return(long retval)
+{
+       return 0;
+}
+
 #endif /* CONFIG_UNEVICTABLE_LRU */
 
 /**
@@ -251,14 +294,10 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
  *
  * return number of pages [> 0] to be removed from locked_vm on success
  * of "special" vmas.
- *
- * return negative error if vma spanning @start-@range disappears while
- * mmap semaphore is dropped.  Unlikely?
  */
 long mlock_vma_pages_range(struct vm_area_struct *vma,
                        unsigned long start, unsigned long end)
 {
-       struct mm_struct *mm = vma->vm_mm;
        int nr_pages = (end - start) / PAGE_SIZE;
        BUG_ON(!(vma->vm_flags & VM_LOCKED));
 
@@ -271,20 +310,11 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
        if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
                        is_vm_hugetlb_page(vma) ||
                        vma == get_gate_vma(current))) {
-               long error;
-               downgrade_write(&mm->mmap_sem);
 
-               error = __mlock_vma_pages_range(vma, start, end, 1);
+               __mlock_vma_pages_range(vma, start, end, 1);
 
-               up_read(&mm->mmap_sem);
-               /* vma can change or disappear */
-               down_write(&mm->mmap_sem);
-               vma = find_vma(mm, start);
-               /* non-NULL vma must contain @start, but need to check @end */
-               if (!vma ||  end > vma->vm_end)
-                       return -ENOMEM;
-
-               return 0;       /* hide other errors from mmap(), et al */
+               /* Hide errors from mmap() and other callers */
+               return 0;
        }
 
        /*
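
With the mmap_sem drop/reacquire dance gone, the function no longer reports population failures at all; the "Hide errors" return above is what lets mmap(MAP_LOCKED) succeed even if some pages could not be faulted in immediately. A sketch of the caller-visible behavior (up-front rlimit checks can still make mmap() itself fail):

/* mmap(MAP_LOCKED) creates a VM_LOCKED vma and populates it through
 * mlock_vma_pages_range(); population errors are swallowed. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 1 << 20;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);

        if (p == MAP_FAILED)
                perror("mmap");         /* e.g. RLIMIT_MEMLOCK exceeded */
        else
                puts("MAP_LOCKED mapping created");
        return 0;
}
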
@@ -395,39 +425,14 @@ success:
        vma->vm_flags = newflags;
 
        if (lock) {
-               /*
-                * mmap_sem is currently held for write.  Downgrade the write
-                * lock to a read lock so that other faults, mmap scans, ...
-                * while we fault in all pages.
-                */
-               downgrade_write(&mm->mmap_sem);
-
                ret = __mlock_vma_pages_range(vma, start, end, 1);
+
                if (ret > 0) {
                        mm->locked_vm -= ret;
                        ret = 0;
-               }
-               /*
-                * Need to reacquire mmap sem in write mode, as our callers
-                * expect this.  We have no support for atomically upgrading
-                * a sem to write, so we need to check for ranges while sem
-                * is unlocked.
-                */
-               up_read(&mm->mmap_sem);
-               /* vma can change or disappear */
-               down_write(&mm->mmap_sem);
-               *prev = find_vma(mm, start);
-               /* non-NULL *prev must contain @start, but need to check @end */
-               if (!(*prev) || end > (*prev)->vm_end)
-                       ret = -ENOMEM;
+               } else
+                       ret = __mlock_posix_error_return(ret); /* translate if needed */
        } else {
-               /*
-                * TODO:  for unlocking, pages will already be resident, so
-                * we don't need to wait for allocations/reclaim/pagein, ...
-                * However, unlocking a very large region can still take a
-                * while.  Should we downgrade the semaphore for both lock
-                * AND unlock ?
-                */
                __mlock_vma_pages_range(vma, start, end, 0);
        }
 
@@ -485,7 +490,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
        return error;
 }
 
-asmlinkage long sys_mlock(unsigned long start, size_t len)
+SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
 {
        unsigned long locked;
        unsigned long lock_limit;
@@ -494,6 +499,8 @@ asmlinkage long sys_mlock(unsigned long start, size_t len)
        if (!can_do_mlock())
                return -EPERM;
 
+       lru_add_drain_all();    /* flush pagevec */
+
        down_write(&current->mm->mmap_sem);
        len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
        start &= PAGE_MASK;
@@ -511,7 +518,7 @@ asmlinkage long sys_mlock(unsigned long start, size_t len)
        return error;
 }
 
-asmlinkage long sys_munlock(unsigned long start, size_t len)
+SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
 {
        int ret;
 
@@ -548,7 +555,7 @@ out:
        return 0;
 }
 
-asmlinkage long sys_mlockall(int flags)
+SYSCALL_DEFINE1(mlockall, int, flags)
 {
        unsigned long lock_limit;
        int ret = -EINVAL;
@@ -560,6 +567,8 @@ asmlinkage long sys_mlockall(int flags)
        if (!can_do_mlock())
                goto out;
 
+       lru_add_drain_all();    /* flush pagevec */
+
        down_write(&current->mm->mmap_sem);
 
        lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
@@ -574,7 +583,7 @@ out:
        return ret;
 }
 
-asmlinkage long sys_munlockall(void)
+SYSCALL_DEFINE0(munlockall)
 {
        int ret;
 
@@ -619,3 +628,53 @@ void user_shm_unlock(size_t size, struct user_struct *user)
        spin_unlock(&shmlock_user_lock);
        free_uid(user);
 }
+
+void *alloc_locked_buffer(size_t size)
+{
+       unsigned long rlim, vm, pgsz;
+       void *buffer = NULL;
+
+       pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+       down_write(&current->mm->mmap_sem);
+
+       rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+       vm   = current->mm->total_vm + pgsz;
+       if (rlim < vm)
+               goto out;
+
+       rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+       vm   = current->mm->locked_vm + pgsz;
+       if (rlim < vm)
+               goto out;
+
+       buffer = kzalloc(size, GFP_KERNEL);
+       if (!buffer)
+               goto out;
+
+       current->mm->total_vm  += pgsz;
+       current->mm->locked_vm += pgsz;
+
+ out:
+       up_write(&current->mm->mmap_sem);
+       return buffer;
+}
+
+void release_locked_buffer(void *buffer, size_t size)
+{
+       unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+       down_write(&current->mm->mmap_sem);
+
+       current->mm->total_vm  -= pgsz;
+       current->mm->locked_vm -= pgsz;
+
+       up_write(&current->mm->mmap_sem);
+}
+
+void free_locked_buffer(void *buffer, size_t size)
+{
+       release_locked_buffer(buffer, size);
+
+       kfree(buffer);
+}
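
These helpers let kernel code charge a kmalloc'ed buffer against the caller's RLIMIT_AS and RLIMIT_MEMLOCK as if it were locked user memory. A hypothetical caller, for illustration only (example_use_locked_buffer() is not part of this patch):

/* Hypothetical usage sketch: allocate a buffer charged to the task's
 * address-space and memlock rlimits, use it, then release it. */
static int example_use_locked_buffer(void)
{
        size_t size = 64 * 1024;
        void *buf = alloc_locked_buffer(size);

        if (!buf)
                return -ENOMEM;         /* over an rlimit, or kzalloc failed */

        /* ... use buf; it is kernel memory and is never swapped ... */

        free_locked_buffer(buf, size);  /* uncharges, then kfree()s */
        return 0;
}
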