diff --git a/mm/mlock.c b/mm/mlock.c
index 04d5e74..45eb650 100644
@@ -31,7 +31,6 @@ int can_do_mlock(void)
 }
 EXPORT_SYMBOL(can_do_mlock);
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * Mlocked pages are marked with PageMlocked() flag for efficient testing
  * in vmscan and, possibly, the fault path; and to support semi-accurate
@@ -261,27 +260,6 @@ static int __mlock_posix_error_return(long retval)
        return retval;
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
-
-/*
- * Just make pages present if VM_LOCKED.  No-op if unlocking.
- */
-static long __mlock_vma_pages_range(struct vm_area_struct *vma,
-                                  unsigned long start, unsigned long end,
-                                  int mlock)
-{
-       if (mlock && (vma->vm_flags & VM_LOCKED))
-               return make_pages_present(start, end);
-       return 0;
-}
-
-static inline int __mlock_posix_error_return(long retval)
-{
-       return 0;
-}
-
-#endif /* CONFIG_UNEVICTABLE_LRU */
-
 /**
  * mlock_vma_pages_range() - mlock pages in specified vma range.
  * @vma - the vma containing the specified address range
@@ -294,14 +272,10 @@ static inline int __mlock_posix_error_return(long retval)
  *
  * return number of pages [> 0] to be removed from locked_vm on success
  * of "special" vmas.
- *
- * return negative error if vma spanning @start-@range disappears while
- * mmap semaphore is dropped.  Unlikely?
  */
 long mlock_vma_pages_range(struct vm_area_struct *vma,
                        unsigned long start, unsigned long end)
 {
-       struct mm_struct *mm = vma->vm_mm;
        int nr_pages = (end - start) / PAGE_SIZE;
        BUG_ON(!(vma->vm_flags & VM_LOCKED));
 
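For orientation, the behavior this function ultimately backs is what userspace sees through the mlock() family of syscalls: fault the range in and keep it resident. A minimal, self-contained userspace demo (standard POSIX calls only, nothing specific to this patch):

	/* Lock a private anonymous mapping and touch it; the kernel
	 * path above is what populates and marks the pages. */
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 4 * 1024 * 1024;	/* 4 MiB */
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		if (mlock(p, len) != 0) {	/* charged against RLIMIT_MEMLOCK */
			perror("mlock");
			return 1;
		}
		memset(p, 0, len);		/* pages are already resident */
		munlock(p, len);
		munmap(p, len);
		return 0;
	}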
@@ -314,20 +288,11 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
        if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
                        is_vm_hugetlb_page(vma) ||
                        vma == get_gate_vma(current))) {
-               long error;
-               downgrade_write(&mm->mmap_sem);
-
-               error = __mlock_vma_pages_range(vma, start, end, 1);
 
-               up_read(&mm->mmap_sem);
-               /* vma can change or disappear */
-               down_write(&mm->mmap_sem);
-               vma = find_vma(mm, start);
-               /* non-NULL vma must contain @start, but need to check @end */
-               if (!vma ||  end > vma->vm_end)
-                       return -ENOMEM;
+               __mlock_vma_pages_range(vma, start, end, 1);
 
-               return 0;       /* hide other errors from mmap(), et al */
+               /* Hide errors from mmap() and other callers */
+               return 0;
        }
 
        /*
@@ -438,41 +403,14 @@ success:
        vma->vm_flags = newflags;
 
        if (lock) {
-               /*
-                * mmap_sem is currently held for write.  Downgrade the write
-                * lock to a read lock so that other faults, mmap scans, ...
-                * while we fault in all pages.
-                */
-               downgrade_write(&mm->mmap_sem);
-
                ret = __mlock_vma_pages_range(vma, start, end, 1);
 
-               /*
-                * Need to reacquire mmap sem in write mode, as our callers
-                * expect this.  We have no support for atomically upgrading
-                * a sem to write, so we need to check for ranges while sem
-                * is unlocked.
-                */
-               up_read(&mm->mmap_sem);
-               /* vma can change or disappear */
-               down_write(&mm->mmap_sem);
-               *prev = find_vma(mm, start);
-               /* non-NULL *prev must contain @start, but need to check @end */
-               if (!(*prev) || end > (*prev)->vm_end)
-                       ret = -ENOMEM;
-               else if (ret > 0) {
+               if (ret > 0) {
                        mm->locked_vm -= ret;
                        ret = 0;
                } else
                        ret = __mlock_posix_error_return(ret); /* translate if needed */
        } else {
-               /*
-                * TODO:  for unlocking, pages will already be resident, so
-                * we don't need to wait for allocations/reclaim/pagein, ...
-                * However, unlocking a very large region can still take a
-                * while.  Should we downgrade the semaphore for both lock
-                * AND unlock ?
-                */
                __mlock_vma_pages_range(vma, start, end, 0);
        }
 
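This hunk and the mlock_vma_pages_range() hunk above delete the same locking idiom. Schematically (every call below is taken verbatim from the removed lines), it was:

	/* Drop mmap_sem to read mode while faulting pages in, then
	 * reacquire it for write and revalidate that the vma still
	 * covers [start, end). */
	downgrade_write(&mm->mmap_sem);
	ret = __mlock_vma_pages_range(vma, start, end, 1);
	up_read(&mm->mmap_sem);
	/* vma can change or disappear while the sem is dropped */
	down_write(&mm->mmap_sem);
	vma = find_vma(mm, start);
	if (!vma || end > vma->vm_end)
		ret = -ENOMEM;

Holding mmap_sem for write across the faults costs some concurrency, but it removes the window in which the vma could change or disappear and the -ENOMEM revalidation that guarded it.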
@@ -595,7 +533,7 @@ out:
        return 0;
 }
 
-asmlinkage long sys_mlockall(int flags)
+SYSCALL_DEFINE1(mlockall, int, flags)
 {
        unsigned long lock_limit;
        int ret = -EINVAL;
@@ -623,7 +561,7 @@ out:
        return ret;
 }
 
-asmlinkage long sys_munlockall(void)
+SYSCALL_DEFINE0(munlockall)
 {
        int ret;
 
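The open-coded asmlinkage prototypes give way to SYSCALL_DEFINEn from include/linux/syscalls.h, which emits the same sys_mlockall()/sys_munlockall() entry points while letting the build generate per-architecture argument wrappers and tracing glue in one place. Userspace is unaffected; a minimal caller:

	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
			/* commonly EPERM or ENOMEM (RLIMIT_MEMLOCK) */
			perror("mlockall");
			return 1;
		}
		/* ... latency-sensitive work with no major faults ... */
		munlockall();
		return 0;
	}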
@@ -669,47 +607,43 @@ void user_shm_unlock(size_t size, struct user_struct *user)
        free_uid(user);
 }
 
-void *alloc_locked_buffer(size_t size)
+int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
+                         size_t size)
 {
-       unsigned long rlim, vm, pgsz;
-       void *buffer = NULL;
+       unsigned long lim, vm, pgsz;
+       int error = -ENOMEM;
 
        pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
-       down_write(&current->mm->mmap_sem);
-
-       rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
-       vm   = current->mm->total_vm + pgsz;
-       if (rlim < vm)
-               goto out;
+       down_write(&mm->mmap_sem);
 
-       rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
-       vm   = current->mm->locked_vm + pgsz;
-       if (rlim < vm)
+       lim = rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+       vm   = mm->total_vm + pgsz;
+       if (lim < vm)
                goto out;
 
-       buffer = kzalloc(size, GFP_KERNEL);
-       if (!buffer)
+       lim = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+       vm   = mm->locked_vm + pgsz;
+       if (lim < vm)
                goto out;
 
-       current->mm->total_vm  += pgsz;
-       current->mm->locked_vm += pgsz;
+       mm->total_vm  += pgsz;
+       mm->locked_vm += pgsz;
 
+       error = 0;
  out:
-       up_write(&current->mm->mmap_sem);
-       return buffer;
+       up_write(&mm->mmap_sem);
+       return error;
 }
 
-void free_locked_buffer(void *buffer, size_t size)
+void refund_locked_memory(struct mm_struct *mm, size_t size)
 {
        unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
-       down_write(&current->mm->mmap_sem);
-
-       current->mm->total_vm  -= pgsz;
-       current->mm->locked_vm -= pgsz;
+       down_write(&mm->mmap_sem);
 
-       up_write(&current->mm->mmap_sem);
+       mm->total_vm  -= pgsz;
+       mm->locked_vm -= pgsz;
 
-       kfree(buffer);
+       up_write(&mm->mmap_sem);
 }
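account_locked_memory() now takes the mm and the rlimit array explicitly instead of reaching through current, and it no longer allocates the buffer itself; allocation is left to the caller. A hypothetical caller (pin_buffer()/unpin_buffer() are illustrative names, not from this patch) might pair the two like this:

	/* Sketch only: charge the pinned size against the task's limits
	 * first, refund it on teardown.  Only account_locked_memory()
	 * and refund_locked_memory() come from the patch above. */
	static int pin_buffer(struct mm_struct *mm, size_t size)
	{
		int err;

		err = account_locked_memory(mm, current->signal->rlim, size);
		if (err)	/* -ENOMEM: RLIMIT_AS or RLIMIT_MEMLOCK exceeded */
			return err;

		/* ... allocate and pin the buffer here ... */
		return 0;
	}

	static void unpin_buffer(struct mm_struct *mm, size_t size)
	{
		/* ... unpin and free the buffer here ... */
		refund_locked_memory(mm, size);
	}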