Merge branch 'master' into next
diff --git a/mm/madvise.c b/mm/madvise.c
index 603c525..b9ce574 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
 #include <linux/syscalls.h>
 #include <linux/mempolicy.h>
 #include <linux/hugetlb.h>
+#include <linux/sched.h>
+
+/*
+ * Any behaviour which results in changes to the vma->vm_flags needs to
+ * take mmap_sem for writing. Others, which simply traverse vmas, need
+ * to only take it for reading.
+ */
+static int madvise_need_mmap_write(int behavior)
+{
+       switch (behavior) {
+       case MADV_REMOVE:
+       case MADV_WILLNEED:
+       case MADV_DONTNEED:
+               return 0;
+       default:
+               /* be safe, default to 1. list exceptions explicitly */
+               return 1;
+       }
+}
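
For context, a minimal user-space sketch (not part of the patch) of the kind of workload this classification helps: several threads issuing MADV_DONTNEED on disjoint ranges of one mapping. Since MADV_DONTNEED now takes mmap_sem only for reading, these calls no longer serialize against each other. Build with -lpthread; the sizes and thread count are arbitrary.

/* Illustrative only, not from the patch: threads discard disjoint slices. */
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define NTHREADS 4
#define CHUNK    (16UL * 1024 * 1024)	/* 16 MB per thread */

static char *base;

static void *drop_chunk(void *arg)
{
	long i = (long)arg;

	/* MADV_DONTNEED now needs mmap_sem only for reading. */
	if (madvise(base + i * CHUNK, CHUNK, MADV_DONTNEED))
		perror("madvise(MADV_DONTNEED)");
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];
	long i;

	base = mmap(NULL, NTHREADS * CHUNK, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(base, 1, NTHREADS * CHUNK);	/* fault the pages in */

	for (i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, drop_chunk, (void *)i);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}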
 
 /*
  * We can potentially split a vm area into separate
@@ -93,7 +112,7 @@ static long madvise_willneed(struct vm_area_struct * vma,
        if (!file)
                return -EBADF;
 
-       if (file->f_mapping->a_ops->get_xip_page) {
+       if (file->f_mapping->a_ops->get_xip_mem) {
                /* no bad return value, but ignore advice */
                return 0;
        }
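
For reference, a hedged user-space sketch (not from the patch) of what madvise_willneed is asked to do: start readahead on a file-backed range before it is touched. On an XIP mapping the advice is simply ignored, as the hunk above shows. The file path here is an arbitrary example.

/* Illustrative only: MADV_WILLNEED as a readahead hint on a mapped file. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/etc/services"; /* arbitrary */
	struct stat st;
	char *p;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0) {
		perror(path);
		return 1;
	}

	p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Ask for readahead now; later faults are more likely to hit cache. */
	if (madvise(p, st.st_size, MADV_WILLNEED))
		perror("madvise(MADV_WILLNEED)");

	printf("first byte of %s: %d\n", path, p[0]);
	return 0;
}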
@@ -113,10 +132,10 @@ static long madvise_willneed(struct vm_area_struct * vma,
  * Application no longer needs these pages.  If the pages are dirty,
  * it's OK to just throw them away.  The app will be more careful about
  * data it wants to keep.  Be sure to free swap resources too.  The
- * zap_page_range call sets things up for refill_inactive to actually free
+ * zap_page_range call sets things up for shrink_active_list to actually free
  * these pages later if no one else has touched them in the meantime,
  * although we could add these pages to a global reuse list for
- * refill_inactive to pick up before reclaiming other pages.
+ * shrink_active_list to pick up before reclaiming other pages.
  *
  * NB: This interface discards data rather than pushes it out to swap,
  * as some implementations do.  This has performance implications for
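
The behaviour the comment above describes is visible from user space; a minimal sketch (not from the patch): after MADV_DONTNEED, a dirty private anonymous page reads back as zeroes, because the old contents are discarded rather than written to swap.

/* Illustrative only: MADV_DONTNEED discards data rather than swapping it. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	char *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memset(p, 0xaa, len);			/* dirty the page */
	madvise(p, len, MADV_DONTNEED);		/* throw it away  */

	/* Prints 0: the old 0xaa contents are gone, not swapped out. */
	printf("p[0] after MADV_DONTNEED = %d\n", p[0]);
	return 0;
}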
@@ -183,9 +202,9 @@ static long madvise_remove(struct vm_area_struct *vma,
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 
        /* vmtruncate_range needs to take i_mutex and i_alloc_sem */
-       up_write(&current->mm->mmap_sem);
+       up_read(&current->mm->mmap_sem);
        error = vmtruncate_range(mapping->host, offset, endoff);
-       down_write(&current->mm->mmap_sem);
+       down_read(&current->mm->mmap_sem);
        return error;
 }
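
For reference, a user-space sketch (not from the patch) of the operation madvise_remove implements: punching a hole in a shared mapping so the backing pages are freed. It assumes a tmpfs file (e.g. under /dev/shm), which is what MADV_REMOVE supported at this point; the file name is an arbitrary choice.

/* Illustrative only: MADV_REMOVE punches a hole in a tmpfs-backed mapping. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/dev/shm/madv_remove_demo";	/* arbitrary name */
	size_t len = 1 << 20;				/* 1 MB */
	char *p;
	int fd;

	fd = open(path, O_RDWR | O_CREAT, 0600);
	if (fd < 0 || ftruncate(fd, len) < 0) {
		perror(path);
		return 1;
	}

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0xff, len);		/* allocate backing pages */

	/* Free the first half of the file's pages; reads then return 0. */
	if (madvise(p, len / 2, MADV_REMOVE))
		perror("madvise(MADV_REMOVE)");

	printf("p[0] after MADV_REMOVE = %d\n", p[0]);
	unlink(path);
	return 0;
}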
 
@@ -262,15 +281,20 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
  *  -EBADF  - map exists, but area maps something that isn't a file.
  *  -EAGAIN - a kernel resource was temporarily unavailable.
  */
-asmlinkage long sys_madvise(unsigned long start, size_t len_in, int behavior)
+SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
 {
        unsigned long end, tmp;
        struct vm_area_struct * vma, *prev;
        int unmapped_error = 0;
        int error = -EINVAL;
+       int write;
        size_t len;
 
-       down_write(&current->mm->mmap_sem);
+       write = madvise_need_mmap_write(behavior);
+       if (write)
+               down_write(&current->mm->mmap_sem);
+       else
+               down_read(&current->mm->mmap_sem);
 
        if (start & ~PAGE_MASK)
                goto out;
@@ -332,6 +356,10 @@ asmlinkage long sys_madvise(unsigned long start, size_t len_in, int behavior)
                        vma = find_vma(current->mm, start);
        }
 out:
-       up_write(&current->mm->mmap_sem);
+       if (write)
+               up_write(&current->mm->mmap_sem);
+       else
+               up_read(&current->mm->mmap_sem);
+
        return error;
 }