include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 3fdd514..ac74869 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -6,20 +6,23 @@
 #include <linux/mm.h>
 #include <linux/err.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/random.h>
+#include <linux/elf.h>
 #include <asm/vsyscall.h>
 #include <asm/vgtod.h>
 #include <asm/proto.h>
 #include "vextern.h"           /* Just for VMAGIC.  */
 #undef VEXTERN
 
-int vdso_enabled = 1;
+unsigned int __read_mostly vdso_enabled = 1;
 
 extern char vdso_start[], vdso_end[];
 extern unsigned short vdso_sync_cpuid;
 
-struct page **vdso_pages;
+static struct page **vdso_pages;
+static unsigned vdso_size;
 
 static inline void *var_ref(void *p, char *name)
 {
@@ -38,6 +41,7 @@ static int __init init_vdso_vars(void)
        int i;
        char *vbase;
 
+       vdso_size = npages << PAGE_SHIFT;
        vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
        if (!vdso_pages)
                goto oom;
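
The line added above computes the mapping size once at init time; previously
arch_setup_additional_pages() re-derived it on every execve with
round_up(vdso_end - vdso_start, PAGE_SIZE) (see the hunk removing that line
below). A minimal user-space sketch of the arithmetic, assuming npages was
obtained by rounding the image size up to whole pages (that computation is
outside the quoted hunks):

#include <stdio.h>

#define PAGE_SHIFT 12                   /* 4 KiB pages assumed */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        /* Hypothetical image size, standing in for vdso_end - vdso_start. */
        unsigned long image = 5000;
        unsigned long npages = (image + PAGE_SIZE - 1) >> PAGE_SHIFT;

        /* Matches the round_up(image, PAGE_SIZE) the old code computed. */
        printf("npages=%lu size=%lu\n", npages, npages << PAGE_SHIFT);
        return 0;
}
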
@@ -83,8 +87,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
        unsigned long addr, end;
        unsigned offset;
        end = (start + PMD_SIZE - 1) & PMD_MASK;
-       if (end >= TASK_SIZE64)
-               end = TASK_SIZE64;
+       if (end >= TASK_SIZE_MAX)
+               end = TASK_SIZE_MAX;
        end -= len;
        /* This loses some more bits than a modulo, but is cheaper */
        offset = get_random_int() & (PTRS_PER_PTE - 1);
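
This hunk only renames TASK_SIZE64 to TASK_SIZE_MAX; the placement logic is
unchanged: the vDSO lands at a page-granular random offset within one page
table's reach (PTRS_PER_PTE slots) above the stack, rounded up to a PMD
boundary and clamped to the end of user space. A user-space model of that
calculation, with rand() standing in for get_random_int(), x86-64 constants
assumed, and the tail of vdso_addr() reconstructed rather than quoted from
the patch:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT    12
#define PTRS_PER_PTE  512UL                     /* one x86-64 page table */
#define PMD_SIZE      (1UL << 21)               /* 2 MiB */
#define PMD_MASK      (~(PMD_SIZE - 1))
#define TASK_SIZE_MAX ((1UL << 47) - (1UL << PAGE_SHIFT))

static unsigned long model_vdso_addr(unsigned long start, unsigned long len)
{
        unsigned long addr, end, offset;

        end = (start + PMD_SIZE - 1) & PMD_MASK; /* round up to PMD boundary */
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;
        /* Masking loses more bits than a modulo, but is cheaper. */
        offset = (unsigned long)rand() & (PTRS_PER_PTE - 1);
        addr = start + (offset << PAGE_SHIFT);
        if (addr >= end)
                addr = end;
        return addr;
}

int main(void)
{
        /* Hypothetical stack top; a two-page vDSO. */
        printf("%#lx\n", model_vdso_addr(0x7ffffffde000UL, 2UL << PAGE_SHIFT));
        return 0;
}
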
@@ -96,33 +100,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 
 /* Setup a VMA at program startup for the vsyscall page.
    Not called for compat tasks */
-int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret;
-       unsigned len = round_up(vdso_end - vdso_start, PAGE_SIZE);
 
        if (!vdso_enabled)
                return 0;
 
        down_write(&mm->mmap_sem);
-       addr = vdso_addr(mm->start_stack, len);
-       addr = get_unmapped_area(NULL, addr, len, 0, 0);
+       addr = vdso_addr(mm->start_stack, vdso_size);
+       addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }
 
-       ret = install_special_mapping(mm, addr, len,
+       current->mm->context.vdso = (void *)addr;
+
+       ret = install_special_mapping(mm, addr, vdso_size,
                                      VM_READ|VM_EXEC|
                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
                                      VM_ALWAYSDUMP,
                                      vdso_pages);
-       if (ret)
+       if (ret) {
+               current->mm->context.vdso = NULL;
                goto up_fail;
+       }
 
-       current->mm->context.vdso = (void *)addr;
 up_fail:
        up_write(&mm->mmap_sem);
        return ret;
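
Besides switching to the cached vdso_size, the hunk above changes when
mm->context.vdso is set: it is now published before install_special_mapping()
and explicitly cleared if the mapping fails, instead of being assigned only
after success, so the pointer never disagrees with an installed mapping. The
result is visible from user space through the auxiliary vector, whose
AT_SYSINFO_EHDR entry carries the address chosen here. A small sketch
(assumes glibc 2.16+ for getauxval(); not part of the patch):

#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
        /* Where arch_setup_additional_pages() placed the vDSO for us. */
        Elf64_Ehdr *ehdr = (Elf64_Ehdr *)getauxval(AT_SYSINFO_EHDR);

        if (!ehdr) {    /* e.g. booted with vdso=0, so vdso_enabled == 0 */
                puts("no vDSO mapped");
                return 0;
        }
        printf("vDSO at %p (e_ident: %c%c%c)\n", (void *)ehdr,
               ehdr->e_ident[1], ehdr->e_ident[2], ehdr->e_ident[3]);
        return 0;
}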