X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=mm%2Fmmap.c;h=7a30c4988231377fbec31faf6a1dcfb7e6af44e6;hb=2047e15be4247efd430d067aae5bd24c11d99999;hp=eeefe19a0facb2c3add503b821b21097c0572fa8;hpb=2c0b381467bc2997be9d741a152f3fc75785eedc;p=safe%2Fjmp%2Flinux-2.6 diff --git a/mm/mmap.c b/mm/mmap.c index eeefe19..7a30c49 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -7,12 +7,14 @@ */ #include +#include #include #include #include #include #include #include +#include #include #include #include @@ -28,6 +30,11 @@ #include #include #include +#include + +#ifndef arch_mmap_check +#define arch_mmap_check(addr, len, flags) (0) +#endif static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, @@ -59,6 +66,13 @@ pgprot_t protection_map[16] = { __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 }; +pgprot_t vm_get_page_prot(unsigned long vm_flags) +{ + return protection_map[vm_flags & + (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]; +} +EXPORT_SYMBOL(vm_get_page_prot); + int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ int sysctl_overcommit_ratio = 50; /* default is 50% */ int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; @@ -80,7 +94,7 @@ atomic_t vm_committed_space = ATOMIC_INIT(0); * Note this is a helper function intended to be used by LSMs which * wish to use this logic. */ -int __vm_enough_memory(long pages, int cap_sys_admin) +int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) { unsigned long free, allowed; @@ -95,7 +109,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin) if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { unsigned long n; - free = get_page_cache_size(); + free = global_page_state(NR_FILE_PAGES); free += nr_swap_pages; /* @@ -104,7 +118,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin) * which are reclaimable, under pressure. The dentry * cache and most inode caches should fall into this */ - free += atomic_read(&slab_reclaim_pages); + free += global_page_state(NR_SLAB_RECLAIMABLE); /* * Leave the last 3% for root @@ -120,14 +134,26 @@ int __vm_enough_memory(long pages, int cap_sys_admin) * only call if we're about to fail. */ n = nr_free_pages(); + + /* + * Leave reserved pages. The pages are not for anonymous pages. 
+ */ + if (n <= totalreserve_pages) + goto error; + else + n -= totalreserve_pages; + + /* + * Leave the last 3% for root + */ if (!cap_sys_admin) n -= n / 32; free += n; if (free > pages) return 0; - vm_unacct_memory(pages); - return -ENOMEM; + + goto error; } allowed = (totalram_pages - hugetlb_total_pages()) @@ -141,7 +167,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin) /* Don't let a single process grow too big: leave 3% of the size of this process for other processes */ - allowed -= current->mm->total_vm / 32; + allowed -= mm->total_vm / 32; /* * cast `allowed' as a signed long because vm_committed_space @@ -149,18 +175,12 @@ int __vm_enough_memory(long pages, int cap_sys_admin) */ if (atomic_read(&vm_committed_space) < (long)allowed) return 0; - +error: vm_unacct_memory(pages); return -ENOMEM; } -EXPORT_SYMBOL(sysctl_overcommit_memory); -EXPORT_SYMBOL(sysctl_overcommit_ratio); -EXPORT_SYMBOL(sysctl_max_map_count); -EXPORT_SYMBOL(vm_committed_space); -EXPORT_SYMBOL(__vm_enough_memory); - /* * Requires inode->i_mapping->i_mmap_lock */ @@ -168,7 +188,7 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma, struct file *file, struct address_space *mapping) { if (vma->vm_flags & VM_DENYWRITE) - atomic_inc(&file->f_dentry->d_inode->i_writecount); + atomic_inc(&file->f_path.dentry->d_inode->i_writecount); if (vma->vm_flags & VM_SHARED) mapping->i_mmap_writable--; @@ -181,26 +201,36 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma, } /* - * Remove one vm structure and free it. + * Unlink a file-based vm structure from its prio_tree, to hide + * vma from rmap and vmtruncate before freeing its page tables. */ -static void remove_vm_struct(struct vm_area_struct *vma) +void unlink_file_vma(struct vm_area_struct *vma) { struct file *file = vma->vm_file; - might_sleep(); if (file) { struct address_space *mapping = file->f_mapping; spin_lock(&mapping->i_mmap_lock); __remove_shared_vm_struct(vma, file, mapping); spin_unlock(&mapping->i_mmap_lock); } +} + +/* + * Close a vm structure and free it, returning the next. + */ +static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) +{ + struct vm_area_struct *next = vma->vm_next; + + might_sleep(); if (vma->vm_ops && vma->vm_ops->close) vma->vm_ops->close(vma); - if (file) - fput(file); - anon_vma_unlink(vma); + if (vma->vm_file) + fput(vma->vm_file); mpol_free(vma_policy(vma)); kmem_cache_free(vm_area_cachep, vma); + return next; } asmlinkage unsigned long sys_brk(unsigned long brk) @@ -213,6 +243,17 @@ asmlinkage unsigned long sys_brk(unsigned long brk) if (brk < mm->end_code) goto out; + + /* + * Check against rlimit here. If this check is done later after the test + * of oldbrk with newbrk then it can escape the test and let the data + * segment grow beyond its set limit the in case where the limit is + * not page aligned -Ram Gupta + */ + rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur; + if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim) + goto out; + newbrk = PAGE_ALIGN(brk); oldbrk = PAGE_ALIGN(mm->brk); if (oldbrk == newbrk) @@ -225,11 +266,6 @@ asmlinkage unsigned long sys_brk(unsigned long brk) goto out; } - /* Check against rlimit.. */ - rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur; - if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim) - goto out; - /* Check against existing mmap mappings. 
*/ if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) goto out; @@ -263,6 +299,8 @@ static int browse_rb(struct rb_root *root) printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start); i++; pn = nd; + prev = vma->vm_start; + pend = vma->vm_end; } j = 0; for (nd = pn; nd; nd = rb_prev(nd)) { @@ -287,8 +325,7 @@ void validate_mm(struct mm_struct *mm) i = browse_rb(&mm->mm_rb); if (i != mm->map_count) printk("map_count %d rb %d\n", mm->map_count, i), bug = 1; - if (bug) - BUG(); + BUG_ON(bug); } #else #define validate_mm(mm) do { } while (0) @@ -364,7 +401,7 @@ static inline void __vma_link_file(struct vm_area_struct *vma) struct address_space *mapping = file->f_mapping; if (vma->vm_flags & VM_DENYWRITE) - atomic_dec(&file->f_dentry->d_inode->i_writecount); + atomic_dec(&file->f_path.dentry->d_inode->i_writecount); if (vma->vm_flags & VM_SHARED) mapping->i_mmap_writable++; @@ -425,8 +462,7 @@ __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) struct rb_node ** rb_link, * rb_parent; __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent); - if (__vma && __vma->vm_start < vma->vm_end) - BUG(); + BUG_ON(__vma && __vma->vm_start < vma->vm_end); __vma_link(mm, vma, prev, rb_link, rb_parent); mm->map_count++; } @@ -605,7 +641,7 @@ again: remove_next = 1 + (end > next->vm_end); * If the vma has a ->close operation then the driver probably needs to release * per-vma resources, so we don't attempt to merge those. */ -#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED) +#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP) static inline int is_mergeable_vma(struct vm_area_struct *vma, struct file *file, unsigned long vm_flags) @@ -806,8 +842,7 @@ try_prev: * (e.g. stash info in next's anon_vma_node when assigning * an anon_vma, or when trying vma_merge). Another time. */ - if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma) - BUG(); + BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma); if (!near) goto none; @@ -838,14 +873,6 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags, const unsigned long stack_flags = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN); -#ifdef CONFIG_HUGETLB - if (flags & VM_HUGETLB) { - if (!(flags & VM_DONTCOPY)) - mm->shared_vm += pages; - return; - } -#endif /* CONFIG_HUGETLB */ - if (file) { mm->shared_vm += pages; if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC) @@ -866,26 +893,12 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned long flags, unsigned long pgoff) { struct mm_struct * mm = current->mm; - struct vm_area_struct * vma, * prev; struct inode *inode; unsigned int vm_flags; - int correct_wcount = 0; int error; - struct rb_node ** rb_link, * rb_parent; int accountable = 1; - unsigned long charged = 0, reqprot = prot; - - if (file) { - if (is_file_hugepages(file)) - accountable = 0; - - if (!file->f_op || !file->f_op->mmap) - return -ENODEV; + unsigned long reqprot = prot; - if ((prot & PROT_EXEC) && - (file->f_vfsmnt->mnt_flags & MNT_NOEXEC)) - return -EPERM; - } /* * Does the application expect PROT_READ to imply PROT_EXEC? * @@ -893,12 +906,16 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, * mounted, in which case we dont add PROT_EXEC.) 
*/ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) - if (!(file && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))) + if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC))) prot |= PROT_EXEC; if (!len) return -EINVAL; + error = arch_mmap_check(addr, len, flags); + if (error) + return error; + /* Careful about overflows.. */ len = PAGE_ALIGN(len); if (!len || len > TASK_SIZE) @@ -942,7 +959,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, return -EAGAIN; } - inode = file ? file->f_dentry->d_inode : NULL; + inode = file ? file->f_path.dentry->d_inode : NULL; if (file) { switch (flags & MAP_TYPE) { @@ -971,6 +988,16 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, case MAP_PRIVATE: if (!(file->f_mode & FMODE_READ)) return -EACCES; + if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) { + if (vm_flags & VM_EXEC) + return -EPERM; + vm_flags &= ~VM_MAYEXEC; + } + if (is_file_hugepages(file)) + accountable = 0; + + if (!file->f_op || !file->f_op->mmap) + return -ENODEV; break; default: @@ -992,10 +1019,61 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, } } - error = security_file_mmap(file, reqprot, prot, flags); + error = security_file_mmap(file, reqprot, prot, flags, addr, 0); if (error) return error; - + + return mmap_region(file, addr, len, flags, vm_flags, pgoff, + accountable); +} +EXPORT_SYMBOL(do_mmap_pgoff); + +/* + * Some shared mappigns will want the pages marked read-only + * to track write events. If so, we'll downgrade vm_page_prot + * to the private version (using protection_map[] without the + * VM_SHARED bit). + */ +int vma_wants_writenotify(struct vm_area_struct *vma) +{ + unsigned int vm_flags = vma->vm_flags; + + /* If it was private or non-writable, the write bit is already clear */ + if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) + return 0; + + /* The backer wishes to know when pages are first written to? */ + if (vma->vm_ops && vma->vm_ops->page_mkwrite) + return 1; + + /* The open routine did something to the protections already? */ + if (pgprot_val(vma->vm_page_prot) != + pgprot_val(vm_get_page_prot(vm_flags))) + return 0; + + /* Specialty mapping? */ + if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE)) + return 0; + + /* Can the mapping track the dirty pages? */ + return vma->vm_file && vma->vm_file->f_mapping && + mapping_cap_account_dirty(vma->vm_file->f_mapping); +} + + +unsigned long mmap_region(struct file *file, unsigned long addr, + unsigned long len, unsigned long flags, + unsigned int vm_flags, unsigned long pgoff, + int accountable) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma, *prev; + int correct_wcount = 0; + int error; + struct rb_node **rb_link, *rb_parent; + unsigned long charged = 0; + struct inode *inode = file ? file->f_path.dentry->d_inode : NULL; + /* Clear old maps */ error = -ENOMEM; munmap_back: @@ -1041,18 +1119,17 @@ munmap_back: * specific mapper. the address has already been validated, but * not unmapped, but the maps are removed from the list. 
*/ - vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (!vma) { error = -ENOMEM; goto unacct_error; } - memset(vma, 0, sizeof(*vma)); vma->vm_mm = mm; vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_flags = vm_flags; - vma->vm_page_prot = protection_map[vm_flags & 0x0f]; + vma->vm_page_prot = vm_get_page_prot(vm_flags); vma->vm_pgoff = pgoff; if (file) { @@ -1093,6 +1170,10 @@ munmap_back: pgoff = vma->vm_pgoff; vm_flags = vma->vm_flags; + if (vma_wants_writenotify(vma)) + vma->vm_page_prot = + protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)]; + if (!file || !vma_merge(mm, prev, addr, vma->vm_end, vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) { file = vma->vm_file; @@ -1115,12 +1196,8 @@ out: mm->locked_vm += len >> PAGE_SHIFT; make_pages_present(addr, addr + len); } - if (flags & MAP_POPULATE) { - up_write(&mm->mmap_sem); - sys_remap_file_pages(addr, len, 0, - pgoff, flags & MAP_NONBLOCK); - down_write(&mm->mmap_sem); - } + if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK)) + make_pages_present(addr, addr + len); return addr; unmap_and_free_vma: @@ -1140,8 +1217,6 @@ unacct_error: return error; } -EXPORT_SYMBOL(do_mmap_pgoff); - /* Get an address range which is currently unmapped. * For shmat() with addr=0. * @@ -1165,6 +1240,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, if (len > TASK_SIZE) return -ENOMEM; + if (flags & MAP_FIXED) + return addr; + if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); @@ -1238,6 +1316,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, if (len > TASK_SIZE) return -ENOMEM; + if (flags & MAP_FIXED) + return addr; + /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); @@ -1325,39 +1406,21 @@ unsigned long get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { - unsigned long ret; - - if (!(flags & MAP_FIXED)) { - unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); - - get_area = current->mm->get_unmapped_area; - if (file && file->f_op && file->f_op->get_unmapped_area) - get_area = file->f_op->get_unmapped_area; - addr = get_area(file, addr, len, pgoff, flags); - if (IS_ERR_VALUE(addr)) - return addr; - } + unsigned long (*get_area)(struct file *, unsigned long, + unsigned long, unsigned long, unsigned long); + + get_area = current->mm->get_unmapped_area; + if (file && file->f_op && file->f_op->get_unmapped_area) + get_area = file->f_op->get_unmapped_area; + addr = get_area(file, addr, len, pgoff, flags); + if (IS_ERR_VALUE(addr)) + return addr; if (addr > TASK_SIZE - len) return -ENOMEM; if (addr & ~PAGE_MASK) return -EINVAL; - if (file && is_file_hugepages(file)) { - /* - * Check if the given range is hugepage aligned, and - * can be made suitable for hugepages. - */ - ret = prepare_hugepage_range(addr, len); - } else { - /* - * Ensure that a normal request is not falling in a - * reserved hugepage range. For some archs like IA-64, - * there is a separate region for hugepages. 
- */ - ret = is_hugepage_only_range(current->mm, addr, len); - } - if (ret) - return -EINVAL; + return addr; } @@ -1445,6 +1508,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un { struct mm_struct *mm = vma->vm_mm; struct rlimit *rlim = current->signal->rlim; + unsigned long new_start; /* address space limit tests */ if (!may_expand_vm(mm, grow)) @@ -1464,6 +1528,12 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un return -ENOMEM; } + /* Check to ensure the stack will not grow into a hugetlb-only region */ + new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : + vma->vm_end - size; + if (is_hugepage_only_range(vma->vm_mm, new_start, size)) + return -EFAULT; + /* * Overcommit.. This must be the final test, as it will * update security statistics. @@ -1479,11 +1549,15 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un return 0; } -#ifdef CONFIG_STACK_GROWSUP +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) /* - * vma is the first one with address > vma->vm_end. Have to extend vma. + * PA-RISC uses this for its stack; IA64 for its Register Backing Store. + * vma is the last one with address > vma->vm_end. Have to extend vma. */ -int expand_stack(struct vm_area_struct * vma, unsigned long address) +#ifndef CONFIG_IA64 +static inline +#endif +int expand_upwards(struct vm_area_struct *vma, unsigned long address) { int error; @@ -1502,9 +1576,14 @@ int expand_stack(struct vm_area_struct * vma, unsigned long address) * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_sem in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. + * Also guard against wrapping around to address 0. */ - address += 4 + PAGE_SIZE - 1; - address &= PAGE_MASK; + if (address < PAGE_ALIGN(address+4)) + address = PAGE_ALIGN(address+4); + else { + anon_vma_unlock(vma); + return -ENOMEM; + } error = 0; /* Somebody else might have raced and expanded it already */ @@ -1521,28 +1600,13 @@ int expand_stack(struct vm_area_struct * vma, unsigned long address) anon_vma_unlock(vma); return error; } +#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */ -struct vm_area_struct * -find_extend_vma(struct mm_struct *mm, unsigned long addr) -{ - struct vm_area_struct *vma, *prev; - - addr &= PAGE_MASK; - vma = find_vma_prev(mm, addr, &prev); - if (vma && (vma->vm_start <= addr)) - return vma; - if (!prev || expand_stack(prev, addr)) - return NULL; - if (prev->vm_flags & VM_LOCKED) { - make_pages_present(addr, prev->vm_end); - } - return prev; -} -#else /* * vma is the first one with address < vma->vm_start. Have to extend vma. 
*/ -int expand_stack(struct vm_area_struct *vma, unsigned long address) +static inline int expand_downwards(struct vm_area_struct *vma, + unsigned long address) { int error; @@ -1579,6 +1643,38 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address) return error; } +int expand_stack_downwards(struct vm_area_struct *vma, unsigned long address) +{ + return expand_downwards(vma, address); +} + +#ifdef CONFIG_STACK_GROWSUP +int expand_stack(struct vm_area_struct *vma, unsigned long address) +{ + return expand_upwards(vma, address); +} + +struct vm_area_struct * +find_extend_vma(struct mm_struct *mm, unsigned long addr) +{ + struct vm_area_struct *vma, *prev; + + addr &= PAGE_MASK; + vma = find_vma_prev(mm, addr, &prev); + if (vma && (vma->vm_start <= addr)) + return vma; + if (!prev || expand_stack(prev, addr)) + return NULL; + if (prev->vm_flags & VM_LOCKED) + make_pages_present(addr, prev->vm_end); + return prev; +} +#else +int expand_stack(struct vm_area_struct *vma, unsigned long address) +{ + return expand_downwards(vma, address); +} + struct vm_area_struct * find_extend_vma(struct mm_struct * mm, unsigned long addr) { @@ -1596,9 +1692,8 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr) start = vma->vm_start; if (expand_stack(vma, addr)) return NULL; - if (vma->vm_flags & VM_LOCKED) { + if (vma->vm_flags & VM_LOCKED) make_pages_present(addr, start); - } return vma; } #endif @@ -1611,16 +1706,16 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr) */ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) { + /* Update high watermark before we lower total_vm */ + update_hiwater_vm(mm); do { - struct vm_area_struct *next = vma->vm_next; long nrpages = vma_pages(vma); mm->total_vm -= nrpages; if (vma->vm_flags & VM_LOCKED) mm->locked_vm -= nrpages; vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); - remove_vm_struct(vma); - vma = next; + vma = remove_vma(vma); } while (vma); validate_mm(mm); } @@ -1639,14 +1734,13 @@ static void unmap_region(struct mm_struct *mm, unsigned long nr_accounted = 0; lru_add_drain(); - spin_lock(&mm->page_table_lock); tlb = tlb_gather_mmu(mm, 0); - unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL); + update_hiwater_rss(mm); + unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL); vm_unacct_memory(nr_accounted); free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS, next? next->vm_start: 0); tlb_finish_mmu(tlb, start, end); - spin_unlock(&mm->page_table_lock); } /* @@ -1680,7 +1774,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, /* * Split a vma into two pieces at address 'addr', a new vma is allocated - * either for the first part or the the tail. + * either for the first part or the tail. 
*/ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long addr, int new_below) @@ -1694,7 +1788,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, if (mm->map_count >= sysctl_max_map_count) return -ENOMEM; - new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); if (!new) return -ENOMEM; @@ -1809,7 +1903,7 @@ asmlinkage long sys_munmap(unsigned long addr, size_t len) static inline void verify_mm_writelocked(struct mm_struct *mm) { -#ifdef CONFIG_DEBUG_KERNEL +#ifdef CONFIG_DEBUG_VM if (unlikely(down_read_trylock(&mm->mmap_sem))) { WARN_ON(1); up_read(&mm->mmap_sem); @@ -1829,6 +1923,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) unsigned long flags; struct rb_node ** rb_link, * rb_parent; pgoff_t pgoff = addr >> PAGE_SHIFT; + int error; len = PAGE_ALIGN(len); if (!len) @@ -1837,6 +1932,15 @@ unsigned long do_brk(unsigned long addr, unsigned long len) if ((addr + len) > TASK_SIZE || (addr + len) < addr) return -EINVAL; + if (is_hugepage_only_range(mm, addr, len)) + return -EINVAL; + + flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; + + error = arch_mmap_check(addr, len, flags); + if (error) + return error; + /* * mlock MCL_FUTURE? */ @@ -1877,8 +1981,6 @@ unsigned long do_brk(unsigned long addr, unsigned long len) if (security_vm_enough_memory(len >> PAGE_SHIFT)) return -ENOMEM; - flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; - /* Can we just expand an old private anonymous mapping? */ if (vma_merge(mm, prev, addr, addr + len, flags, NULL, NULL, pgoff, NULL)) @@ -1887,19 +1989,18 @@ unsigned long do_brk(unsigned long addr, unsigned long len) /* * create a vma struct for an anonymous mapping */ - vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (!vma) { vm_unacct_memory(len >> PAGE_SHIFT); return -ENOMEM; } - memset(vma, 0, sizeof(*vma)); vma->vm_mm = mm; vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_pgoff = pgoff; vma->vm_flags = flags; - vma->vm_page_prot = protection_map[flags & 0x0f]; + vma->vm_page_prot = vm_get_page_prot(flags); vma_link(mm, vma, prev, rb_link, rb_parent); out: mm->total_vm += len >> PAGE_SHIFT; @@ -1920,35 +2021,25 @@ void exit_mmap(struct mm_struct *mm) unsigned long nr_accounted = 0; unsigned long end; - lru_add_drain(); - - spin_lock(&mm->page_table_lock); + /* mm's last user has gone, and its about to be pulled down */ + arch_exit_mmap(mm); + lru_add_drain(); flush_cache_mm(mm); tlb = tlb_gather_mmu(mm, 1); + /* Don't update_hiwater_rss(mm) here, do_exit already did */ /* Use -1 here to ensure all VMAs in the mm are unmapped */ - end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL); + end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL); vm_unacct_memory(nr_accounted); free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0); tlb_finish_mmu(tlb, 0, end); - mm->mmap = mm->mmap_cache = NULL; - mm->mm_rb = RB_ROOT; - set_mm_counter(mm, rss, 0); - mm->total_vm = 0; - mm->locked_vm = 0; - - spin_unlock(&mm->page_table_lock); - /* - * Walk the list again, actually closing and freeing it - * without holding any MM locks. + * Walk the list again, actually closing and freeing it, + * with preemption enabled, without holding any MM locks. 
*/ - while (vma) { - struct vm_area_struct *next = vma->vm_next; - remove_vm_struct(vma); - vma = next; - } + while (vma) + vma = remove_vma(vma); BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); } @@ -1982,7 +2073,7 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) if (__vma && __vma->vm_start < vma->vm_end) return -ENOMEM; if ((vma->vm_flags & VM_ACCOUNT) && - security_vm_enough_memory(vma_pages(vma))) + security_vm_enough_memory_mm(mm, vma_pages(vma))) return -ENOMEM; vma_link(mm, vma, prev, rb_link, rb_parent); return 0; @@ -2020,7 +2111,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, vma_start < new_vma->vm_end) *vmap = new_vma; } else { - new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); if (new_vma) { *new_vma = *vma; pol = mpol_copy(vma_policy(vma)); @@ -2057,3 +2148,75 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages) return 0; return 1; } + + +static struct page *special_mapping_nopage(struct vm_area_struct *vma, + unsigned long address, int *type) +{ + struct page **pages; + + BUG_ON(address < vma->vm_start || address >= vma->vm_end); + + address -= vma->vm_start; + for (pages = vma->vm_private_data; address > 0 && *pages; ++pages) + address -= PAGE_SIZE; + + if (*pages) { + struct page *page = *pages; + get_page(page); + return page; + } + + return NOPAGE_SIGBUS; +} + +/* + * Having a close hook prevents vma merging regardless of flags. + */ +static void special_mapping_close(struct vm_area_struct *vma) +{ +} + +static struct vm_operations_struct special_mapping_vmops = { + .close = special_mapping_close, + .nopage = special_mapping_nopage, +}; + +/* + * Called with mm->mmap_sem held for writing. + * Insert a new vma covering the given region, with the given flags. + * Its pages are supplied by the given array of struct page *. + * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. + * The region past the last page supplied will always produce SIGBUS. + * The array pointer and the pages it points to are assumed to stay alive + * for as long as this mapping might exist. + */ +int install_special_mapping(struct mm_struct *mm, + unsigned long addr, unsigned long len, + unsigned long vm_flags, struct page **pages) +{ + struct vm_area_struct *vma; + + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + if (unlikely(vma == NULL)) + return -ENOMEM; + + vma->vm_mm = mm; + vma->vm_start = addr; + vma->vm_end = addr + len; + + vma->vm_flags = vm_flags | mm->def_flags; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + + vma->vm_ops = &special_mapping_vmops; + vma->vm_private_data = pages; + + if (unlikely(insert_vm_struct(mm, vma))) { + kmem_cache_free(vm_area_cachep, vma); + return -ENOMEM; + } + + mm->total_vm += len >> PAGE_SHIFT; + + return 0; +}
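
For reference (not part of the patch above): a minimal, illustrative sketch of how a caller might use the new install_special_mapping() helper introduced at the end of this diff to map a single kernel-allocated page into a process, in the style of a vDSO-like mapping. The helper, its signature and the NULL-terminated pages-array convention are taken from the patch itself; the function name map_one_special_page(), the special_pages[] array and the error handling here are hypothetical and shown only to illustrate the calling convention.

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/err.h>

/*
 * special_pages[0] is assumed to have been allocated and filled elsewhere
 * (e.g. with alloc_page() at init time) and to stay alive for the lifetime
 * of the mapping; special_pages[1] stays NULL as the array terminator.
 */
static struct page *special_pages[2];

static int map_one_special_page(struct mm_struct *mm)
{
	unsigned long addr;
	int ret;

	/* install_special_mapping() is documented to need mmap_sem held for writing */
	down_write(&mm->mmap_sem);

	/* Let the mm pick an address for a one-page anonymous-style mapping */
	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = (int)addr;
		goto up_fail;
	}

	/*
	 * Read/execute only; VM_MAYWRITE is deliberately left out so the
	 * mapping cannot later be made writable with mprotect().
	 */
	ret = install_special_mapping(mm, addr, PAGE_SIZE,
				      VM_READ | VM_EXEC |
				      VM_MAYREAD | VM_MAYEXEC,
				      special_pages);
up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

Because the helper installs &special_mapping_vmops, which provides a ->close hook, such a vma is never merged with its neighbours, and any fault past the last page in the array raises SIGBUS, exactly as the comment above install_special_mapping() describes.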