X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=mm%2Fmmap.c;h=d799d896d74aae5f19c604928eb6a52738676d66;hb=46ff2c7250eece24974ddb2198421c83a69a6289;hp=de54acd9942f9929004921042721df5cdfe2b6c7;hpb=07ab67c8d0d7c1021343b7d5c045033d6bf7be69;p=safe%2Fjmp%2Flinux-2.6

diff --git a/mm/mmap.c b/mm/mmap.c
index de54acd..d799d89 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -13,6 +13,7 @@
 #include <linux/pagemap.h>
 #include <linux/swap.h>
 #include <linux/syscalls.h>
+#include <linux/capability.h>
 #include <linux/init.h>
 #include <linux/file.h>
 #include <linux/fs.h>
@@ -29,6 +30,10 @@
 #include <asm/cacheflush.h>
 #include <asm/tlb.h>
 
+#ifndef arch_mmap_check
+#define arch_mmap_check(addr, len, flags)	(0)
+#endif
+
 static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		unsigned long start, unsigned long end);
@@ -59,9 +64,16 @@ pgprot_t protection_map[16] = {
 	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
 };
 
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+	return protection_map[vm_flags &
+				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+}
+EXPORT_SYMBOL(vm_get_page_prot);
+
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50;	/* default is 50% */
-int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
+int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
 atomic_t vm_committed_space = ATOMIC_INIT(0);
 
 /*
@@ -95,7 +107,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
 		unsigned long n;
 
-		free = get_page_cache_size();
+		free = global_page_state(NR_FILE_PAGES);
 		free += nr_swap_pages;
 
 		/*
@@ -120,14 +132,26 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 		 * only call if we're about to fail.
 		 */
 		n = nr_free_pages();
+
+		/*
+		 * Leave reserved pages. The pages are not for anonymous pages.
+		 */
+		if (n <= totalreserve_pages)
+			goto error;
+		else
+			n -= totalreserve_pages;
+
+		/*
+		 * Leave the last 3% for root
+		 */
 		if (!cap_sys_admin)
 			n -= n / 32;
 		free += n;
 
 		if (free > pages)
 			return 0;
-		vm_unacct_memory(pages);
-		return -ENOMEM;
+
+		goto error;
 	}
 
 	allowed = (totalram_pages - hugetlb_total_pages())
@@ -143,18 +167,18 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 	   leave 3% of the size of this process for other processes */
 	allowed -= current->mm->total_vm / 32;
 
-	if (atomic_read(&vm_committed_space) < allowed)
+	/*
+	 * cast `allowed' as a signed long because vm_committed_space
+	 * sometimes has a negative value
+	 */
+	if (atomic_read(&vm_committed_space) < (long)allowed)
 		return 0;
-
+error:
 	vm_unacct_memory(pages);
 	return -ENOMEM;
 }
 
-EXPORT_SYMBOL(sysctl_overcommit_memory);
-EXPORT_SYMBOL(sysctl_overcommit_ratio);
-EXPORT_SYMBOL(sysctl_max_map_count);
-EXPORT_SYMBOL(vm_committed_space);
 EXPORT_SYMBOL(__vm_enough_memory);
 
 /*
@@ -177,35 +201,38 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
 }
 
 /*
- * Remove one vm structure and free it.
+ * Unlink a file-based vm structure from its prio_tree, to hide
+ * vma from rmap and vmtruncate before freeing its page tables.
  */
-static void remove_vm_struct(struct vm_area_struct *vma)
+void unlink_file_vma(struct vm_area_struct *vma)
 {
 	struct file *file = vma->vm_file;
 
-	might_sleep();
 	if (file) {
 		struct address_space *mapping = file->f_mapping;
 		spin_lock(&mapping->i_mmap_lock);
 		__remove_shared_vm_struct(vma, file, mapping);
 		spin_unlock(&mapping->i_mmap_lock);
 	}
+}
+
+/*
+ * Close a vm structure and free it, returning the next.
+ */
+static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
+{
+	struct vm_area_struct *next = vma->vm_next;
+
+	might_sleep();
 	if (vma->vm_ops && vma->vm_ops->close)
 		vma->vm_ops->close(vma);
-	if (file)
-		fput(file);
-	anon_vma_unlink(vma);
+	if (vma->vm_file)
+		fput(vma->vm_file);
 	mpol_free(vma_policy(vma));
 	kmem_cache_free(vm_area_cachep, vma);
+	return next;
 }
 
-/*
- * sys_brk() for the most part doesn't need the global kernel
- * lock, except when an application is doing something nasty
- * like trying to un-brk an area that has already been mapped
- * to a regular file. in this case, the unmapping will need
- * to invoke file system routines that need the global lock.
- */
 asmlinkage unsigned long sys_brk(unsigned long brk)
 {
 	unsigned long rlim, retval;
@@ -216,6 +243,17 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
 
 	if (brk < mm->end_code)
 		goto out;
+
+	/*
+	 * Check against rlimit here. If this check is done later after the test
+	 * of oldbrk with newbrk then it can escape the test and let the data
+	 * segment grow beyond its set limit in the case where the limit is
+	 * not page aligned -Ram Gupta
+	 */
+	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
+		goto out;
+
 	newbrk = PAGE_ALIGN(brk);
 	oldbrk = PAGE_ALIGN(mm->brk);
 	if (oldbrk == newbrk)
@@ -228,11 +266,6 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
 		goto out;
 	}
 
-	/* Check against rlimit.. */
-	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
-	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
-		goto out;
-
 	/* Check against existing mmap mappings. */
 	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
 		goto out;
@@ -290,8 +323,7 @@ void validate_mm(struct mm_struct *mm)
 	i = browse_rb(&mm->mm_rb);
 	if (i != mm->map_count)
 		printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
-	if (bug)
-		BUG();
+	BUG_ON(bug);
 }
 #else
 #define validate_mm(mm) do { } while (0)
@@ -428,8 +460,7 @@ __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 	struct rb_node ** rb_link, * rb_parent;
 
 	__vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
-	if (__vma && __vma->vm_start < vma->vm_end)
-		BUG();
+	BUG_ON(__vma && __vma->vm_start < vma->vm_end);
 	__vma_link(mm, vma, prev, rb_link, rb_parent);
 	mm->map_count++;
 }
@@ -608,7 +639,7 @@ again:			remove_next = 1 + (end > next->vm_end);
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
+#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
 
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
 			struct file *file, unsigned long vm_flags)
@@ -809,8 +840,7 @@ try_prev:
 	 * (e.g. stash info in next's anon_vma_node when assigning
 	 * an anon_vma, or when trying vma_merge). Another time.
*/ - if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma) - BUG(); + BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma); if (!near) goto none; @@ -835,20 +865,12 @@ none: } #ifdef CONFIG_PROC_FS -void __vm_stat_account(struct mm_struct *mm, unsigned long flags, +void vm_stat_account(struct mm_struct *mm, unsigned long flags, struct file *file, long pages) { const unsigned long stack_flags = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN); -#ifdef CONFIG_HUGETLB - if (flags & VM_HUGETLB) { - if (!(flags & VM_DONTCOPY)) - mm->shared_vm += pages; - return; - } -#endif /* CONFIG_HUGETLB */ - if (file) { mm->shared_vm += pages; if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC) @@ -902,6 +924,10 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, if (!len) return -EINVAL; + error = arch_mmap_check(addr, len, flags); + if (error) + return error; + /* Careful about overflows.. */ len = PAGE_ALIGN(len); if (!len || len > TASK_SIZE) @@ -1044,18 +1070,18 @@ munmap_back: * specific mapper. the address has already been validated, but * not unmapped, but the maps are removed from the list. */ - vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (!vma) { error = -ENOMEM; goto unacct_error; } - memset(vma, 0, sizeof(*vma)); vma->vm_mm = mm; vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_flags = vm_flags; - vma->vm_page_prot = protection_map[vm_flags & 0x0f]; + vma->vm_page_prot = protection_map[vm_flags & + (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]; vma->vm_pgoff = pgoff; if (file) { @@ -1079,6 +1105,12 @@ munmap_back: goto free_vma; } + /* Don't make the VMA automatically writable if it's shared, but the + * backer wishes to know when pages are first written to */ + if (vma->vm_ops && vma->vm_ops->page_mkwrite) + vma->vm_page_prot = + protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)]; + /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform * shmem_zero_setup (perhaps called through /dev/zero's ->mmap) * that memory reservation must be checked; but that reservation @@ -1113,7 +1145,7 @@ munmap_back: } out: mm->total_vm += len >> PAGE_SHIFT; - __vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); + vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); if (vm_flags & VM_LOCKED) { mm->locked_vm += len >> PAGE_SHIFT; make_pages_present(addr, addr + len); @@ -1175,7 +1207,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, (!vma || addr + len <= vma->vm_start)) return addr; } - start_addr = addr = mm->free_area_cache; + if (len > mm->cached_hole_size) { + start_addr = addr = mm->free_area_cache; + } else { + start_addr = addr = TASK_UNMAPPED_BASE; + mm->cached_hole_size = 0; + } full_search: for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { @@ -1186,7 +1223,9 @@ full_search: * some holes. */ if (start_addr != TASK_UNMAPPED_BASE) { - start_addr = addr = TASK_UNMAPPED_BASE; + addr = TASK_UNMAPPED_BASE; + start_addr = addr; + mm->cached_hole_size = 0; goto full_search; } return -ENOMEM; @@ -1198,19 +1237,22 @@ full_search: mm->free_area_cache = addr + len; return addr; } + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; addr = vma->vm_end; } } #endif -void arch_unmap_area(struct vm_area_struct *area) +void arch_unmap_area(struct mm_struct *mm, unsigned long addr) { /* * Is this a new hole at the lowest possible address? 
*/ - if (area->vm_start >= TASK_UNMAPPED_BASE && - area->vm_start < area->vm_mm->free_area_cache) - area->vm_mm->free_area_cache = area->vm_start; + if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) { + mm->free_area_cache = addr; + mm->cached_hole_size = ~0UL; + } } /* @@ -1240,6 +1282,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, return addr; } + /* check if free_area_cache is useful for us */ + if (len <= mm->cached_hole_size) { + mm->cached_hole_size = 0; + mm->free_area_cache = mm->mmap_base; + } + /* either no address requested or can't fit in requested address hole */ addr = mm->free_area_cache; @@ -1251,6 +1299,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, return (mm->free_area_cache = addr-len); } + if (mm->mmap_base < len) + goto bottomup; + addr = mm->mmap_base-len; do { @@ -1264,38 +1315,45 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, /* remember the address as a hint for next time */ return (mm->free_area_cache = addr); + /* remember the largest hole we saw so far */ + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + /* try just below the current vma->vm_start */ addr = vma->vm_start-len; } while (len < vma->vm_start); +bottomup: /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario * can happen with large stack limits and large mmap() * allocations. */ - mm->free_area_cache = TASK_UNMAPPED_BASE; + mm->cached_hole_size = ~0UL; + mm->free_area_cache = TASK_UNMAPPED_BASE; addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); /* * Restore the topdown base: */ mm->free_area_cache = mm->mmap_base; + mm->cached_hole_size = ~0UL; return addr; } #endif -void arch_unmap_area_topdown(struct vm_area_struct *area) +void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) { /* * Is this a new hole at the highest possible address? */ - if (area->vm_end > area->vm_mm->free_area_cache) - area->vm_mm->free_area_cache = area->vm_end; + if (addr > mm->free_area_cache) + mm->free_area_cache = addr; /* dont allow allocations above current base */ - if (area->vm_mm->free_area_cache > area->vm_mm->mmap_base) - area->vm_mm->free_area_cache = area->vm_mm->mmap_base; + if (mm->free_area_cache > mm->mmap_base) + mm->free_area_cache = mm->mmap_base; } unsigned long @@ -1452,15 +1510,19 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un mm->total_vm += grow; if (vma->vm_flags & VM_LOCKED) mm->locked_vm += grow; - __vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow); + vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow); return 0; } -#ifdef CONFIG_STACK_GROWSUP +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) /* - * vma is the first one with address > vma->vm_end. Have to extend vma. + * PA-RISC uses this for its stack; IA64 for its Register Backing Store. + * vma is the last one with address > vma->vm_end. Have to extend vma. 
*/ -int expand_stack(struct vm_area_struct * vma, unsigned long address) +#ifndef CONFIG_IA64 +static inline +#endif +int expand_upwards(struct vm_area_struct *vma, unsigned long address) { int error; @@ -1498,6 +1560,13 @@ int expand_stack(struct vm_area_struct * vma, unsigned long address) anon_vma_unlock(vma); return error; } +#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */ + +#ifdef CONFIG_STACK_GROWSUP +int expand_stack(struct vm_area_struct *vma, unsigned long address) +{ + return expand_upwards(vma, address); +} struct vm_area_struct * find_extend_vma(struct mm_struct *mm, unsigned long addr) @@ -1580,37 +1649,24 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr) } #endif -/* Normal function to fix up a mapping - * This function is the default for when an area has no specific - * function. This may be used as part of a more specific routine. - * - * By the time this function is called, the area struct has been - * removed from the process mapping list. - */ -static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area) -{ - size_t len = area->vm_end - area->vm_start; - - area->vm_mm->total_vm -= len >> PAGE_SHIFT; - if (area->vm_flags & VM_LOCKED) - area->vm_mm->locked_vm -= len >> PAGE_SHIFT; - vm_stat_unaccount(area); - area->vm_mm->unmap_area(area); - remove_vm_struct(area); -} - /* - * Update the VMA and inode share lists. - * - * Ok - we have the memory areas we should free on the 'free' list, + * Ok - we have the memory areas we should free on the vma list, * so release them, and do the vma updates. + * + * Called with the mm semaphore held. */ -static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) +static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) { + /* Update high watermark before we lower total_vm */ + update_hiwater_vm(mm); do { - struct vm_area_struct *next = vma->vm_next; - unmap_vma(mm, vma); - vma = next; + long nrpages = vma_pages(vma); + + mm->total_vm -= nrpages; + if (vma->vm_flags & VM_LOCKED) + mm->locked_vm -= nrpages; + vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); + vma = remove_vma(vma); } while (vma); validate_mm(mm); } @@ -1618,7 +1674,7 @@ static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) /* * Get rid of page table information in the indicated region. * - * Called with the page table lock held. + * Called with the mm semaphore held. */ static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, @@ -1629,14 +1685,13 @@ static void unmap_region(struct mm_struct *mm, unsigned long nr_accounted = 0; lru_add_drain(); - spin_lock(&mm->page_table_lock); tlb = tlb_gather_mmu(mm, 0); - unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL); + update_hiwater_rss(mm); + unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL); vm_unacct_memory(nr_accounted); free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS, next? next->vm_start: 0); tlb_finish_mmu(tlb, start, end); - spin_unlock(&mm->page_table_lock); } /* @@ -1649,6 +1704,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, { struct vm_area_struct **insertion_point; struct vm_area_struct *tail_vma = NULL; + unsigned long addr; insertion_point = (prev ? 
&prev->vm_next : &mm->mmap); do { @@ -1659,6 +1715,11 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, } while (vma && vma->vm_start < end); *insertion_point = vma; tail_vma->vm_next = NULL; + if (mm->unmap_area == arch_unmap_area) + addr = prev ? prev->vm_end : mm->mmap_base; + else + addr = vma ? vma->vm_start : mm->mmap_base; + mm->unmap_area(mm, addr); mm->mmap_cache = NULL; /* Kill the cache. */ } @@ -1771,7 +1832,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) unmap_region(mm, vma, prev, start, end); /* Fix up all other VM information */ - unmap_vma_list(mm, vma); + remove_vma_list(mm, vma); return 0; } @@ -1793,7 +1854,7 @@ asmlinkage long sys_munmap(unsigned long addr, size_t len) static inline void verify_mm_writelocked(struct mm_struct *mm) { -#ifdef CONFIG_DEBUG_KERNEL +#ifdef CONFIG_DEBUG_VM if (unlikely(down_read_trylock(&mm->mmap_sem))) { WARN_ON(1); up_read(&mm->mmap_sem); @@ -1813,6 +1874,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) unsigned long flags; struct rb_node ** rb_link, * rb_parent; pgoff_t pgoff = addr >> PAGE_SHIFT; + int error; len = PAGE_ALIGN(len); if (!len) @@ -1821,6 +1883,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len) if ((addr + len) > TASK_SIZE || (addr + len) < addr) return -EINVAL; + flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; + + error = arch_mmap_check(addr, len, flags); + if (error) + return error; + /* * mlock MCL_FUTURE? */ @@ -1861,8 +1929,6 @@ unsigned long do_brk(unsigned long addr, unsigned long len) if (security_vm_enough_memory(len >> PAGE_SHIFT)) return -ENOMEM; - flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; - /* Can we just expand an old private anonymous mapping? */ if (vma_merge(mm, prev, addr, addr + len, flags, NULL, NULL, pgoff, NULL)) @@ -1871,19 +1937,19 @@ unsigned long do_brk(unsigned long addr, unsigned long len) /* * create a vma struct for an anonymous mapping */ - vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (!vma) { vm_unacct_memory(len >> PAGE_SHIFT); return -ENOMEM; } - memset(vma, 0, sizeof(*vma)); vma->vm_mm = mm; vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_pgoff = pgoff; vma->vm_flags = flags; - vma->vm_page_prot = protection_map[flags & 0x0f]; + vma->vm_page_prot = protection_map[flags & + (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]; vma_link(mm, vma, prev, rb_link, rb_parent); out: mm->total_vm += len >> PAGE_SHIFT; @@ -1905,34 +1971,21 @@ void exit_mmap(struct mm_struct *mm) unsigned long end; lru_add_drain(); - - spin_lock(&mm->page_table_lock); - flush_cache_mm(mm); tlb = tlb_gather_mmu(mm, 1); + /* Don't update_hiwater_rss(mm) here, do_exit already did */ /* Use -1 here to ensure all VMAs in the mm are unmapped */ - end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL); + end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL); vm_unacct_memory(nr_accounted); free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0); tlb_finish_mmu(tlb, 0, end); - mm->mmap = mm->mmap_cache = NULL; - mm->mm_rb = RB_ROOT; - set_mm_counter(mm, rss, 0); - mm->total_vm = 0; - mm->locked_vm = 0; - - spin_unlock(&mm->page_table_lock); - /* - * Walk the list again, actually closing and freeing it - * without holding any MM locks. + * Walk the list again, actually closing and freeing it, + * with preemption enabled, without holding any MM locks. 
*/ - while (vma) { - struct vm_area_struct *next = vma->vm_next; - remove_vm_struct(vma); - vma = next; - } + while (vma) + vma = remove_vma(vma); BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); } @@ -1965,6 +2018,9 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) __vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent); if (__vma && __vma->vm_start < vma->vm_end) return -ENOMEM; + if ((vma->vm_flags & VM_ACCOUNT) && + security_vm_enough_memory(vma_pages(vma))) + return -ENOMEM; vma_link(mm, vma, prev, rb_link, rb_parent); return 0; }
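
For readers following the free_area_cache/cached_hole_size changes above, here is a minimal userspace sketch of the bottom-up first-fit search that arch_get_unmapped_area() performs after this patch. It is an illustration only, not kernel code: the vma list, the constants, and the helper name are hypothetical stand-ins, and the TASK_SIZE bound and the restart-from-base retry of the real function are omitted.

/* Illustrative sketch only -- simplified, not the kernel implementation. */
#include <stdio.h>

struct vma { unsigned long start, end; struct vma *next; };	/* sorted by start */

#define TASK_UNMAPPED_BASE	0x1000UL

static unsigned long free_area_cache = TASK_UNMAPPED_BASE;
static unsigned long cached_hole_size;	/* largest hole seen below the cache */

static unsigned long get_unmapped_area(struct vma *map, unsigned long len)
{
	struct vma *vma;
	unsigned long addr;

	/*
	 * No hole below free_area_cache can hold this request unless it is
	 * no larger than the biggest hole already seen there; otherwise the
	 * scan must restart from the base and re-measure the holes.
	 */
	if (len > cached_hole_size)
		addr = free_area_cache;
	else {
		addr = TASK_UNMAPPED_BASE;
		cached_hole_size = 0;
	}

	/* The kernel uses find_vma() to jump straight to addr; a plain
	 * list walk keeps the sketch short. */
	for (vma = map; ; vma = vma->next) {
		if (!vma || addr + len <= vma->start) {
			free_area_cache = addr + len;	/* hint for next time */
			return addr;
		}
		/* remember the largest hole we saw so far */
		if (addr + cached_hole_size < vma->start)
			cached_hole_size = vma->start - addr;
		/* step past this vma and try the next gap */
		addr = vma->end;
	}
}

int main(void)
{
	struct vma b = { 0x5000, 0x6000, NULL };
	struct vma a = { 0x2000, 0x3000, &b };

	printf("%#lx\n", get_unmapped_area(&a, 0x1000));	/* 0x1000 */
	printf("%#lx\n", get_unmapped_area(&a, 0x2000));	/* 0x3000 */
	return 0;
}

The point of the heuristic is visible in the first branch: a request larger than every hole seen below free_area_cache cannot fit below it, so the walk may safely begin at the cache; any smaller request forces a rescan from the base, which rebuilds cached_hole_size as it measures each gap.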