diff --git a/mm/mmap.c b/mm/mmap.c
index 01f9793..47556d2 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -13,6 +13,7 @@
 #include <linux/pagemap.h>
 #include <linux/swap.h>
 #include <linux/syscalls.h>
+#include <linux/capability.h>
 #include <linux/init.h>
 #include <linux/file.h>
 #include <linux/fs.h>
@@ -61,7 +62,7 @@ pgprot_t protection_map[16] = {
 
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50;	/* default is 50% */
-int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
+int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
 atomic_t vm_committed_space = ATOMIC_INIT(0);
 
 /*
@@ -143,7 +144,11 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 	   leave 3% of the size of this process for other processes */
 	allowed -= current->mm->total_vm / 32;
 
-	if (atomic_read(&vm_committed_space) < allowed)
+	/*
+	 * cast `allowed' as a signed long because vm_committed_space
+	 * sometimes has a negative value
+	 */
+	if (atomic_read(&vm_committed_space) < (long)allowed)
 		return 0;
 
 	vm_unacct_memory(pages);
@@ -151,10 +156,6 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 	return -ENOMEM;
 }
 
-EXPORT_SYMBOL(sysctl_overcommit_memory);
-EXPORT_SYMBOL(sysctl_overcommit_ratio);
-EXPORT_SYMBOL(sysctl_max_map_count);
-EXPORT_SYMBOL(vm_committed_space);
 EXPORT_SYMBOL(__vm_enough_memory);
 
 /*
@@ -177,35 +178,38 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
 }
 
 /*
- * Remove one vm structure and free it.
+ * Unlink a file-based vm structure from its prio_tree, to hide
+ * vma from rmap and vmtruncate before freeing its page tables.
  */
-static void remove_vm_struct(struct vm_area_struct *vma)
+void unlink_file_vma(struct vm_area_struct *vma)
 {
 	struct file *file = vma->vm_file;
 
-	might_sleep();
 	if (file) {
 		struct address_space *mapping = file->f_mapping;
 		spin_lock(&mapping->i_mmap_lock);
 		__remove_shared_vm_struct(vma, file, mapping);
 		spin_unlock(&mapping->i_mmap_lock);
 	}
+}
+
+/*
+ * Close a vm structure and free it, returning the next.
+ */
+static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
+{
+	struct vm_area_struct *next = vma->vm_next;
+
+	might_sleep();
 	if (vma->vm_ops && vma->vm_ops->close)
 		vma->vm_ops->close(vma);
-	if (file)
-		fput(file);
-	anon_vma_unlink(vma);
+	if (vma->vm_file)
+		fput(vma->vm_file);
 	mpol_free(vma_policy(vma));
 	kmem_cache_free(vm_area_cachep, vma);
+	return next;
 }
 
-/*
- * sys_brk() for the most part doesn't need the global kernel
- * lock, except when an application is doing something nasty
- * like trying to un-brk an area that has already been mapped
- * to a regular file.  in this case, the unmapping will need
- * to invoke file system routines that need the global lock.
- */
 asmlinkage unsigned long sys_brk(unsigned long brk)
 {
 	unsigned long rlim, retval;
@@ -608,7 +612,7 @@ again:			remove_next = 1 + (end > next->vm_end);
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
+#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
 
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
 			struct file *file, unsigned long vm_flags)
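A note on the (long) cast added in __vm_enough_memory() above: atomic_read() yields a signed value and vm_committed_space can legitimately go negative, but `allowed' is an unsigned long, so without the cast C's usual arithmetic conversions would compare both sides as unsigned, and a negative committed count would wrap to a huge value and always fail the check. A minimal stand-alone demonstration, plain user-space C with made-up values, not kernel code:

	#include <stdio.h>

	int main(void)
	{
		long committed = -100;		/* vm_committed_space gone negative */
		unsigned long allowed = 1000;

		/* the explicit cast mirrors the implicit unsigned conversion:
		 * -100 becomes a value near ULONG_MAX, so the check fails */
		printf("unsigned compare: %d\n", (unsigned long)committed < allowed);	/* 0 */

		/* with the patch's (long) cast the comparison stays signed */
		printf("signed compare:   %d\n", committed < (long)allowed);		/* 1 */
		return 0;
	}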
@@ -835,7 +839,7 @@ none:
 }
 
 #ifdef CONFIG_PROC_FS
-void __vm_stat_account(struct mm_struct *mm, unsigned long flags,
+void vm_stat_account(struct mm_struct *mm, unsigned long flags,
 						struct file *file, long pages)
 {
 	const unsigned long stack_flags
@@ -1113,7 +1117,7 @@ munmap_back:
 	}
 out:
 	mm->total_vm += len >> PAGE_SHIFT;
-	__vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
+	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
 	if (vm_flags & VM_LOCKED) {
 		mm->locked_vm += len >> PAGE_SHIFT;
 		make_pages_present(addr, addr + len);
@@ -1175,7 +1179,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	start_addr = addr = mm->free_area_cache;
+	if (len > mm->cached_hole_size) {
+		start_addr = addr = mm->free_area_cache;
+	} else {
+		start_addr = addr = TASK_UNMAPPED_BASE;
+		mm->cached_hole_size = 0;
+	}
 
 full_search:
 	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
@@ -1186,7 +1195,9 @@ full_search:
 		 * some holes.
 		 */
 		if (start_addr != TASK_UNMAPPED_BASE) {
-			start_addr = addr = TASK_UNMAPPED_BASE;
+			addr = TASK_UNMAPPED_BASE;
+			start_addr = addr;
+			mm->cached_hole_size = 0;
 			goto full_search;
 		}
 		return -ENOMEM;
@@ -1198,19 +1209,22 @@ full_search:
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
 		addr = vma->vm_end;
 	}
 }
 #endif
 
-void arch_unmap_area(struct vm_area_struct *area)
+void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
 {
 	/*
 	 * Is this a new hole at the lowest possible address?
 	 */
-	if (area->vm_start >= TASK_UNMAPPED_BASE &&
-	    area->vm_start < area->vm_mm->free_area_cache)
-		area->vm_mm->free_area_cache = area->vm_start;
+	if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
+		mm->free_area_cache = addr;
+		mm->cached_hole_size = ~0UL;
+	}
 }
 
 /*
@@ -1240,17 +1254,26 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			return addr;
 	}
 
+	/* check if free_area_cache is useful for us */
+	if (len <= mm->cached_hole_size) {
+		mm->cached_hole_size = 0;
+		mm->free_area_cache = mm->mmap_base;
+	}
+
 	/* either no address requested or can't fit in requested address hole */
 	addr = mm->free_area_cache;
 
 	/* make sure it can fit in the remaining address space */
-	if (addr >= len) {
+	if (addr > len) {
 		vma = find_vma(mm, addr-len);
 		if (!vma || addr <= vma->vm_start)
 			/* remember the address as a hint for next time */
 			return (mm->free_area_cache = addr-len);
 	}
 
+	if (mm->mmap_base < len)
+		goto bottomup;
+
 	addr = mm->mmap_base-len;
 
 	do {
@@ -1264,75 +1287,85 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			/* remember the address as a hint for next time */
 			return (mm->free_area_cache = addr);
 
+		/* remember the largest hole we saw so far */
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
 		/* try just below the current vma->vm_start */
 		addr = vma->vm_start-len;
-	} while (len <= vma->vm_start);
+	} while (len < vma->vm_start);
 
+bottomup:
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	mm->cached_hole_size = ~0UL;
+	mm->free_area_cache = TASK_UNMAPPED_BASE;
 	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
 	/*
 	 * Restore the topdown base:
 	 */
 	mm->free_area_cache = mm->mmap_base;
+	mm->cached_hole_size = ~0UL;
 
 	return addr;
 }
 #endif
 
-void arch_unmap_area_topdown(struct vm_area_struct *area)
+void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
 {
 	/*
 	 * Is this a new hole at the highest possible address?
 	 */
-	if (area->vm_end > area->vm_mm->free_area_cache)
-		area->vm_mm->free_area_cache = area->vm_end;
+	if (addr > mm->free_area_cache)
+		mm->free_area_cache = addr;
 
 	/* dont allow allocations above current base */
-	if (area->vm_mm->free_area_cache > area->vm_mm->mmap_base)
-		area->vm_mm->free_area_cache = area->vm_mm->mmap_base;
+	if (mm->free_area_cache > mm->mmap_base)
+		mm->free_area_cache = mm->mmap_base;
 }
 
 unsigned long
 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		unsigned long pgoff, unsigned long flags)
 {
-	if (flags & MAP_FIXED) {
-		unsigned long ret;
+	unsigned long ret;
 
-		if (addr > TASK_SIZE - len)
-			return -ENOMEM;
-		if (addr & ~PAGE_MASK)
-			return -EINVAL;
-		if (file && is_file_hugepages(file)) {
-			/*
-			 * Check if the given range is hugepage aligned, and
-			 * can be made suitable for hugepages.
-			 */
-			ret = prepare_hugepage_range(addr, len);
-		} else {
-			/*
-			 * Ensure that a normal request is not falling in a
-			 * reserved hugepage range.  For some archs like IA-64,
-			 * there is a separate region for hugepages.
-			 */
-			ret = is_hugepage_only_range(current->mm, addr, len);
-		}
-		if (ret)
-			return -EINVAL;
-		return addr;
-	}
+	if (!(flags & MAP_FIXED)) {
+		unsigned long (*get_area)(struct file *, unsigned long,
+				unsigned long, unsigned long, unsigned long);
 
-	if (file && file->f_op && file->f_op->get_unmapped_area)
-		return file->f_op->get_unmapped_area(file, addr, len,
-						pgoff, flags);
+		get_area = current->mm->get_unmapped_area;
+		if (file && file->f_op && file->f_op->get_unmapped_area)
+			get_area = file->f_op->get_unmapped_area;
+		addr = get_area(file, addr, len, pgoff, flags);
+		if (IS_ERR_VALUE(addr))
+			return addr;
+	}
 
-	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+	if (addr > TASK_SIZE - len)
+		return -ENOMEM;
+	if (addr & ~PAGE_MASK)
+		return -EINVAL;
+	if (file && is_file_hugepages(file)) {
+		/*
+		 * Check if the given range is hugepage aligned, and
+		 * can be made suitable for hugepages.
+		 */
+		ret = prepare_hugepage_range(addr, len);
+	} else {
+		/*
+		 * Ensure that a normal request is not falling in a
+		 * reserved hugepage range.  For some archs like IA-64,
+		 * there is a separate region for hugepages.
		 */
+		ret = is_hugepage_only_range(current->mm, addr, len);
+	}
+	if (ret)
+		return -EINVAL;
+	return addr;
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
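The free_area_cache/cached_hole_size interplay threaded through the two allocators above is easier to see in isolation. Below is a minimal user-space model of the bottom-up search; struct range, maps[] and get_area() are hypothetical stand-ins for the vma list and arch_get_unmapped_area(), and this is a sketch of the heuristic only: resume scanning at the cached point, remember the largest hole skipped so far, and rescan from the base only when a request is small enough to possibly fit in that remembered hole.

	#include <stdio.h>

	struct range { unsigned long start, end; };

	/* sorted, non-overlapping "mappings" */
	static struct range maps[] = {
		{ 0x10000, 0x20000 }, { 0x30000, 0x38000 }, { 0x40000, 0x90000 },
	};
	#define BASE 0x10000UL				/* plays TASK_UNMAPPED_BASE */
	static unsigned long free_cache = BASE;		/* where to resume scanning */
	static unsigned long cached_hole;		/* largest hole below free_cache */

	static unsigned long get_area(unsigned long len)
	{
		unsigned long addr;
		size_t i;

		/* a hole >= len may exist below the cache point: restart from base */
		if (len <= cached_hole) {
			free_cache = BASE;
			cached_hole = 0;
		}
		addr = free_cache;
		for (i = 0; i < sizeof(maps) / sizeof(maps[0]); i++) {
			if (maps[i].end <= addr)
				continue;		/* already past this mapping */
			if (addr + len <= maps[i].start) {
				free_cache = addr + len;
				return addr;		/* hole is big enough */
			}
			if (addr + cached_hole < maps[i].start)
				cached_hole = maps[i].start - addr;
			addr = maps[i].end;
		}
		free_cache = addr + len;
		return addr;				/* above the last mapping */
	}

	int main(void)
	{
		printf("64k -> %#lx\n", get_area(0x10000));	/* 0x20000 */
		printf("16k -> %#lx\n", get_area(0x4000));	/* 0x38000 */
		return 0;
	}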
@@ -1449,15 +1482,19 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
 	mm->total_vm += grow;
 	if (vma->vm_flags & VM_LOCKED)
 		mm->locked_vm += grow;
-	__vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
+	vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
 	return 0;
 }
 
-#ifdef CONFIG_STACK_GROWSUP
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
 /*
- * vma is the first one with address > vma->vm_end.  Have to extend vma.
+ * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
+ * vma is the last one with address > vma->vm_end.  Have to extend vma.
  */
-int expand_stack(struct vm_area_struct * vma, unsigned long address)
+#ifndef CONFIG_IA64
+static inline
+#endif
+int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
 	int error;
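Before the GROWSUP wrapper below, it may help to see the core operation expand_upwards() performs once its checks pass: page-align the faulting address, verify the grown size against the stack limit, then move vm_end. A compressed user-space model, a sketch only (hypothetical struct vma and a bare limit argument; the real function also takes the anon_vma lock and charges the new pages through acct_stack_growth()):

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & PAGE_MASK)

	struct vma { unsigned long vm_start, vm_end; };

	static int expand_upwards(struct vma *vma, unsigned long address,
				  unsigned long stack_limit)
	{
		address = PAGE_ALIGN(address + 1);	/* cover the faulting byte */
		if (address <= vma->vm_end)
			return 0;			/* already mapped */
		if (address - vma->vm_start > stack_limit)
			return -1;			/* would exceed RLIMIT_STACK */
		vma->vm_end = address;			/* grow the mapping upward */
		return 0;
	}

	int main(void)
	{
		struct vma v = { 0x100000, 0x101000 };

		expand_upwards(&v, 0x104567, 8 * 1024 * 1024);
		printf("vm_end = %#lx\n", v.vm_end);	/* 0x105000 */
		return 0;
	}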
@@ -1495,6 +1532,13 @@ int expand_stack(struct vm_area_struct * vma, unsigned long address)
 	anon_vma_unlock(vma);
 	return error;
 }
+#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
+
+#ifdef CONFIG_STACK_GROWSUP
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+	return expand_upwards(vma, address);
+}
 
 struct vm_area_struct *
 find_extend_vma(struct mm_struct *mm, unsigned long addr)
@@ -1577,37 +1621,24 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
 }
 #endif
 
-/* Normal function to fix up a mapping
- * This function is the default for when an area has no specific
- * function.  This may be used as part of a more specific routine.
- *
- * By the time this function is called, the area struct has been
- * removed from the process mapping list.
- */
-static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
-{
-	size_t len = area->vm_end - area->vm_start;
-
-	area->vm_mm->total_vm -= len >> PAGE_SHIFT;
-	if (area->vm_flags & VM_LOCKED)
-		area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
-	vm_stat_unaccount(area);
-	area->vm_mm->unmap_area(area);
-	remove_vm_struct(area);
-}
-
 /*
- * Update the VMA and inode share lists.
- *
- * Ok - we have the memory areas we should free on the 'free' list,
+ * Ok - we have the memory areas we should free on the vma list,
  * so release them, and do the vma updates.
+ *
+ * Called with the mm semaphore held.
  */
-static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 {
+	/* Update high watermark before we lower total_vm */
+	update_hiwater_vm(mm);
 	do {
-		struct vm_area_struct *next = vma->vm_next;
-		unmap_vma(mm, vma);
-		vma = next;
+		long nrpages = vma_pages(vma);
+
+		mm->total_vm -= nrpages;
+		if (vma->vm_flags & VM_LOCKED)
+			mm->locked_vm -= nrpages;
+		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
+		vma = remove_vma(vma);
 	} while (vma);
 	validate_mm(mm);
 }
@@ -1615,7 +1646,7 @@ static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 /*
  * Get rid of page table information in the indicated region.
  *
- * Called with the page table lock held.
+ * Called with the mm semaphore held.
  */
 static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
@@ -1626,14 +1657,13 @@ static void unmap_region(struct mm_struct *mm,
 	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
-	spin_lock(&mm->page_table_lock);
 	tlb = tlb_gather_mmu(mm, 0);
-	unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
+	update_hiwater_rss(mm);
+	unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
 	free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
 				 next? next->vm_start: 0);
 	tlb_finish_mmu(tlb, start, end);
-	spin_unlock(&mm->page_table_lock);
 }
 
 /*
@@ -1646,6 +1676,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct vm_area_struct **insertion_point;
 	struct vm_area_struct *tail_vma = NULL;
+	unsigned long addr;
 
 	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
 	do {
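remove_vma() returns the successor of the vma it frees, which is what lets remove_vma_list() above and the exit_mmap() loop later in this patch collapse into a flat walk. A stand-alone sketch of that teardown idiom with hypothetical types (plain C, no kernel locking, vm_ops, or error handling):

	#include <stdio.h>
	#include <stdlib.h>

	struct vma {
		unsigned long vm_start, vm_end;
		struct vma *vm_next;
	};

	/* free one node, hand back its successor (the remove_vma() shape) */
	static struct vma *remove_vma(struct vma *vma)
	{
		struct vma *next = vma->vm_next;

		/* the kernel would call vm_ops->close() and fput() here */
		free(vma);
		return next;
	}

	int main(void)
	{
		struct vma *head = NULL;
		unsigned long total = 0;
		int i;

		for (i = 0; i < 3; i++) {
			struct vma *v = malloc(sizeof(*v));

			v->vm_start = i * 0x1000UL;
			v->vm_end = v->vm_start + 0x1000UL;
			v->vm_next = head;
			head = v;
		}

		/* like remove_vma_list(): account for each vma, then free it */
		while (head) {
			total += head->vm_end - head->vm_start;
			head = remove_vma(head);
		}
		printf("unmapped %lu bytes\n", total);	/* 12288 */
		return 0;
	}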
@@ -1656,6 +1687,11 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 	} while (vma && vma->vm_start < end);
 	*insertion_point = vma;
 	tail_vma->vm_next = NULL;
+	if (mm->unmap_area == arch_unmap_area)
+		addr = prev ? prev->vm_end : mm->mmap_base;
+	else
+		addr = vma ? vma->vm_start : mm->mmap_base;
+	mm->unmap_area(mm, addr);
 	mm->mmap_cache = NULL;		/* Kill the cache. */
 }
 
@@ -1768,7 +1804,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 	unmap_region(mm, vma, prev, start, end);
 
 	/* Fix up all other VM information */
-	unmap_vma_list(mm, vma);
+	remove_vma_list(mm, vma);
 
 	return 0;
 }
@@ -1790,7 +1826,7 @@ asmlinkage long sys_munmap(unsigned long addr, size_t len)
 
 static inline void verify_mm_writelocked(struct mm_struct *mm)
 {
-#ifdef CONFIG_DEBUG_KERNEL
+#ifdef CONFIG_DEBUG_VM
 	if (unlikely(down_read_trylock(&mm->mmap_sem))) {
 		WARN_ON(1);
 		up_read(&mm->mmap_sem);
@@ -1902,34 +1938,21 @@ void exit_mmap(struct mm_struct *mm)
 	unsigned long end;
 
 	lru_add_drain();
-
-	spin_lock(&mm->page_table_lock);
-
 	flush_cache_mm(mm);
 	tlb = tlb_gather_mmu(mm, 1);
+	/* Don't update_hiwater_rss(mm) here, do_exit already did */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
+	end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(tlb, 0, end);
 
-	mm->mmap = mm->mmap_cache = NULL;
-	mm->mm_rb = RB_ROOT;
-	set_mm_counter(mm, rss, 0);
-	mm->total_vm = 0;
-	mm->locked_vm = 0;
-
-	spin_unlock(&mm->page_table_lock);
-
 	/*
-	 * Walk the list again, actually closing and freeing it
-	 * without holding any MM locks.
+	 * Walk the list again, actually closing and freeing it,
+	 * with preemption enabled, without holding any MM locks.
 	 */
-	while (vma) {
-		struct vm_area_struct *next = vma->vm_next;
-		remove_vm_struct(vma);
-		vma = next;
-	}
+	while (vma)
+		vma = remove_vma(vma);
 
 	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
@@ -1962,6 +1985,9 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 	__vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
 	if (__vma && __vma->vm_start < vma->vm_end)
 		return -ENOMEM;
+	if ((vma->vm_flags & VM_ACCOUNT) &&
+	     security_vm_enough_memory(vma_pages(vma)))
+		return -ENOMEM;
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 	return 0;
 }
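The CONFIG_DEBUG_VM assertion above leans on a small rwsem trick: a read trylock can only succeed when no writer holds the lock, so if down_read_trylock() succeeds inside verify_mm_writelocked(), the caller has broken the rule that mmap_sem must be held for writing, and the WARN_ON fires. A user-space analogue with POSIX rwlocks, assuming the usual glibc behavior that tryrdlock fails with EBUSY while a writer (even the calling thread) holds the lock; a sketch, not kernel code:

	/* build with: cc demo.c -lpthread */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t sem = PTHREAD_RWLOCK_INITIALIZER;

	static void verify_writelocked(void)
	{
		if (pthread_rwlock_tryrdlock(&sem) == 0) {	/* should fail */
			fprintf(stderr, "caller forgot the write lock!\n");
			pthread_rwlock_unlock(&sem);
		}
	}

	int main(void)
	{
		pthread_rwlock_wrlock(&sem);
		verify_writelocked();		/* silent: lock held correctly */
		pthread_rwlock_unlock(&sem);

		verify_writelocked();		/* prints the warning */
		return 0;
	}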