X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=mm%2Fmmap.c;h=bb4c963cc5347f695de8a060336e877278bbed81;hb=8bc3be2751b4f74ab90a446da1912fd8204d53f7;hp=9e685b9abcde33517dd8ce0d6f62475609f7f05d;hpb=4af3c9cc4fad54c3627e9afebf905aafde5690ed;p=safe%2Fjmp%2Flinux-2.6

diff --git a/mm/mmap.c b/mm/mmap.c
index 9e685b9..bb4c963 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -36,6 +36,10 @@
 #define arch_mmap_check(addr, len, flags)	(0)
 #endif
 
+#ifndef arch_rebalance_pgtables
+#define arch_rebalance_pgtables(addr, len)		(addr)
+#endif
+
 static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		unsigned long start, unsigned long end);
@@ -181,8 +185,6 @@ error:
 	return -ENOMEM;
 }
 
-EXPORT_SYMBOL(__vm_enough_memory);
-
 /*
  * Requires inode->i_mapping->i_mmap_lock
  */
@@ -253,7 +255,8 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
 	 * not page aligned -Ram Gupta
 	 */
 	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
-	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
+	if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
+			(mm->end_data - mm->start_data) > rlim)
 		goto out;
 
 	newbrk = PAGE_ALIGN(brk);
@@ -914,6 +917,9 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 	if (!len)
 		return -EINVAL;
 
+	if (!(flags & MAP_FIXED))
+		addr = round_hint_to_min(addr);
+
 	error = arch_mmap_check(addr, len, flags);
 	if (error)
 		return error;
@@ -1050,8 +1056,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
 
 	/* The open routine did something to the protections already? */
 	if (pgprot_val(vma->vm_page_prot) !=
-	    pgprot_val(protection_map[vm_flags &
-		    (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
+	    pgprot_val(vm_get_page_prot(vm_flags)))
 		return 0;
 
 	/* Specialty mapping? */
@@ -1132,8 +1137,7 @@ munmap_back:
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
 	vma->vm_flags = vm_flags;
-	vma->vm_page_prot = protection_map[vm_flags &
-				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+	vma->vm_page_prot = vm_get_page_prot(vm_flags);
 	vma->vm_pgoff = pgoff;
 
 	if (file) {
@@ -1175,8 +1179,7 @@ munmap_back:
 	vm_flags = vma->vm_flags;
 
 	if (vma_wants_writenotify(vma))
-		vma->vm_page_prot =
-			protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
+		vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
 
 	if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
 			vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
@@ -1425,7 +1428,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 	if (addr & ~PAGE_MASK)
 		return -EINVAL;
 
-	return addr;
+	return arch_rebalance_pgtables(addr, len);
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
@@ -1620,6 +1623,12 @@ static inline int expand_downwards(struct vm_area_struct *vma,
 	 */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
+
+	address &= PAGE_MASK;
+	error = security_file_mmap(NULL, 0, 0, 0, address, 1);
+	if (error)
+		return error;
+
 	anon_vma_lock(vma);
 
 	/*
@@ -1627,8 +1636,6 @@ static inline int expand_downwards(struct vm_area_struct *vma,
 	 * is required to hold the mmap_sem in read mode. We need the
 	 * anon_vma lock to serialize against concurrent expand_stacks.
 	 */
-	address &= PAGE_MASK;
-	error = 0;
 
 	/* Somebody else might have raced and expanded it already */
 	if (address < vma->vm_start) {
@@ -1939,6 +1946,10 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	if (is_hugepage_only_range(mm, addr, len))
 		return -EINVAL;
 
+	error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
+	if (error)
+		return error;
+
 	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
 	error = arch_mmap_check(addr, len, flags);
@@ -2004,8 +2015,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	vma->vm_end = addr + len;
 	vma->vm_pgoff = pgoff;
 	vma->vm_flags = flags;
-	vma->vm_page_prot = protection_map[flags &
-			(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+	vma->vm_page_prot = vm_get_page_prot(flags);
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
 	mm->total_vm += len >> PAGE_SHIFT;
@@ -2210,8 +2220,8 @@ int install_special_mapping(struct mm_struct *mm,
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
 
-	vma->vm_flags = vm_flags | mm->def_flags;
-	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+	vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
 	vma->vm_ops = &special_mapping_vmops;
 	vma->vm_private_data = pages;