readahead: remove the old algorithm
diff --git a/mm/nommu.c b/mm/nommu.c
index af87456..1b105d2 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -45,6 +45,7 @@ int heap_stack_gap = 0;
 
 EXPORT_SYMBOL(mem_map);
 EXPORT_SYMBOL(__vm_enough_memory);
+EXPORT_SYMBOL(num_physpages);
 
 /* list of shareable VMAs */
 struct rb_root nommu_vma_tree = RB_ROOT;
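
Exporting num_physpages makes the system's page count visible to modular code. A minimal sketch of a hypothetical module using the export (the module name and message are illustrative, not part of this patch):

	#include <linux/module.h>
	#include <linux/mm.h>

	static int __init physpages_demo_init(void)
	{
		/* num_physpages: number of physical page frames (linux/mm.h) */
		printk(KERN_INFO "physpages_demo: %lu pages (%lu MiB)\n",
		       num_physpages, num_physpages >> (20 - PAGE_SHIFT));
		return 0;
	}

	static void __exit physpages_demo_exit(void)
	{
	}

	module_init(physpages_demo_init);
	module_exit(physpages_demo_exit);
	MODULE_LICENSE("GPL");
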
@@ -261,6 +262,14 @@ void vunmap(void *addr)
 }
 
 /*
+ * Implement a stub for vmalloc_sync_all() if the architecture chose not to
+ * have one.
+ */
+void __attribute__((weak)) vmalloc_sync_all(void)
+{
+}
+
+/*
  *  sys_brk() for the most part doesn't need the global kernel
  *  lock, except when an application is doing something nasty
  *  like trying to un-brk an area that has already been mapped
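
The weak attribute makes the stub above a fallback: if an architecture supplies a normal (strong) definition of vmalloc_sync_all(), the linker picks that one and silently drops this stub. A hedged sketch of such an override (the file placement and body are illustrative):

	/* e.g. arch/foo/mm/fault.c */
	void vmalloc_sync_all(void)
	{
		/* being a strong definition, this takes precedence over
		 * the weak stub in mm/nommu.c at link time; a real
		 * version would propagate kernel page-table entries here */
	}
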
@@ -358,6 +367,11 @@ struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
        return find_vma(mm, addr);
 }
 
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+       return -ENOMEM;
+}
+
 /*
  * look up the first VMA that exactly matches addr
  * - should be called with mm->mmap_sem at least held readlocked
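
Without an MMU a mapping can never be grown on demand, so expand_stack() simply reports failure. For context, this is the shape of the caller in a typical arch page-fault handler (paraphrased from the common MMU fault path, not from this patch):

	vma = find_vma(mm, addr);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= addr)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, addr))	/* always -ENOMEM on NOMMU */
		goto bad_area;
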
@@ -523,7 +537,7 @@ static int validate_mmap_request(struct file *file,
                 */
                mapping = file->f_mapping;
                if (!mapping)
-                       mapping = file->f_dentry->d_inode->i_mapping;
+                       mapping = file->f_path.dentry->d_inode->i_mapping;
 
                capabilities = 0;
                if (mapping && mapping->backing_dev_info)
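
This and the following hunks are the mechanical f_dentry/f_vfsmnt to f_path conversion: struct file now carries a struct path bundling the vfsmount and dentry together, and the old fields give way to its members. The before/after pattern:

	struct path {
		struct vfsmount *mnt;
		struct dentry *dentry;
	};

	/* old */
	inode = file->f_dentry->d_inode;
	mnt   = file->f_vfsmnt;

	/* new */
	inode = file->f_path.dentry->d_inode;
	mnt   = file->f_path.mnt;
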
@@ -532,7 +546,7 @@ static int validate_mmap_request(struct file *file,
                if (!capabilities) {
                        /* no explicit capabilities set, so assume some
                         * defaults */
-                       switch (file->f_dentry->d_inode->i_mode & S_IFMT) {
+                       switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) {
                        case S_IFREG:
                        case S_IFBLK:
                                capabilities = BDI_CAP_MAP_COPY;
@@ -563,11 +577,11 @@ static int validate_mmap_request(struct file *file,
                            !(file->f_mode & FMODE_WRITE))
                                return -EACCES;
 
-                       if (IS_APPEND(file->f_dentry->d_inode) &&
+                       if (IS_APPEND(file->f_path.dentry->d_inode) &&
                            (file->f_mode & FMODE_WRITE))
                                return -EACCES;
 
-                       if (locks_verify_locked(file->f_dentry->d_inode))
+                       if (locks_verify_locked(file->f_path.dentry->d_inode))
                                return -EAGAIN;
 
                        if (!(capabilities & BDI_CAP_MAP_DIRECT))
@@ -598,7 +612,7 @@ static int validate_mmap_request(struct file *file,
 
                /* handle executable mappings and implied executable
                 * mappings */
-               if (file->f_vfsmnt->mnt_flags & MNT_NOEXEC) {
+               if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
                        if (prot & PROT_EXEC)
                                return -EPERM;
                }
@@ -630,7 +644,7 @@ static int validate_mmap_request(struct file *file,
        }
 
        /* allow the security API to have its say */
-       ret = security_file_mmap(file, reqprot, prot, flags);
+       ret = security_file_mmap(file, reqprot, prot, flags, addr, 0);
        if (ret < 0)
                return ret;
 
@@ -826,6 +840,11 @@ unsigned long do_mmap_pgoff(struct file *file,
                unsigned long pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long vmpglen;
 
+               /* suppress VMA sharing for shared regions */
+               if (vm_flags & VM_SHARED &&
+                   capabilities & BDI_CAP_MAP_DIRECT)
+                       goto dont_share_VMAs;
+
                for (rb = rb_first(&nommu_vma_tree); rb; rb = rb_next(rb)) {
                        vma = rb_entry(rb, struct vm_area_struct, vm_rb);
 
@@ -833,7 +852,7 @@ unsigned long do_mmap_pgoff(struct file *file,
                                continue;
 
                        /* search for overlapping mappings on the same file */
-                       if (vma->vm_file->f_dentry->d_inode != file->f_dentry->d_inode)
+                       if (vma->vm_file->f_path.dentry->d_inode != file->f_path.dentry->d_inode)
                                continue;
 
                        if (vma->vm_pgoff >= pgoff + pglen)
@@ -859,6 +878,7 @@ unsigned long do_mmap_pgoff(struct file *file,
                        goto shared;
                }
 
+       dont_share_VMAs:
                vma = NULL;
 
                /* obtain the address at which to make a shared mapping
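
The new goto bypasses the VMA-sharing scan whenever a VM_SHARED mapping is requested and the backing device can map it directly, leaving placement to the device instead of piggy-backing on an existing VMA. BDI_CAP_MAP_DIRECT comes from the driver's backing_dev_info; a hedged sketch of a hypothetical driver advertising it:

	static struct backing_dev_info mydev_bdi = {
		/* the device's memory can be mapped straight into
		 * user space; copied private mappings also work */
		.capabilities = BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY,
	};
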
@@ -1193,6 +1213,28 @@ void unmap_mapping_range(struct address_space *mapping,
 EXPORT_SYMBOL(unmap_mapping_range);
 
 /*
+ * ask for an unmapped area at which to create a mapping on a file
+ */
+unsigned long get_unmapped_area(struct file *file, unsigned long addr,
+                               unsigned long len, unsigned long pgoff,
+                               unsigned long flags)
+{
+       unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
+                                 unsigned long, unsigned long);
+
+       get_area = current->mm->get_unmapped_area;
+       if (file && file->f_op && file->f_op->get_unmapped_area)
+               get_area = file->f_op->get_unmapped_area;
+
+       if (!get_area)
+               return -ENOSYS;
+
+       return get_area(file, addr, len, pgoff, flags);
+}
+
+EXPORT_SYMBOL(get_unmapped_area);
+
+/*
  * Check that a process has enough memory to allocate a new virtual
  * mapping. 0 means there is enough memory for the allocation to
  * succeed and -ENOMEM implies there is not.
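
On NOMMU the generic get_unmapped_area() is pure dispatch: a file's own hook wins, then the mm default, and -ENOSYS if neither exists. A hedged sketch of a hypothetical driver supplying the hook so that shared mmap of its fixed memory window works (mydev_base and mydev_size are invented placeholders):

	static unsigned long mydev_base, mydev_size;

	static unsigned long mydev_get_unmapped_area(struct file *file,
						     unsigned long addr,
						     unsigned long len,
						     unsigned long pgoff,
						     unsigned long flags)
	{
		/* mappings must fit inside the device window */
		if ((pgoff << PAGE_SHIFT) + len > mydev_size)
			return (unsigned long) -ENOMEM;
		return mydev_base + (pgoff << PAGE_SHIFT);
	}

	static const struct file_operations mydev_fops = {
		.owner			= THIS_MODULE,
		.get_unmapped_area	= mydev_get_unmapped_area,
	};
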
@@ -1299,11 +1341,10 @@ int in_gate_area_no_task(unsigned long addr)
        return 0;
 }
 
-struct page *filemap_nopage(struct vm_area_struct *area,
-                       unsigned long address, int *type)
+int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        BUG();
-       return NULL;
+       return 0;
 }
 
 /*
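
The final hunk tracks the ->nopage to ->fault conversion: fault handlers now take a struct vm_fault and return a VM_FAULT_* code rather than a struct page pointer. For reference, the structure the new signature works with, as introduced by that conversion:

	struct vm_fault {
		unsigned int flags;		/* FAULT_FLAG_xxx flags */
		pgoff_t pgoff;			/* logical offset within the vma */
		void __user *virtual_address;	/* faulting virtual address */
		struct page *page;		/* set by the handler on success */
	};

	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
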