mm: fault feedback #1
diff --git a/mm/shmem.c b/mm/shmem.c
index bdaecfd..0a555af 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -27,6 +27,7 @@
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/xattr.h>
+#include <linux/exportfs.h>
 #include <linux/generic_acl.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
@@ -82,6 +83,7 @@ enum sgp_type {
        SGP_READ,       /* don't exceed i_size, don't allocate page */
        SGP_CACHE,      /* don't exceed i_size, may allocate page */
        SGP_WRITE,      /* may exceed i_size, may allocate page */
+       SGP_FAULT,      /* same as SGP_CACHE, return with page locked */
 };
 
 static int shmem_getpage(struct inode *inode, unsigned long idx,
@@ -93,8 +95,11 @@ static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
         * The above definition of ENTRIES_PER_PAGE, and the use of
         * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
         * might be reconsidered if it ever diverges from PAGE_SIZE.
+        *
+        * __GFP_MOVABLE is masked out as swap vectors cannot move
         */
-       return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
+       return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
+                               PAGE_CACHE_SHIFT-PAGE_SHIFT);
 }
 
 static inline void shmem_dir_free(struct page *page)
@@ -175,12 +180,12 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages)
                vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
 }
 
-static struct super_operations shmem_ops;
+static const struct super_operations shmem_ops;
 static const struct address_space_operations shmem_aops;
-static struct file_operations shmem_file_operations;
-static struct inode_operations shmem_inode_operations;
-static struct inode_operations shmem_dir_inode_operations;
-static struct inode_operations shmem_special_inode_operations;
+static const struct file_operations shmem_file_operations;
+static const struct inode_operations shmem_inode_operations;
+static const struct inode_operations shmem_dir_inode_operations;
+static const struct inode_operations shmem_special_inode_operations;
 static struct vm_operations_struct shmem_vm_ops;
 
 static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
@@ -372,7 +377,7 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
                }
 
                spin_unlock(&info->lock);
-               page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
+               page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
                if (page)
                        set_page_private(page, 0);
                spin_lock(&info->lock);
@@ -402,26 +407,38 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
 /*
  * shmem_free_swp - free some swap entries in a directory
  *
- * @dir:   pointer to the directory
- * @edir:  pointer after last entry of the directory
+ * @dir:        pointer to the directory
+ * @edir:       pointer after last entry of the directory
+ * @punch_lock: pointer to spinlock when needed for the holepunch case
  */
-static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir)
+static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
+                                               spinlock_t *punch_lock)
 {
+       spinlock_t *punch_unlock = NULL;
        swp_entry_t *ptr;
        int freed = 0;
 
        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val) {
+                       if (unlikely(punch_lock)) {
+                               punch_unlock = punch_lock;
+                               punch_lock = NULL;
+                               spin_lock(punch_unlock);
+                               if (!ptr->val)
+                                       continue;
+                       }
                        free_swap_and_cache(*ptr);
                        *ptr = (swp_entry_t){0};
                        freed++;
                }
        }
+       if (punch_unlock)
+               spin_unlock(punch_unlock);
        return freed;
 }
 
-static int shmem_map_and_free_swp(struct page *subdir,
-               int offset, int limit, struct page ***dir)
+static int shmem_map_and_free_swp(struct page *subdir, int offset,
+               int limit, struct page ***dir, spinlock_t *punch_lock)
 {
        swp_entry_t *ptr;
        int freed = 0;
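
The new punch_lock argument gives shmem_free_swp a deferred-locking shape: ordinary truncation passes NULL and the scan stays lock-free, while hole-punching takes the lock only when the first live swap entry is found, rechecking that entry under the lock in case a racer (shmem_writepage, shmem_unuse_inode) cleared it first; shmem_map_and_free_swp merely threads the lock through. A user-space sketch of the same "lock on first hit, then revalidate" idea, using pthreads; the names are illustrative, not kernel API:

#include <pthread.h>
#include <stddef.h>

static int free_entries(unsigned long *p, unsigned long *end,
			pthread_mutex_t *punch_lock)
{
	pthread_mutex_t *punch_unlock = NULL;	/* the lock we actually took */
	int freed = 0;

	for (; p < end; p++) {
		if (!*p)
			continue;
		if (punch_lock) {		/* first live entry: lock once */
			punch_unlock = punch_lock;
			punch_lock = NULL;
			pthread_mutex_lock(punch_unlock);
			if (!*p)		/* lost a race: skip, still locked */
				continue;
		}
		*p = 0;			/* stands in for free_swap_and_cache() */
		freed++;
	}
	if (punch_unlock)
		pthread_mutex_unlock(punch_unlock);
	return freed;
}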
@@ -431,7 +448,8 @@ static int shmem_map_and_free_swp(struct page *subdir,
                int size = limit - offset;
                if (size > LATENCY_LIMIT)
                        size = LATENCY_LIMIT;
-               freed += shmem_free_swp(ptr+offset, ptr+offset+size);
+               freed += shmem_free_swp(ptr+offset, ptr+offset+size,
+                                                       punch_lock);
                if (need_resched()) {
                        shmem_swp_unmap(ptr);
                        if (*dir) {
@@ -481,7 +499,10 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
        long nr_swaps_freed = 0;
        int offset;
        int freed;
-       int punch_hole = 0;
+       int punch_hole;
+       spinlock_t *needs_lock;
+       spinlock_t *punch_lock;
+       unsigned long upper_limit;
 
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
@@ -492,11 +513,20 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
        info->flags |= SHMEM_TRUNCATE;
        if (likely(end == (loff_t) -1)) {
                limit = info->next_index;
+               upper_limit = SHMEM_MAX_INDEX;
                info->next_index = idx;
+               needs_lock = NULL;
+               punch_hole = 0;
        } else {
-               limit = (end + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-               if (limit > info->next_index)
-                       limit = info->next_index;
+               if (end + 1 >= inode->i_size) { /* we may free a little more */
+                       limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
+                                                       PAGE_CACHE_SHIFT;
+                       upper_limit = SHMEM_MAX_INDEX;
+               } else {
+                       limit = (end + 1) >> PAGE_CACHE_SHIFT;
+                       upper_limit = limit;
+               }
+               needs_lock = &info->lock;
                punch_hole = 1;
        }
 
@@ -513,12 +543,30 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
                size = limit;
                if (size > SHMEM_NR_DIRECT)
                        size = SHMEM_NR_DIRECT;
-               nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size);
+               nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
        }
-       if (!topdir)
+
+       /*
+        * If there are no indirect blocks or we are punching a hole
+        * below indirect blocks, nothing to be done.
+        */
+       if (!topdir || limit <= SHMEM_NR_DIRECT)
                goto done2;
 
-       BUG_ON(limit <= SHMEM_NR_DIRECT);
+       /*
+        * The truncation case has already dropped info->lock, and we're safe
+        * because i_size and next_index have already been lowered, preventing
+        * access beyond.  But in the punch_hole case, we still need to take
+        * the lock when updating the swap directory, because there might be
+        * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
+        * shmem_writepage.  However, whenever we find we can remove a whole
+        * directory page (not at the misaligned start or end of the range),
+        * we first NULLify its pointer in the level above, and then have no
+        * need to take the lock when updating its contents: needs_lock and
+        * punch_lock (either pointing to info->lock or NULL) manage this.
+        */
+
+       upper_limit -= SHMEM_NR_DIRECT;
        limit -= SHMEM_NR_DIRECT;
        idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
        offset = idx % ENTRIES_PER_PAGE;
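
To make the new bounds concrete (assuming 4K pages, so PAGE_CACHE_SHIFT is 12): punching bytes 8192..20479 of a larger file gives idx = (8192 + 4095) >> 12 = 2 and limit = (20479 + 1) >> 12 = 5, so swap entries for pages 2..4 are freed; and since upper_limit = limit = 5, a whole directory page may be detached only if every entry it covers lies below page 5. Only full truncation, or a punch reaching i_size, sets upper_limit to SHMEM_MAX_INDEX and so licenses whole-page frees all the way up.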
@@ -538,8 +586,14 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
                if (*dir) {
                        diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
                                ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
-                       if (!diroff && !offset) {
-                               *dir = NULL;
+                       if (!diroff && !offset && upper_limit >= stage) {
+                               if (needs_lock) {
+                                       spin_lock(needs_lock);
+                                       *dir = NULL;
+                                       spin_unlock(needs_lock);
+                                       needs_lock = NULL;
+                               } else
+                                       *dir = NULL;
                                nr_pages_to_free++;
                                list_add(&middir->lru, &pages_to_free);
                        }
@@ -565,39 +619,55 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        middir = *dir;
-                       *dir = NULL;
-                       nr_pages_to_free++;
-                       list_add(&middir->lru, &pages_to_free);
+                       if (punch_hole)
+                               needs_lock = &info->lock;
+                       if (upper_limit >= stage) {
+                               if (needs_lock) {
+                                       spin_lock(needs_lock);
+                                       *dir = NULL;
+                                       spin_unlock(needs_lock);
+                                       needs_lock = NULL;
+                               } else
+                                       *dir = NULL;
+                               nr_pages_to_free++;
+                               list_add(&middir->lru, &pages_to_free);
+                       }
                        shmem_dir_unmap(dir);
                        cond_resched();
                        dir = shmem_dir_map(middir);
                        diroff = 0;
                }
+               punch_lock = needs_lock;
                subdir = dir[diroff];
-               if (subdir && page_private(subdir)) {
+               if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
+                       if (needs_lock) {
+                               spin_lock(needs_lock);
+                               dir[diroff] = NULL;
+                               spin_unlock(needs_lock);
+                               punch_lock = NULL;
+                       } else
+                               dir[diroff] = NULL;
+                       nr_pages_to_free++;
+                       list_add(&subdir->lru, &pages_to_free);
+               }
+               if (subdir && page_private(subdir) /* has swap entries */) {
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        freed = shmem_map_and_free_swp(subdir,
-                                               offset, size, &dir);
+                                       offset, size, &dir, punch_lock);
                        if (!dir)
                                dir = shmem_dir_map(middir);
                        nr_swaps_freed += freed;
-                       if (offset)
+                       if (offset || punch_lock) {
                                spin_lock(&info->lock);
-                       set_page_private(subdir, page_private(subdir) - freed);
-                       if (offset)
+                               set_page_private(subdir,
+                                       page_private(subdir) - freed);
                                spin_unlock(&info->lock);
-                       if (!punch_hole)
-                               BUG_ON(page_private(subdir) > offset);
-               }
-               if (offset)
-                       offset = 0;
-               else if (subdir && !page_private(subdir)) {
-                       dir[diroff] = NULL;
-                       nr_pages_to_free++;
-                       list_add(&subdir->lru, &pages_to_free);
+                       } else
+                               BUG_ON(page_private(subdir) != freed);
                }
+               offset = 0;
        }
 done1:
        shmem_dir_unmap(dir);
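
Most of the churn above is the detach-then-free idiom the earlier comment describes: clear the only shared pointer to a directory page while holding info->lock (or skip the lock when truncation has already made the page unreachable), after which the page is private and can be emptied without locking. A minimal sketch of that idiom in isolation (pthreads, illustrative names, not kernel API):

#include <pthread.h>
#include <stddef.h>

struct block { struct block *child; /* payload elided */ };

/* Clear the one shared pointer under the lock; afterwards the block is
 * private to the caller and can be torn down without holding the lock. */
static struct block *detach(struct block **slot, pthread_mutex_t *lock)
{
	struct block *b;

	pthread_mutex_lock(lock);
	b = *slot;
	*slot = NULL;
	pthread_mutex_unlock(lock);
	return b;
}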
@@ -609,8 +679,16 @@ done2:
                 * generic_delete_inode did it, before we lowered next_index.
                 * Also, though shmem_getpage checks i_size before adding to
                 * cache, no recheck after: so fix the narrow window there too.
+                *
+                * Recalling truncate_inode_pages_range and unmap_mapping_range
+                * every time for punch_hole (which never got a chance to clear
+                * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
+                * yet hardly ever necessary: try to optimize them out later.
                 */
                truncate_inode_pages_range(inode->i_mapping, start, end);
+               if (punch_hole)
+                       unmap_mapping_range(inode->i_mapping, start,
+                                                       end - start, 1);
        }
 
        spin_lock(&info->lock);
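
The punch_hole path is what user space reaches via madvise(MADV_REMOVE) on a tmpfs mapping (which goes through vmtruncate_range); the added unmap_mapping_range ensures that PTEs already faulted into any mapping of the punched range are torn down, so later reads see fresh zero pages rather than the old data. A small demonstration, assuming /dev/shm is tmpfs and with error handling abbreviated:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>

int main(void)
{
	size_t len = 16 * 4096;
	int fd = open("/dev/shm/punch-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	char *p, probe;

	ftruncate(fd, len);
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	memset(p, 0xaa, len);			/* instantiate all 16 pages */

	/* Punch out pages 4..7 of the file. */
	madvise(p + 4 * 4096, 4 * 4096, MADV_REMOVE);

	probe = p[4 * 4096];			/* reads back as 0 */
	munmap(p, len);
	close(fd);
	unlink("/dev/shm/punch-demo");
	return probe;
}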
@@ -894,6 +972,8 @@ static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_
                *nodelist++ = '\0';
                if (nodelist_parse(nodelist, *policy_nodes))
                        goto out;
+               if (!nodes_subset(*policy_nodes, node_online_map))
+                       goto out;
        }
        if (!strcmp(value, "default")) {
                *policy = MPOL_DEFAULT;
@@ -1021,13 +1101,17 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 
        if (idx >= SHMEM_MAX_INDEX)
                return -EFBIG;
+
+       if (type)
+               *type = VM_FAULT_MINOR;
+
        /*
         * Normally, filepage is NULL on entry, and either found
         * uptodate immediately, or allocated and zeroed, or read
         * in under swappage, which is then assigned to filepage.
-        * But shmem_prepare_write passes in a locked filepage,
-        * which may be found not uptodate by other callers too,
-        * and may need to be copied from the swappage read in.
+        * But shmem_readpage and shmem_prepare_write pass in a locked
+        * filepage, which may be found not uptodate by other callers
+        * too, and may need to be copied from the swappage read in.
         */
 repeat:
        if (!filepage)
@@ -1210,8 +1294,10 @@ repeat:
        }
 done:
        if (*pagep != filepage) {
-               unlock_page(filepage);
                *pagep = filepage;
+               if (sgp != SGP_FAULT)
+                       unlock_page(filepage);
+
        }
        return 0;
 
@@ -1223,84 +1309,34 @@ failed:
        return error;
 }
 
-struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
+static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-       struct inode *inode = vma->vm_file->f_dentry->d_inode;
-       struct page *page = NULL;
-       unsigned long idx;
+       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        int error;
+       int ret;
 
-       idx = (address - vma->vm_start) >> PAGE_SHIFT;
-       idx += vma->vm_pgoff;
-       idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
-       if (((loff_t) idx << PAGE_CACHE_SHIFT) >= i_size_read(inode))
-               return NOPAGE_SIGBUS;
+       if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
+               return VM_FAULT_SIGBUS;
 
-       error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
+       error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_FAULT, &ret);
        if (error)
-               return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;
+               return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
 
-       mark_page_accessed(page);
-       return page;
-}
-
-static int shmem_populate(struct vm_area_struct *vma,
-       unsigned long addr, unsigned long len,
-       pgprot_t prot, unsigned long pgoff, int nonblock)
-{
-       struct inode *inode = vma->vm_file->f_dentry->d_inode;
-       struct mm_struct *mm = vma->vm_mm;
-       enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
-       unsigned long size;
-
-       size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
-               return -EINVAL;
-
-       while ((long) len > 0) {
-               struct page *page = NULL;
-               int err;
-               /*
-                * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
-                */
-               err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
-               if (err)
-                       return err;
-               /* Page may still be null, but only if nonblock was set. */
-               if (page) {
-                       mark_page_accessed(page);
-                       err = install_page(mm, vma, addr, page, prot);
-                       if (err) {
-                               page_cache_release(page);
-                               return err;
-                       }
-               } else if (vma->vm_flags & VM_NONLINEAR) {
-                       /* No page was found just because we can't read it in
-                        * now (being here implies nonblock != 0), but the page
-                        * may exist, so set the PTE to fault it in later. */
-                       err = install_file_pte(mm, vma, addr, pgoff, prot);
-                       if (err)
-                               return err;
-               }
-
-               len -= PAGE_SIZE;
-               addr += PAGE_SIZE;
-               pgoff++;
-       }
-       return 0;
+       mark_page_accessed(vmf->page);
+       return ret | FAULT_RET_LOCKED;
 }
 
 #ifdef CONFIG_NUMA
 int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
 {
-       struct inode *i = vma->vm_file->f_dentry->d_inode;
+       struct inode *i = vma->vm_file->f_path.dentry->d_inode;
        return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
 }
 
 struct mempolicy *
 shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
 {
-       struct inode *i = vma->vm_file->f_dentry->d_inode;
+       struct inode *i = vma->vm_file->f_path.dentry->d_inode;
        unsigned long idx;
 
        idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
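
The conversion above retires the ->nopage/->populate pair: a single ->fault handler now receives the file offset in vmf->pgoff and hands the page back through vmf->page along with VM_FAULT_* status bits, FAULT_RET_LOCKED signalling (as SGP_FAULT arranges) that the page is returned locked. Nonlinear mappings, which ->populate used to serve, are instead built generically once ->mmap sets VM_CAN_NONLINEAR. A hypothetical driver skeleton against the interim API used here; mydrv_lookup_page is an invented helper, not a real kernel function:

static int mydrv_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* mydrv_lookup_page() is hypothetical: find the page backing pgoff */
	struct page *page = mydrv_lookup_page(vma->vm_file, vmf->pgoff);

	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);			/* the caller expects a reference */
	vmf->page = page;
	return VM_FAULT_MINOR;		/* not locked: no FAULT_RET_LOCKED */
}

static struct vm_operations_struct mydrv_vm_ops = {
	.fault		= mydrv_fault,
};

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &mydrv_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;	/* ->fault serves nonlinear too */
	return 0;
}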
@@ -1310,7 +1346,7 @@ shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
 
 int shmem_lock(struct file *file, int lock, struct user_struct *user)
 {
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file->f_path.dentry->d_inode;
        struct shmem_inode_info *info = SHMEM_I(inode);
        int retval = -ENOMEM;
 
@@ -1330,10 +1366,11 @@ out_nomem:
        return retval;
 }
 
-int shmem_mmap(struct file *file, struct vm_area_struct *vma)
+static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
 {
        file_accessed(file);
        vma->vm_ops = &shmem_vm_ops;
+       vma->vm_flags |= VM_CAN_NONLINEAR;
        return 0;
 }
 
@@ -1405,13 +1442,22 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
 }
 
 #ifdef CONFIG_TMPFS
-static struct inode_operations shmem_symlink_inode_operations;
-static struct inode_operations shmem_symlink_inline_operations;
+static const struct inode_operations shmem_symlink_inode_operations;
+static const struct inode_operations shmem_symlink_inline_operations;
 
 /*
- * Normally tmpfs makes no use of shmem_prepare_write, but it
- * lets a tmpfs file be used read-write below the loop driver.
+ * Normally tmpfs avoids the use of shmem_readpage and shmem_prepare_write;
+ * but providing them allows a tmpfs file to be used for splice, sendfile, and
+ * below the loop driver, in the generic fashion that many filesystems support.
  */
+static int shmem_readpage(struct file *file, struct page *page)
+{
+       struct inode *inode = page->mapping->host;
+       int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
+       unlock_page(page);
+       return error;
+}
+
 static int
 shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
 {
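
Wiring up ->readpage is what lets tmpfs drop its private sendfile actor (shmem_file_sendfile, deleted below) in favour of the stock generic_file_splice_read/generic_file_splice_write. The effect is user-visible: splice(2) on a tmpfs file now goes through the generic path. A small demonstration, again assuming /dev/shm is tmpfs and abbreviating error handling:

#define _GNU_SOURCE
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/shm/splice-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	int pfd[2];
	loff_t off = 0;

	write(fd, "hello from tmpfs\n", 17);
	pipe(pfd);

	/* File pages move into the pipe without a user-space copy... */
	splice(fd, &off, pfd[1], NULL, 17, SPLICE_F_MOVE);
	/* ...and from the pipe out to stdout. */
	splice(pfd[0], NULL, STDOUT_FILENO, NULL, 17, 0);

	close(fd);
	unlink("/dev/shm/splice-demo");
	return 0;
}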
@@ -1422,7 +1468,7 @@ shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsig
 static ssize_t
 shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 {
-       struct inode    *inode = file->f_dentry->d_inode;
+       struct inode    *inode = file->f_path.dentry->d_inode;
        loff_t          pos;
        unsigned long   written;
        ssize_t         err;
@@ -1442,7 +1488,7 @@ shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t
        if (err || !count)
                goto out;
 
-       err = remove_suid(file->f_dentry);
+       err = remove_suid(file->f_path.dentry);
        if (err)
                goto out;
 
@@ -1524,7 +1570,7 @@ out:
 
 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
 {
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = filp->f_path.dentry->d_inode;
        struct address_space *mapping = inode->i_mapping;
        unsigned long index, offset;
 
@@ -1635,25 +1681,6 @@ static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count
        return desc.error;
 }
 
-static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
-                        size_t count, read_actor_t actor, void *target)
-{
-       read_descriptor_t desc;
-
-       if (!count)
-               return 0;
-
-       desc.written = 0;
-       desc.count = count;
-       desc.arg.data = target;
-       desc.error = 0;
-
-       do_shmem_file_read(in_file, ppos, &desc, actor);
-       if (desc.written)
-               return desc.written;
-       return desc.error;
-}
-
 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
        struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
@@ -1899,12 +1926,12 @@ static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *co
        }
 }
 
-static struct inode_operations shmem_symlink_inline_operations = {
+static const struct inode_operations shmem_symlink_inline_operations = {
        .readlink       = generic_readlink,
        .follow_link    = shmem_follow_link_inline,
 };
 
-static struct inode_operations shmem_symlink_inode_operations = {
+static const struct inode_operations shmem_symlink_inode_operations = {
        .truncate       = shmem_truncate,
        .readlink       = generic_readlink,
        .follow_link    = shmem_follow_link,
@@ -1943,7 +1970,7 @@ static int shmem_xattr_security_set(struct inode *inode, const char *name,
        return security_inode_setsecurity(inode, name, value, size, flags);
 }
 
-struct xattr_handler shmem_xattr_security_handler = {
+static struct xattr_handler shmem_xattr_security_handler = {
        .prefix = XATTR_SECURITY_PREFIX,
        .list   = shmem_xattr_security_list,
        .get    = shmem_xattr_security_get,
@@ -2284,14 +2311,11 @@ static void init_once(void *foo, struct kmem_cache *cachep,
 {
        struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
 
-       if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-           SLAB_CTOR_CONSTRUCTOR) {
-               inode_init_once(&p->vfs_inode);
+       inode_init_once(&p->vfs_inode);
 #ifdef CONFIG_TMPFS_POSIX_ACL
-               p->i_acl = NULL;
-               p->i_default_acl = NULL;
+       p->i_acl = NULL;
+       p->i_default_acl = NULL;
 #endif
-       }
 }
 
 static int init_inodecache(void)
@@ -2311,26 +2335,28 @@ static void destroy_inodecache(void)
 
 static const struct address_space_operations shmem_aops = {
        .writepage      = shmem_writepage,
-       .set_page_dirty = __set_page_dirty_nobuffers,
+       .set_page_dirty = __set_page_dirty_no_writeback,
 #ifdef CONFIG_TMPFS
+       .readpage       = shmem_readpage,
        .prepare_write  = shmem_prepare_write,
        .commit_write   = simple_commit_write,
 #endif
        .migratepage    = migrate_page,
 };
 
-static struct file_operations shmem_file_operations = {
+static const struct file_operations shmem_file_operations = {
        .mmap           = shmem_mmap,
 #ifdef CONFIG_TMPFS
        .llseek         = generic_file_llseek,
        .read           = shmem_file_read,
        .write          = shmem_file_write,
        .fsync          = simple_sync_file,
-       .sendfile       = shmem_file_sendfile,
+       .splice_read    = generic_file_splice_read,
+       .splice_write   = generic_file_splice_write,
 #endif
 };
 
-static struct inode_operations shmem_inode_operations = {
+static const struct inode_operations shmem_inode_operations = {
        .truncate       = shmem_truncate,
        .setattr        = shmem_notify_change,
        .truncate_range = shmem_truncate_range,
@@ -2344,7 +2370,7 @@ static struct inode_operations shmem_inode_operations = {
 
 };
 
-static struct inode_operations shmem_dir_inode_operations = {
+static const struct inode_operations shmem_dir_inode_operations = {
 #ifdef CONFIG_TMPFS
        .create         = shmem_create,
        .lookup         = simple_lookup,
@@ -2366,7 +2392,7 @@ static struct inode_operations shmem_dir_inode_operations = {
 #endif
 };
 
-static struct inode_operations shmem_special_inode_operations = {
+static const struct inode_operations shmem_special_inode_operations = {
 #ifdef CONFIG_TMPFS_POSIX_ACL
        .setattr        = shmem_notify_change,
        .setxattr       = generic_setxattr,
@@ -2377,7 +2403,7 @@ static struct inode_operations shmem_special_inode_operations = {
 #endif
 };
 
-static struct super_operations shmem_ops = {
+static const struct super_operations shmem_ops = {
        .alloc_inode    = shmem_alloc_inode,
        .destroy_inode  = shmem_destroy_inode,
 #ifdef CONFIG_TMPFS
@@ -2390,8 +2416,7 @@ static struct super_operations shmem_ops = {
 };
 
 static struct vm_operations_struct shmem_vm_ops = {
-       .nopage         = shmem_nopage,
-       .populate       = shmem_populate,
+       .fault          = shmem_fault,
 #ifdef CONFIG_NUMA
        .set_policy     = shmem_set_policy,
        .get_policy     = shmem_get_policy,
@@ -2493,8 +2518,8 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
        d_instantiate(dentry, inode);
        inode->i_size = size;
        inode->i_nlink = 0;     /* It is unlinked */
-       file->f_vfsmnt = mntget(shm_mnt);
-       file->f_dentry = dentry;
+       file->f_path.mnt = mntget(shm_mnt);
+       file->f_path.dentry = dentry;
        file->f_mapping = inode->i_mapping;
        file->f_op = &shmem_file_operations;
        file->f_mode = FMODE_WRITE | FMODE_READ;