diff --git a/mm/shmem.c b/mm/shmem.c
index e577adf..e6d9298 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -49,6 +49,7 @@
 #include <linux/ctype.h>
 #include <linux/migrate.h>
 #include <linux/highmem.h>
+#include <linux/seq_file.h>
 
 #include <asm/uaccess.h>
 #include <asm/div64.h>
 enum sgp_type {
        SGP_READ,       /* don't exceed i_size, don't allocate page */
        SGP_CACHE,      /* don't exceed i_size, may allocate page */
+       SGP_DIRTY,      /* like SGP_CACHE, but set new page dirty */
        SGP_WRITE,      /* may exceed i_size, may allocate page */
 };
 
+#ifdef CONFIG_TMPFS
+static unsigned long shmem_default_max_blocks(void)
+{
+       return totalram_pages / 2;
+}
+
+static unsigned long shmem_default_max_inodes(void)
+{
+       return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
+}
+#endif
+
 static int shmem_getpage(struct inode *inode, unsigned long idx,
                         struct page **pagep, enum sgp_type sgp, int *type);
 
@@ -192,7 +206,7 @@ static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
 };
 
 static LIST_HEAD(shmem_swaplist);
-static DEFINE_SPINLOCK(shmem_swaplist_lock);
+static DEFINE_MUTEX(shmem_swaplist_mutex);
 
 static void shmem_free_blocks(struct inode *inode, long pages)
 {
@@ -230,9 +244,8 @@ static void shmem_free_inode(struct super_block *sb)
        }
 }
 
-/*
+/**
  * shmem_recalc_inode - recalculate the size of an inode
- *
  * @inode: inode to recalc
  *
  * We have to calculate the free blocks since the mm can drop
@@ -256,9 +269,8 @@ static void shmem_recalc_inode(struct inode *inode)
        }
 }
 
-/*
+/**
  * shmem_swp_entry - find the swap vector position in the info structure
- *
  * @info:  info structure for the inode
  * @index: index of the page to find
  * @page:  optional page to add to the structure. Has to be preset to
@@ -360,13 +372,13 @@ static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, uns
        }
 }
 
-/*
+/**
  * shmem_swp_alloc - get the position of the swap entry for the page.
- *                   If it does not exist allocate the entry.
- *
  * @info:      info structure for the inode
  * @index:     index of the page to find
  * @sgp:       check and recheck i_size? skip allocation?
+ *
+ * If the entry does not exist, allocate it.
  */
 static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
 {
@@ -426,9 +438,8 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
        return entry;
 }
 
-/*
+/**
  * shmem_free_swp - free some swap entries in a directory
- *
  * @dir:        pointer to the directory
  * @edir:       pointer after last entry of the directory
  * @punch_lock: pointer to spinlock when needed for the holepunch case
@@ -795,9 +806,9 @@ static void shmem_delete_inode(struct inode *inode)
                inode->i_size = 0;
                shmem_truncate(inode);
                if (!list_empty(&info->swaplist)) {
-                       spin_lock(&shmem_swaplist_lock);
+                       mutex_lock(&shmem_swaplist_mutex);
                        list_del_init(&info->swaplist);
-                       spin_unlock(&shmem_swaplist_lock);
+                       mutex_unlock(&shmem_swaplist_mutex);
                }
        }
        BUG_ON(inode->i_blocks);
@@ -827,19 +838,22 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
        struct page *subdir;
        swp_entry_t *ptr;
        int offset;
+       int error;
 
        idx = 0;
        ptr = info->i_direct;
        spin_lock(&info->lock);
+       if (!info->swapped) {
+               list_del_init(&info->swaplist);
+               goto lost2;
+       }
        limit = info->next_index;
        size = limit;
        if (size > SHMEM_NR_DIRECT)
                size = SHMEM_NR_DIRECT;
        offset = shmem_find_swp(entry, ptr, ptr+size);
-       if (offset >= 0) {
-               shmem_swp_balance_unmap();
+       if (offset >= 0)
                goto found;
-       }
        if (!info->i_indirect)
                goto lost2;
 
@@ -849,6 +863,14 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
        for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
                if (unlikely(idx == stage)) {
                        shmem_dir_unmap(dir-1);
+                       if (cond_resched_lock(&info->lock)) {
+                               /* check it has not been truncated */
+                               if (limit > info->next_index) {
+                                       limit = info->next_index;
+                                       if (idx >= limit)
+                                               goto lost2;
+                               }
+                       }
                        dir = shmem_dir_map(info->i_indirect) +
                            ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
                        while (!*dir) {
@@ -869,11 +891,11 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        offset = shmem_find_swp(entry, ptr, ptr+size);
+                       shmem_swp_unmap(ptr);
                        if (offset >= 0) {
                                shmem_dir_unmap(dir);
                                goto found;
                        }
-                       shmem_swp_unmap(ptr);
                }
        }
 lost1:
@@ -883,21 +905,69 @@ lost2:
        return 0;
 found:
        idx += offset;
-       inode = &info->vfs_inode;
-       if (add_to_page_cache(page, inode->i_mapping, idx, GFP_ATOMIC) == 0) {
+       inode = igrab(&info->vfs_inode);
+       spin_unlock(&info->lock);
+
+       /*
+        * Move _head_ to start search for next from here.
+        * But be careful: shmem_delete_inode checks list_empty without taking
+        * mutex, and there's an instant in list_move_tail when info->swaplist
+        * would appear empty, if it were the only one on shmem_swaplist.  We
+        * could avoid doing it if inode NULL; or use this minor optimization.
+        */
+       if (shmem_swaplist.next != &info->swaplist)
+               list_move_tail(&shmem_swaplist, &info->swaplist);
+       mutex_unlock(&shmem_swaplist_mutex);
+
+       error = 1;
+       if (!inode)
+               goto out;
+       /* Precharge page while we can wait, compensate afterwards */
+       error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
+       if (error)
+               goto out;
+       error = radix_tree_preload(GFP_KERNEL);
+       if (error)
+               goto uncharge;
+       error = 1;
+
+       spin_lock(&info->lock);
+       ptr = shmem_swp_entry(info, idx, NULL);
+       if (ptr && ptr->val == entry.val)
+               error = add_to_page_cache(page, inode->i_mapping,
+                                               idx, GFP_NOWAIT);
+       if (error == -EEXIST) {
+               struct page *filepage = find_get_page(inode->i_mapping, idx);
+               error = 1;
+               if (filepage) {
+                       /*
+                        * There might be a more uptodate page coming down
+                        * from a stacked writepage: forget our swappage if so.
+                        */
+                       if (PageUptodate(filepage))
+                               error = 0;
+                       page_cache_release(filepage);
+               }
+       }
+       if (!error) {
                delete_from_swap_cache(page);
                set_page_dirty(page);
                info->flags |= SHMEM_PAGEIN;
-               shmem_swp_set(info, ptr + offset, 0);
+               shmem_swp_set(info, ptr, 0);
+               swap_free(entry);
+               error = 1;      /* not an error, but entry was found */
        }
-       shmem_swp_unmap(ptr);
+       if (ptr)
+               shmem_swp_unmap(ptr);
        spin_unlock(&info->lock);
-       /*
-        * Decrement swap count even when the entry is left behind:
-        * try_to_unuse will skip over mms, then reincrement count.
-        */
-       swap_free(entry);
-       return 1;
+       radix_tree_preload_end();
+uncharge:
+       mem_cgroup_uncharge_page(page);
+out:
+       unlock_page(page);
+       page_cache_release(page);
+       iput(inode);            /* allows for NULL */
+       return error;
 }
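
A note on the locking pattern above: cond_resched_lock() drops the spinlock to reschedule when needed, returning nonzero when it did, which is why `limit` is re-clamped against info->next_index in case a truncation slipped in while the lock was down. A minimal sketch of the idiom, with hypothetical helpers:

        spin_lock(&info->lock);
        while (have_more_entries()) {                   /* hypothetical */
                scan_one_batch();                       /* hypothetical */
                if (cond_resched_lock(&info->lock)) {
                        /* lock was dropped and retaken: anything sampled
                         * under it (limits, indexes) may now be stale */
                        if (!revalidate_state())        /* hypothetical */
                                break;
                }
        }
        spin_unlock(&info->lock);

The same revalidate-after-sleeping discipline shapes the tail of the function: the memory-cgroup charge and radix_tree_preload() run with no lock held, and only then is info->lock retaken and the swap entry looked up again before the GFP_NOWAIT add_to_page_cache().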
 
 /*
@@ -909,20 +979,16 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
        struct shmem_inode_info *info;
        int found = 0;
 
-       spin_lock(&shmem_swaplist_lock);
+       mutex_lock(&shmem_swaplist_mutex);
        list_for_each_safe(p, next, &shmem_swaplist) {
                info = list_entry(p, struct shmem_inode_info, swaplist);
-               if (!info->swapped)
-                       list_del_init(&info->swaplist);
-               else if (shmem_unuse_inode(info, entry, page)) {
-                       /* move head to start search for next from here */
-                       list_move_tail(&shmem_swaplist, &info->swaplist);
-                       found = 1;
-                       break;
-               }
+               found = shmem_unuse_inode(info, entry, page);
+               cond_resched();
+               if (found)
+                       goto out;
        }
-       spin_unlock(&shmem_swaplist_lock);
-       return found;
+       mutex_unlock(&shmem_swaplist_mutex);
+out:   return found;   /* 0 or 1 or -ENOMEM */
 }
 
 /*
@@ -937,58 +1003,65 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
        struct inode *inode;
 
        BUG_ON(!PageLocked(page));
-       /*
-        * shmem_backing_dev_info's capabilities prevent regular writeback or
-        * sync from ever calling shmem_writepage; but a stacking filesystem
-        * may use the ->writepage of its underlying filesystem, in which case
-        * we want to do nothing when that underlying filesystem is tmpfs
-        * (writing out to swap is useful as a response to memory pressure, but
-        * of no use to stabilize the data) - just redirty the page, unlock it
-        * and claim success in this case.  AOP_WRITEPAGE_ACTIVATE, and the
-        * page_mapped check below, must be avoided unless we're in reclaim.
-        */
-       if (!wbc->for_reclaim) {
-               set_page_dirty(page);
-               unlock_page(page);
-               return 0;
-       }
-       BUG_ON(page_mapped(page));
-
        mapping = page->mapping;
        index = page->index;
        inode = mapping->host;
        info = SHMEM_I(inode);
        if (info->flags & VM_LOCKED)
                goto redirty;
-       swap = get_swap_page();
-       if (!swap.val)
+       if (!total_swap_pages)
                goto redirty;
 
+       /*
+        * shmem_backing_dev_info's capabilities prevent regular writeback or
+        * sync from ever calling shmem_writepage; but a stacking filesystem
+        * may use the ->writepage of its underlying filesystem, in which case
+        * tmpfs should write out to swap only in response to memory pressure,
+        * and not for pdflush or sync.  However, in those cases, we do still
+        * want to check if there's a redundant swappage to be discarded.
+        */
+       if (wbc->for_reclaim)
+               swap = get_swap_page();
+       else
+               swap.val = 0;
+
        spin_lock(&info->lock);
-       shmem_recalc_inode(inode);
        if (index >= info->next_index) {
                BUG_ON(!(info->flags & SHMEM_TRUNCATE));
                goto unlock;
        }
        entry = shmem_swp_entry(info, index, NULL);
-       BUG_ON(!entry);
-       BUG_ON(entry->val);
+       if (entry->val) {
+               /*
+                * The more uptodate page coming down from a stacked
+                * writepage should replace our old swappage.
+                */
+               free_swap_and_cache(*entry);
+               shmem_swp_set(info, entry, 0);
+       }
+       shmem_recalc_inode(inode);
 
-       if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
+       if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
                remove_from_page_cache(page);
                shmem_swp_set(info, entry, swap.val);
                shmem_swp_unmap(entry);
+               if (list_empty(&info->swaplist))
+                       inode = igrab(inode);
+               else
+                       inode = NULL;
                spin_unlock(&info->lock);
-               if (list_empty(&info->swaplist)) {
-                       spin_lock(&shmem_swaplist_lock);
-                       /* move instead of add in case we're racing */
-                       list_move_tail(&info->swaplist, &shmem_swaplist);
-                       spin_unlock(&shmem_swaplist_lock);
-               }
                swap_duplicate(swap);
+               BUG_ON(page_mapped(page));
                page_cache_release(page);       /* pagecache ref */
                set_page_dirty(page);
                unlock_page(page);
+               if (inode) {
+                       mutex_lock(&shmem_swaplist_mutex);
+                       /* move instead of add in case we're racing */
+                       list_move_tail(&info->swaplist, &shmem_swaplist);
+                       mutex_unlock(&shmem_swaplist_mutex);
+                       iput(inode);
+               }
                return 0;
        }
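
For context: the only reclaim-time caller of ->writepage is pageout() in mm/vmscan.c, which sets wbc->for_reclaim and treats AOP_WRITEPAGE_ACTIVATE as "keep the page, it is still locked"; every other caller (a stacking filesystem's writeback, pdflush, sync) now gets the page redirtied and unlocked instead, as the next hunk shows. Condensed from pageout(), slightly simplified:

        struct writeback_control wbc = {
                .sync_mode      = WB_SYNC_NONE,
                .nr_to_write    = SWAP_CLUSTER_MAX,
                .for_reclaim    = 1,
        };

        res = mapping->a_ops->writepage(page, &wbc);
        if (res == AOP_WRITEPAGE_ACTIVATE) {
                ClearPageReclaim(page);
                return PAGE_ACTIVATE;           /* caller reactivates page */
        }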
 
@@ -998,72 +1071,55 @@ unlock:
        swap_free(swap);
 redirty:
        set_page_dirty(page);
-       return AOP_WRITEPAGE_ACTIVATE;  /* Return with the page locked */
+       if (wbc->for_reclaim)
+               return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
+       unlock_page(page);
+       return 0;
 }
 
 #ifdef CONFIG_NUMA
-static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
+#ifdef CONFIG_TMPFS
+static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
 {
-       char *nodelist = strchr(value, ':');
-       int err = 1;
+       char buffer[64];
 
-       if (nodelist) {
-               /* NUL-terminate policy string */
-               *nodelist++ = '\0';
-               if (nodelist_parse(nodelist, *policy_nodes))
-                       goto out;
-               if (!nodes_subset(*policy_nodes, node_states[N_HIGH_MEMORY]))
-                       goto out;
-       }
-       if (!strcmp(value, "default")) {
-               *policy = MPOL_DEFAULT;
-               /* Don't allow a nodelist */
-               if (!nodelist)
-                       err = 0;
-       } else if (!strcmp(value, "prefer")) {
-               *policy = MPOL_PREFERRED;
-               /* Insist on a nodelist of one node only */
-               if (nodelist) {
-                       char *rest = nodelist;
-                       while (isdigit(*rest))
-                               rest++;
-                       if (!*rest)
-                               err = 0;
-               }
-       } else if (!strcmp(value, "bind")) {
-               *policy = MPOL_BIND;
-               /* Insist on a nodelist */
-               if (nodelist)
-                       err = 0;
-       } else if (!strcmp(value, "interleave")) {
-               *policy = MPOL_INTERLEAVE;
-               /*
-                * Default to online nodes with memory if no nodelist
-                */
-               if (!nodelist)
-                       *policy_nodes = node_states[N_HIGH_MEMORY];
-               err = 0;
+       if (!mpol || mpol->mode == MPOL_DEFAULT)
+               return;         /* show nothing */
+
+       mpol_to_str(buffer, sizeof(buffer), mpol, 1);
+
+       seq_printf(seq, ",mpol=%s", buffer);
+}
+
+static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
+{
+       struct mempolicy *mpol = NULL;
+       if (sbinfo->mpol) {
+               spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
+               mpol = sbinfo->mpol;
+               mpol_get(mpol);
+               spin_unlock(&sbinfo->stat_lock);
        }
-out:
-       /* Restore string for error message */
-       if (nodelist)
-               *--nodelist = ':';
-       return err;
+       return mpol;
 }
+#endif /* CONFIG_TMPFS */
 
 static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
                        struct shmem_inode_info *info, unsigned long idx)
 {
+       struct mempolicy mpol, *spol;
        struct vm_area_struct pvma;
        struct page *page;
 
+       spol = mpol_cond_copy(&mpol,
+                               mpol_shared_policy_lookup(&info->policy, idx));
+
        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
        pvma.vm_pgoff = idx;
        pvma.vm_ops = NULL;
-       pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+       pvma.vm_policy = spol;
        page = swapin_readahead(entry, gfp, &pvma, 0);
-       mpol_free(pvma.vm_policy);
        return page;
 }
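
mpol_cond_copy() comes from the mempolicy rework this change builds on: when the shared-policy lookup returns a reference-counted policy, it is copied into the on-stack `mpol` and the reference dropped at once, so the policy stays usable across swapin_readahead() without a matching mpol_put() later. Its contract, roughly (the shared-ness test is paraphrased):

        struct mempolicy *mpol_cond_copy(struct mempolicy *to,
                                         struct mempolicy *from)
        {
                if (!policy_is_shared(from))    /* paraphrased check */
                        return from;            /* NULL or unshared: as-is */
                *to = *from;                    /* stack copy needs no unref */
                mpol_put(from);                 /* drop the lookup reference */
                return to;
        }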
 
@@ -1071,23 +1127,24 @@ static struct page *shmem_alloc_page(gfp_t gfp,
                        struct shmem_inode_info *info, unsigned long idx)
 {
        struct vm_area_struct pvma;
-       struct page *page;
 
        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
        pvma.vm_pgoff = idx;
        pvma.vm_ops = NULL;
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
-       page = alloc_page_vma(gfp, &pvma, 0);
-       mpol_free(pvma.vm_policy);
-       return page;
+
+       /*
+        * alloc_page_vma() will drop the shared policy reference
+        */
+       return alloc_page_vma(gfp, &pvma, 0);
 }
-#else
-static inline int shmem_parse_mpol(char *value, int *policy,
-                                               nodemask_t *policy_nodes)
+#else /* !CONFIG_NUMA */
+#ifdef CONFIG_TMPFS
+static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
 {
-       return 1;
 }
+#endif /* CONFIG_TMPFS */
 
 static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
                        struct shmem_inode_info *info, unsigned long idx)
@@ -1100,6 +1157,13 @@ static inline struct page *shmem_alloc_page(gfp_t gfp,
 {
        return alloc_page(gfp);
 }
+#endif /* CONFIG_NUMA */
+
+#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
+static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
+{
+       return NULL;
+}
 #endif
 
 /*
@@ -1143,6 +1207,16 @@ repeat:
                goto done;
        error = 0;
        gfp = mapping_gfp_mask(mapping);
+       if (!filepage) {
+               /*
+                * Try to preload while we can wait, to not make a habit of
+                * draining atomic reserves; but don't latch on to this cpu.
+                */
+               error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
+               if (error)
+                       goto failed;
+               radix_tree_preload_end();
+       }
 
        spin_lock(&info->lock);
        shmem_recalc_inode(inode);
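
This deliberately relaxes the usual radix_tree_preload() idiom. The strict form keeps the preload section, and with it preemption disabled, open across the insert (lock, tree and item names illustrative here):

        if (radix_tree_preload(GFP_KERNEL))             /* may sleep here */
                return -ENOMEM;
        spin_lock(&some_lock);
        err = radix_tree_insert(&tree, index, item);    /* draws on the
                                                           preloaded nodes */
        spin_unlock(&some_lock);
        radix_tree_preload_end();

Here the section is closed immediately instead: the per-cpu node pool is topped up while sleeping is still allowed, but the task is not pinned to this CPU across the rest of the lookup, and a GFP_NOWAIT insert that still fails is simply retried.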
@@ -1224,7 +1298,7 @@ repeat:
                        set_page_dirty(filepage);
                        swap_free(swap);
                } else if (!(error = add_to_page_cache(
-                               swappage, mapping, idx, GFP_ATOMIC))) {
+                               swappage, mapping, idx, GFP_NOWAIT))) {
                        info->flags |= SHMEM_PAGEIN;
                        shmem_swp_set(info, entry, 0);
                        shmem_swp_unmap(entry);
@@ -1237,11 +1311,17 @@ repeat:
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        unlock_page(swappage);
-                       page_cache_release(swappage);
                        if (error == -ENOMEM) {
-                               /* let kswapd refresh zone for GFP_ATOMICs */
-                               congestion_wait(WRITE, HZ/50);
+                               /* allow reclaim from this memory cgroup */
+                               error = mem_cgroup_cache_charge(swappage,
+                                       current->mm, gfp & ~__GFP_HIGHMEM);
+                               if (error) {
+                                       page_cache_release(swappage);
+                                       goto failed;
+                               }
+                               mem_cgroup_uncharge_page(swappage);
                        }
+                       page_cache_release(swappage);
                        goto repeat;
                }
        } else if (sgp == SGP_READ && !filepage) {
@@ -1287,6 +1367,17 @@ repeat:
                                goto failed;
                        }
 
+                       /* Precharge page while we can wait, compensate after */
+                       error = mem_cgroup_cache_charge(filepage, current->mm,
+                                                       gfp & ~__GFP_HIGHMEM);
+                       if (error) {
+                               page_cache_release(filepage);
+                               shmem_unacct_blocks(info->flags, 1);
+                               shmem_free_blocks(inode, 1);
+                               filepage = NULL;
+                               goto failed;
+                       }
+
                        spin_lock(&info->lock);
                        entry = shmem_swp_alloc(info, idx, sgp);
                        if (IS_ERR(entry))
@@ -1296,8 +1387,9 @@ repeat:
                                shmem_swp_unmap(entry);
                        }
                        if (error || swap.val || 0 != add_to_page_cache_lru(
-                                       filepage, mapping, idx, GFP_ATOMIC)) {
+                                       filepage, mapping, idx, GFP_NOWAIT)) {
                                spin_unlock(&info->lock);
+                               mem_cgroup_uncharge_page(filepage);
                                page_cache_release(filepage);
                                shmem_unacct_blocks(info->flags, 1);
                                shmem_free_blocks(inode, 1);
@@ -1306,6 +1398,7 @@ repeat:
                                        goto failed;
                                goto repeat;
                        }
+                       mem_cgroup_uncharge_page(filepage);
                        info->flags |= SHMEM_PAGEIN;
                }
 
@@ -1314,6 +1407,8 @@ repeat:
                clear_highpage(filepage);
                flush_dcache_page(filepage);
                SetPageUptodate(filepage);
+               if (sgp == SGP_DIRTY)
+                       set_page_dirty(filepage);
        }
 done:
        *pagep = filepage;
@@ -1425,8 +1520,8 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
                case S_IFREG:
                        inode->i_op = &shmem_inode_operations;
                        inode->i_fop = &shmem_file_operations;
-                       mpol_shared_policy_init(&info->policy, sbinfo->policy,
-                                                       &sbinfo->policy_nodes);
+                       mpol_shared_policy_init(&info->policy,
+                                                shmem_get_sbmpol(sbinfo));
                        break;
                case S_IFDIR:
                        inc_nlink(inode);
@@ -1440,8 +1535,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
                         * Must not load anything in the rbtree,
                         * mpol_free_shared_policy will not be called.
                         */
-                       mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
-                                               NULL);
+                       mpol_shared_policy_init(&info->policy, NULL);
                        break;
                }
        } else
@@ -1499,6 +1593,15 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct address_space *mapping = inode->i_mapping;
        unsigned long index, offset;
+       enum sgp_type sgp = SGP_READ;
+
+       /*
+        * Might this read be for a stacking filesystem?  Then when reading
+        * holes of a sparse file, we actually need to allocate those pages,
+        * and even mark them dirty, so it cannot exceed the max_blocks limit.
+        */
+       if (segment_eq(get_fs(), KERNEL_DS))
+               sgp = SGP_DIRTY;
 
        index = *ppos >> PAGE_CACHE_SHIFT;
        offset = *ppos & ~PAGE_CACHE_MASK;
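
segment_eq(get_fs(), KERNEL_DS) identifies reads issued under set_fs(KERNEL_DS), which is how a stacking filesystem (or other in-kernel user) reads a lower tmpfs file. A hypothetical copy-up path (lower_file, kbuf, count and pos are illustrative) would look roughly like:

        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);      /* kernel buffer passed as "user" pointer */
        err = vfs_read(lower_file, (char __user *)kbuf, count, &pos);
        set_fs(old_fs);

For such readers SGP_DIRTY materializes holes as allocated, dirtied pages, charged against max_blocks, rather than the transient zero-filled view an ordinary read gets.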
@@ -1517,7 +1620,7 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
                                break;
                }
 
-               desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
+               desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
                if (desc->error) {
                        if (desc->error == -EINVAL)
                                desc->error = 0;
@@ -1859,7 +1962,7 @@ static const struct inode_operations shmem_symlink_inode_operations = {
 };
 
 #ifdef CONFIG_TMPFS_POSIX_ACL
-/**
+/*
  * Superblocks without xattr inode operations will get security.* xattr
  * support from the VFS "for free". As soon as we have any other xattrs
  * like ACLs, we also need to implement the security.* handlers at
@@ -1878,8 +1981,7 @@ static int shmem_xattr_security_get(struct inode *inode, const char *name,
 {
        if (strcmp(name, "") == 0)
                return -EINVAL;
-       return security_inode_getsecurity(inode, name, buffer, size,
-                                         -EOPNOTSUPP);
+       return xattr_getsecurity(inode, name, buffer, size);
 }
 
 static int shmem_xattr_security_set(struct inode *inode, const char *name,
@@ -1975,9 +2077,8 @@ static const struct export_operations shmem_export_ops = {
        .fh_to_dentry   = shmem_fh_to_dentry,
 };
 
-static int shmem_parse_options(char *options, int *mode, uid_t *uid,
-       gid_t *gid, unsigned long *blocks, unsigned long *inodes,
-       int *policy, nodemask_t *policy_nodes)
+static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
+                              bool remount)
 {
        char *this_char, *value, *rest;
 
@@ -2020,35 +2121,36 @@ static int shmem_parse_options(char *options, int *mode, uid_t *uid,
                        }
                        if (*rest)
                                goto bad_val;
-                       *blocks = DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
+                       sbinfo->max_blocks =
+                               DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
                } else if (!strcmp(this_char,"nr_blocks")) {
-                       *blocks = memparse(value,&rest);
+                       sbinfo->max_blocks = memparse(value, &rest);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"nr_inodes")) {
-                       *inodes = memparse(value,&rest);
+                       sbinfo->max_inodes = memparse(value, &rest);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"mode")) {
-                       if (!mode)
+                       if (remount)
                                continue;
-                       *mode = simple_strtoul(value,&rest,8);
+                       sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"uid")) {
-                       if (!uid)
+                       if (remount)
                                continue;
-                       *uid = simple_strtoul(value,&rest,0);
+                       sbinfo->uid = simple_strtoul(value, &rest, 0);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"gid")) {
-                       if (!gid)
+                       if (remount)
                                continue;
-                       *gid = simple_strtoul(value,&rest,0);
+                       sbinfo->gid = simple_strtoul(value, &rest, 0);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"mpol")) {
-                       if (shmem_parse_mpol(value,policy,policy_nodes))
+                       if (mpol_parse_str(value, &sbinfo->mpol, 1))
                                goto bad_val;
                } else {
                        printk(KERN_ERR "tmpfs: Bad mount option %s\n",
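
All options now land directly in a shmem_sb_info (with remount skipping mode, uid and gid), and the sizes go through memparse(), so k/m/g suffixes work. An illustrative invocation, values arbitrary:

        mount -t tmpfs -o size=1g,nr_inodes=100k,mode=1777,uid=1000,gid=1000,mpol=interleave tmpfs /mnt/t

size is rounded up to pages via DIV_ROUND_UP(size, PAGE_CACHE_SIZE), mode is masked to 07777, and the mpol string is handed to mpol_parse_str().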
@@ -2068,24 +2170,20 @@ bad_val:
 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
 {
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
-       unsigned long max_blocks = sbinfo->max_blocks;
-       unsigned long max_inodes = sbinfo->max_inodes;
-       int policy = sbinfo->policy;
-       nodemask_t policy_nodes = sbinfo->policy_nodes;
+       struct shmem_sb_info config = *sbinfo;
        unsigned long blocks;
        unsigned long inodes;
        int error = -EINVAL;
 
-       if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
-                               &max_inodes, &policy, &policy_nodes))
+       if (shmem_parse_options(data, &config, true))
                return error;
 
        spin_lock(&sbinfo->stat_lock);
        blocks = sbinfo->max_blocks - sbinfo->free_blocks;
        inodes = sbinfo->max_inodes - sbinfo->free_inodes;
-       if (max_blocks < blocks)
+       if (config.max_blocks < blocks)
                goto out;
-       if (max_inodes < inodes)
+       if (config.max_inodes < inodes)
                goto out;
        /*
         * Those tests also disallow limited->unlimited while any are in
@@ -2093,23 +2191,43 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
         * but we must separately disallow unlimited->limited, because
         * in that case we have no record of how much is already in use.
         */
-       if (max_blocks && !sbinfo->max_blocks)
+       if (config.max_blocks && !sbinfo->max_blocks)
                goto out;
-       if (max_inodes && !sbinfo->max_inodes)
+       if (config.max_inodes && !sbinfo->max_inodes)
                goto out;
 
        error = 0;
-       sbinfo->max_blocks  = max_blocks;
-       sbinfo->free_blocks = max_blocks - blocks;
-       sbinfo->max_inodes  = max_inodes;
-       sbinfo->free_inodes = max_inodes - inodes;
-       sbinfo->policy = policy;
-       sbinfo->policy_nodes = policy_nodes;
+       sbinfo->max_blocks  = config.max_blocks;
+       sbinfo->free_blocks = config.max_blocks - blocks;
+       sbinfo->max_inodes  = config.max_inodes;
+       sbinfo->free_inodes = config.max_inodes - inodes;
+
+       mpol_put(sbinfo->mpol);
+       sbinfo->mpol        = config.mpol;      /* transfers initial ref */
 out:
        spin_unlock(&sbinfo->stat_lock);
        return error;
 }
-#endif
+
+static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
+{
+       struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
+
+       if (sbinfo->max_blocks != shmem_default_max_blocks())
+               seq_printf(seq, ",size=%luk",
+                       sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
+       if (sbinfo->max_inodes != shmem_default_max_inodes())
+               seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
+       if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
+               seq_printf(seq, ",mode=%03o", sbinfo->mode);
+       if (sbinfo->uid != 0)
+               seq_printf(seq, ",uid=%u", sbinfo->uid);
+       if (sbinfo->gid != 0)
+               seq_printf(seq, ",gid=%u", sbinfo->gid);
+       shmem_show_mpol(seq, sbinfo->mpol);
+       return 0;
+}
+#endif /* CONFIG_TMPFS */
 
 static void shmem_put_super(struct super_block *sb)
 {
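
shmem_show_options() emits only what differs from the defaults, so a bare tmpfs mount shows nothing beyond its flags. For the illustrative mount above (assuming 4K pages), the /proc/mounts line would read roughly:

        tmpfs /mnt/t tmpfs rw,size=1048576k,nr_inodes=102400,uid=1000,gid=1000,mpol=interleave 0 0

mode=1777 is omitted because it matches the S_IRWXUGO | S_ISVTX default.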
@@ -2122,15 +2240,22 @@ static int shmem_fill_super(struct super_block *sb,
 {
        struct inode *inode;
        struct dentry *root;
-       int mode   = S_IRWXUGO | S_ISVTX;
-       uid_t uid = current->fsuid;
-       gid_t gid = current->fsgid;
-       int err = -ENOMEM;
        struct shmem_sb_info *sbinfo;
-       unsigned long blocks = 0;
-       unsigned long inodes = 0;
-       int policy = MPOL_DEFAULT;
-       nodemask_t policy_nodes = node_states[N_HIGH_MEMORY];
+       int err = -ENOMEM;
+
+       /* Round up to L1_CACHE_BYTES to resist false sharing */
+       sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
+                               L1_CACHE_BYTES), GFP_KERNEL);
+       if (!sbinfo)
+               return -ENOMEM;
+
+       sbinfo->max_blocks = 0;
+       sbinfo->max_inodes = 0;
+       sbinfo->mode = S_IRWXUGO | S_ISVTX;
+       sbinfo->uid = current->fsuid;
+       sbinfo->gid = current->fsgid;
+       sbinfo->mpol = NULL;
+       sb->s_fs_info = sbinfo;
 
 #ifdef CONFIG_TMPFS
        /*
@@ -2139,34 +2264,22 @@ static int shmem_fill_super(struct super_block *sb,
         * but the internal instance is left unlimited.
         */
        if (!(sb->s_flags & MS_NOUSER)) {
-               blocks = totalram_pages / 2;
-               inodes = totalram_pages - totalhigh_pages;
-               if (inodes > blocks)
-                       inodes = blocks;
-               if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
-                                       &inodes, &policy, &policy_nodes))
-                       return -EINVAL;
+               sbinfo->max_blocks = shmem_default_max_blocks();
+               sbinfo->max_inodes = shmem_default_max_inodes();
+               if (shmem_parse_options(data, sbinfo, false)) {
+                       err = -EINVAL;
+                       goto failed;
+               }
        }
        sb->s_export_op = &shmem_export_ops;
 #else
        sb->s_flags |= MS_NOUSER;
 #endif
 
-       /* Round up to L1_CACHE_BYTES to resist false sharing */
-       sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
-                               L1_CACHE_BYTES), GFP_KERNEL);
-       if (!sbinfo)
-               return -ENOMEM;
-
        spin_lock_init(&sbinfo->stat_lock);
-       sbinfo->max_blocks = blocks;
-       sbinfo->free_blocks = blocks;
-       sbinfo->max_inodes = inodes;
-       sbinfo->free_inodes = inodes;
-       sbinfo->policy = policy;
-       sbinfo->policy_nodes = policy_nodes;
+       sbinfo->free_blocks = sbinfo->max_blocks;
+       sbinfo->free_inodes = sbinfo->max_inodes;
 
-       sb->s_fs_info = sbinfo;
        sb->s_maxbytes = SHMEM_MAX_BYTES;
        sb->s_blocksize = PAGE_CACHE_SIZE;
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -2178,11 +2291,11 @@ static int shmem_fill_super(struct super_block *sb,
        sb->s_flags |= MS_POSIXACL;
 #endif
 
-       inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
+       inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0);
        if (!inode)
                goto failed;
-       inode->i_uid = uid;
-       inode->i_gid = gid;
+       inode->i_uid = sbinfo->uid;
+       inode->i_gid = sbinfo->gid;
        root = d_alloc_root(inode);
        if (!root)
                goto failed_iput;
@@ -2318,6 +2431,7 @@ static const struct super_operations shmem_ops = {
 #ifdef CONFIG_TMPFS
        .statfs         = shmem_statfs,
        .remount_fs     = shmem_remount_fs,
+       .show_options   = shmem_show_options,
 #endif
        .delete_inode   = shmem_delete_inode,
        .drop_inode     = generic_delete_inode,
@@ -2386,12 +2500,11 @@ out4:
 }
 module_init(init_tmpfs)
 
-/*
+/**
  * shmem_file_setup - get an unlinked file living in tmpfs
- *
  * @name: name for dentry (to be seen in /proc/<pid>/maps)
  * @size: size to be set for the file
- *
+ * @flags: vm_flags
  */
 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 {
@@ -2446,9 +2559,8 @@ put_memory:
        return ERR_PTR(error);
 }
 
-/*
+/**
  * shmem_zero_setup - setup a shared anonymous mapping
- *
  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
  */
 int shmem_zero_setup(struct vm_area_struct *vma)