* Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
* Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
*
+ * tiny-shmem:
+ * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
+ *
* This file is released under the GPL.
*/
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/vfs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/swap.h>
+
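+/*
+ * shm_mnt is the internal mount that backs SysV SHM segments,
+ * shmem_file_setup() files and shared anonymous mappings.
+ */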
+static struct vfsmount *shm_mnt;
+
+#ifdef CONFIG_SHMEM
/*
* This virtual memory filesystem is heavily based on the ramfs. It
* extends ramfs by the ability to use swap and honor resource limits
* which makes it a completely usable filesystem.
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/exportfs.h>
+#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
-#include <linux/mm.h>
#include <linux/mman.h>
-#include <linux/file.h>
-#include <linux/swap.h>
-#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
-#include <linux/mount.h>
#include <linux/writeback.h>
-#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
+#include <linux/seq_file.h>
+#include <linux/magic.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>
-/* This magic number is used in glibc for posix shared memory */
-#define TMPFS_MAGIC 0x01021994
-
+/*
+ * The maximum size of a shmem/tmpfs file is limited by the maximum size of
+ * its triple-indirect swap vector - see illustration at shmem_swp_entry().
+ *
+ * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
+ * but one eighth of that on a 64-bit kernel. With 8kB page size, maximum
+ * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
+ * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
+ *
+ * We use / and * instead of shifts in the definitions below, so that the swap
+ * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
+ */
#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
-#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
-#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512)
+#define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
-#define SHMEM_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
-#define SHMEM_MAX_BYTES ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
+#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
+#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)
+#define SHMEM_MAX_BYTES min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
+#define SHMEM_MAX_INDEX ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))
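+/*
+ * Worked example (4kB pages, 32-bit, 4-byte swap entries):
+ * ENTRIES_PER_PAGE = 1024, ENTRIES_PER_PAGEPAGE = 1024*1024, so
+ * SHMSWP_MAX_INDEX ~= 524288 * 1025 pages ~= 2TB, as noted above.
+ */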
+
+#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
- SGP_QUICK, /* don't try more than file page cache lookup */
SGP_READ, /* don't exceed i_size, don't allocate page */
SGP_CACHE, /* don't exceed i_size, may allocate page */
+ SGP_DIRTY, /* like SGP_CACHE, but set new page dirty */
SGP_WRITE, /* may exceed i_size, may allocate page */
- SGP_FAULT, /* same as SGP_CACHE, return with page locked */
};
+#ifdef CONFIG_TMPFS
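+/*
+ * Mount defaults when no size=/nr_inodes= options are given: blocks
+ * are capped at half of RAM; inodes likewise, and additionally by
+ * lowmem, since each inode pins unswappable lowmem.
+ */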
+static unsigned long shmem_default_max_blocks(void)
+{
+ return totalram_pages / 2;
+}
+
+static unsigned long shmem_default_max_inodes(void)
+{
+ return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
+}
+#endif
+
static int shmem_getpage(struct inode *inode, unsigned long idx,
struct page **pagep, enum sgp_type sgp, int *type);
* BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
* might be reconsidered if it ever diverges from PAGE_SIZE.
*
- * __GFP_MOVABLE is masked out as swap vectors cannot move
+ * Mobility flags are masked out as swap vectors cannot move
*/
- return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
+ return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
PAGE_CACHE_SHIFT-PAGE_SHIFT);
}
*/
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
- return (flags & VM_ACCOUNT)?
- security_vm_enough_memory(VM_ACCT(size)): 0;
+ return (flags & VM_NORESERVE) ?
+ 0 : security_vm_enough_memory_kern(VM_ACCT(size));
}
static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
- if (flags & VM_ACCOUNT)
+ if (!(flags & VM_NORESERVE))
vm_unacct_memory(VM_ACCT(size));
}
*/
static inline int shmem_acct_block(unsigned long flags)
{
- return (flags & VM_ACCOUNT)?
- 0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
+ return (flags & VM_NORESERVE) ?
+ security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}
static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
- if (!(flags & VM_ACCOUNT))
+ if (flags & VM_NORESERVE)
vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
-static struct vm_operations_struct shmem_vm_ops;
+static const struct vm_operations_struct shmem_vm_ops;
static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
.ra_pages = 0, /* No readahead */
- .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+ .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
.unplug_io_fn = default_unplug_io_fn,
};
static LIST_HEAD(shmem_swaplist);
-static DEFINE_SPINLOCK(shmem_swaplist_lock);
+static DEFINE_MUTEX(shmem_swaplist_mutex);
static void shmem_free_blocks(struct inode *inode, long pages)
{
}
}
-/*
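+/*
+ * Inode limit accounting: each inode (and each additional hard link)
+ * takes one unit from free_inodes, returned via shmem_free_inode().
+ */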
+static int shmem_reserve_inode(struct super_block *sb)
+{
+ struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
+ if (sbinfo->max_inodes) {
+ spin_lock(&sbinfo->stat_lock);
+ if (!sbinfo->free_inodes) {
+ spin_unlock(&sbinfo->stat_lock);
+ return -ENOSPC;
+ }
+ sbinfo->free_inodes--;
+ spin_unlock(&sbinfo->stat_lock);
+ }
+ return 0;
+}
+
+static void shmem_free_inode(struct super_block *sb)
+{
+ struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
+ if (sbinfo->max_inodes) {
+ spin_lock(&sbinfo->stat_lock);
+ sbinfo->free_inodes++;
+ spin_unlock(&sbinfo->stat_lock);
+ }
+}
+
+/**
* shmem_recalc_inode - recalculate the size of an inode
- *
* @inode: inode to recalc
*
* We have to calculate the free blocks since the mm can drop
}
}
-/*
+/**
* shmem_swp_entry - find the swap vector position in the info structure
- *
* @info: info structure for the inode
* @index: index of the page to find
* @page: optional page to add to the structure. Has to be preset to
}
}
-/*
+/**
* shmem_swp_alloc - get the position of the swap entry for the page.
- * If it does not exist allocate the entry.
- *
* @info: info structure for the inode
* @index: index of the page to find
* @sgp: check and recheck i_size? skip allocation?
+ *
+ * If the entry does not exist, allocate it.
*/
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
return entry;
}
-/*
+/**
* shmem_free_swp - free some swap entries in a directory
- *
* @dir: pointer to the directory
* @edir: pointer after last entry of the directory
* @punch_lock: pointer to spinlock when needed for the holepunch case
(void) shmem_getpage(inode,
attr->ia_size>>PAGE_CACHE_SHIFT,
&page, SGP_READ, NULL);
+ if (page)
+ unlock_page(page);
}
/*
* Reset SHMEM_PAGEIN flag so that shmem_truncate can
error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
if (!error && (attr->ia_valid & ATTR_MODE))
- error = generic_acl_chmod(inode, &shmem_acl_ops);
+ error = generic_acl_chmod(inode);
#endif
if (page)
page_cache_release(page);
static void shmem_delete_inode(struct inode *inode)
{
- struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
struct shmem_inode_info *info = SHMEM_I(inode);
if (inode->i_op->truncate == shmem_truncate) {
inode->i_size = 0;
shmem_truncate(inode);
if (!list_empty(&info->swaplist)) {
- spin_lock(&shmem_swaplist_lock);
+ mutex_lock(&shmem_swaplist_mutex);
list_del_init(&info->swaplist);
- spin_unlock(&shmem_swaplist_lock);
+ mutex_unlock(&shmem_swaplist_mutex);
}
}
BUG_ON(inode->i_blocks);
- if (sbinfo->max_inodes) {
- spin_lock(&sbinfo->stat_lock);
- sbinfo->free_inodes++;
- spin_unlock(&sbinfo->stat_lock);
- }
+ shmem_free_inode(inode->i_sb);
clear_inode(inode);
}
struct page *subdir;
swp_entry_t *ptr;
int offset;
+ int error;
idx = 0;
ptr = info->i_direct;
spin_lock(&info->lock);
+ if (!info->swapped) {
+ list_del_init(&info->swaplist);
+ goto lost2;
+ }
limit = info->next_index;
size = limit;
if (size > SHMEM_NR_DIRECT)
size = SHMEM_NR_DIRECT;
offset = shmem_find_swp(entry, ptr, ptr+size);
- if (offset >= 0) {
- shmem_swp_balance_unmap();
+ if (offset >= 0)
goto found;
- }
if (!info->i_indirect)
goto lost2;
for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
if (unlikely(idx == stage)) {
shmem_dir_unmap(dir-1);
+ if (cond_resched_lock(&info->lock)) {
+ /* check it has not been truncated */
+ if (limit > info->next_index) {
+ limit = info->next_index;
+ if (idx >= limit)
+ goto lost2;
+ }
+ }
dir = shmem_dir_map(info->i_indirect) +
ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
while (!*dir) {
if (size > ENTRIES_PER_PAGE)
size = ENTRIES_PER_PAGE;
offset = shmem_find_swp(entry, ptr, ptr+size);
+ shmem_swp_unmap(ptr);
if (offset >= 0) {
shmem_dir_unmap(dir);
goto found;
}
- shmem_swp_unmap(ptr);
}
}
lost1:
return 0;
found:
idx += offset;
- inode = &info->vfs_inode;
- if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
- info->flags |= SHMEM_PAGEIN;
- shmem_swp_set(info, ptr + offset, 0);
- }
- shmem_swp_unmap(ptr);
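+	/* igrab() returns NULL if the inode is already being evicted */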
+ inode = igrab(&info->vfs_inode);
spin_unlock(&info->lock);
+
/*
- * Decrement swap count even when the entry is left behind:
- * try_to_unuse will skip over mms, then reincrement count.
+ * Move _head_ to start search for next from here.
+ * But be careful: shmem_delete_inode checks list_empty without taking
+ * mutex, and there's an instant in list_move_tail when info->swaplist
+ * would appear empty, if it were the only one on shmem_swaplist. We
+ * could avoid doing it if inode is NULL; or use this minor optimization.
*/
- swap_free(entry);
- return 1;
+ if (shmem_swaplist.next != &info->swaplist)
+ list_move_tail(&shmem_swaplist, &info->swaplist);
+ mutex_unlock(&shmem_swaplist_mutex);
+
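+	/*
+	 * Return convention: a positive value stops the swaplist walk in
+	 * shmem_unuse(); a negative value is an error propagated back to
+	 * try_to_unuse().
+	 */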
+ error = 1;
+ if (!inode)
+ goto out;
+ /*
+ * Charge page using GFP_KERNEL while we can wait.
+ * Charged back to the user (not to the caller) when swap accounting is used.
+ * add_to_page_cache() will be called with GFP_NOWAIT.
+ */
+ error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
+ if (error)
+ goto out;
+ error = radix_tree_preload(GFP_KERNEL);
+ if (error) {
+ mem_cgroup_uncharge_cache_page(page);
+ goto out;
+ }
+ error = 1;
+
+ spin_lock(&info->lock);
+ ptr = shmem_swp_entry(info, idx, NULL);
+ if (ptr && ptr->val == entry.val) {
+ error = add_to_page_cache_locked(page, inode->i_mapping,
+ idx, GFP_NOWAIT);
+ /* does mem_cgroup_uncharge_cache_page on error */
+ } else /* we must compensate for our precharge above */
+ mem_cgroup_uncharge_cache_page(page);
+
+ if (error == -EEXIST) {
+ struct page *filepage = find_get_page(inode->i_mapping, idx);
+ error = 1;
+ if (filepage) {
+ /*
+ * There might be a more uptodate page coming down
+ * from a stacked writepage: forget our swappage if so.
+ */
+ if (PageUptodate(filepage))
+ error = 0;
+ page_cache_release(filepage);
+ }
+ }
+ if (!error) {
+ delete_from_swap_cache(page);
+ set_page_dirty(page);
+ info->flags |= SHMEM_PAGEIN;
+ shmem_swp_set(info, ptr, 0);
+ swap_free(entry);
+ error = 1; /* not an error, but entry was found */
+ }
+ if (ptr)
+ shmem_swp_unmap(ptr);
+ spin_unlock(&info->lock);
+ radix_tree_preload_end();
+out:
+ unlock_page(page);
+ page_cache_release(page);
+ iput(inode); /* allows for NULL */
+ return error;
}
/*
struct shmem_inode_info *info;
int found = 0;
- spin_lock(&shmem_swaplist_lock);
+ mutex_lock(&shmem_swaplist_mutex);
list_for_each_safe(p, next, &shmem_swaplist) {
info = list_entry(p, struct shmem_inode_info, swaplist);
- if (!info->swapped)
- list_del_init(&info->swaplist);
- else if (shmem_unuse_inode(info, entry, page)) {
- /* move head to start search for next from here */
- list_move_tail(&shmem_swaplist, &info->swaplist);
- found = 1;
- break;
- }
+ found = shmem_unuse_inode(info, entry, page);
+ cond_resched();
+ if (found)
+ goto out;
}
- spin_unlock(&shmem_swaplist_lock);
- return found;
+ mutex_unlock(&shmem_swaplist_mutex);
+ /*
+ * Can some race bring us here? We've been holding page lock,
+ * so I think not; but would rather try again later than BUG()
+ */
+ unlock_page(page);
+ page_cache_release(page);
+out:
+ return (found < 0) ? found : 0;
}
/*
struct inode *inode;
BUG_ON(!PageLocked(page));
- BUG_ON(page_mapped(page));
-
mapping = page->mapping;
index = page->index;
inode = mapping->host;
info = SHMEM_I(inode);
if (info->flags & VM_LOCKED)
goto redirty;
- swap = get_swap_page();
- if (!swap.val)
+ if (!total_swap_pages)
goto redirty;
+ /*
+ * shmem_backing_dev_info's capabilities prevent regular writeback or
+ * sync from ever calling shmem_writepage; but a stacking filesystem
+ * may use the ->writepage of its underlying filesystem, in which case
+ * tmpfs should write out to swap only in response to memory pressure,
+ * and not for the writeback threads or sync. However, in those cases,
+ * we do still want to check if there's a redundant swappage to be
+ * discarded.
+ */
+ if (wbc->for_reclaim)
+ swap = get_swap_page();
+ else
+ swap.val = 0;
+
spin_lock(&info->lock);
- shmem_recalc_inode(inode);
if (index >= info->next_index) {
BUG_ON(!(info->flags & SHMEM_TRUNCATE));
goto unlock;
}
entry = shmem_swp_entry(info, index, NULL);
- BUG_ON(!entry);
- BUG_ON(entry->val);
+ if (entry->val) {
+ /*
+ * The more uptodate page coming down from a stacked
+ * writepage should replace our old swappage.
+ */
+ free_swap_and_cache(*entry);
+ shmem_swp_set(info, entry, 0);
+ }
+ shmem_recalc_inode(inode);
- if (move_to_swap_cache(page, swap) == 0) {
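+	/* info->lock is still held, so the swap cache add must not sleep */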
+ if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
+ remove_from_page_cache(page);
shmem_swp_set(info, entry, swap.val);
shmem_swp_unmap(entry);
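+		/* pin the inode so it cannot be evicted before the list_move below */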
+ if (list_empty(&info->swaplist))
+ inode = igrab(inode);
+ else
+ inode = NULL;
spin_unlock(&info->lock);
- if (list_empty(&info->swaplist)) {
- spin_lock(&shmem_swaplist_lock);
+ swap_shmem_alloc(swap);
+ BUG_ON(page_mapped(page));
+ page_cache_release(page); /* pagecache ref */
+ swap_writepage(page, wbc);
+ if (inode) {
+ mutex_lock(&shmem_swaplist_mutex);
/* move instead of add in case we're racing */
list_move_tail(&info->swaplist, &shmem_swaplist);
- spin_unlock(&shmem_swaplist_lock);
+ mutex_unlock(&shmem_swaplist_mutex);
+ iput(inode);
}
- unlock_page(page);
return 0;
}
shmem_swp_unmap(entry);
unlock:
spin_unlock(&info->lock);
- swap_free(swap);
+ /*
+ * add_to_swap_cache() doesn't return -EEXIST, so we can safely
+ * clear SWAP_HAS_CACHE flag.
+ */
+ swapcache_free(swap, NULL);
redirty:
set_page_dirty(page);
- return AOP_WRITEPAGE_ACTIVATE; /* Return with the page locked */
+ if (wbc->for_reclaim)
+ return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */
+ unlock_page(page);
+ return 0;
}
#ifdef CONFIG_NUMA
-static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
+#ifdef CONFIG_TMPFS
+static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
- char *nodelist = strchr(value, ':');
- int err = 1;
+ char buffer[64];
- if (nodelist) {
- /* NUL-terminate policy string */
- *nodelist++ = '\0';
- if (nodelist_parse(nodelist, *policy_nodes))
- goto out;
- if (!nodes_subset(*policy_nodes, node_states[N_HIGH_MEMORY]))
- goto out;
- }
- if (!strcmp(value, "default")) {
- *policy = MPOL_DEFAULT;
- /* Don't allow a nodelist */
- if (!nodelist)
- err = 0;
- } else if (!strcmp(value, "prefer")) {
- *policy = MPOL_PREFERRED;
- /* Insist on a nodelist of one node only */
- if (nodelist) {
- char *rest = nodelist;
- while (isdigit(*rest))
- rest++;
- if (!*rest)
- err = 0;
- }
- } else if (!strcmp(value, "bind")) {
- *policy = MPOL_BIND;
- /* Insist on a nodelist */
- if (nodelist)
- err = 0;
- } else if (!strcmp(value, "interleave")) {
- *policy = MPOL_INTERLEAVE;
- /*
- * Default to online nodes with memory if no nodelist
- */
- if (!nodelist)
- *policy_nodes = node_states[N_HIGH_MEMORY];
- err = 0;
+ if (!mpol || mpol->mode == MPOL_DEFAULT)
+ return; /* show nothing */
+
+ mpol_to_str(buffer, sizeof(buffer), mpol, 1);
+
+ seq_printf(seq, ",mpol=%s", buffer);
+}
+
+static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
+{
+ struct mempolicy *mpol = NULL;
+ if (sbinfo->mpol) {
+ spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
+ mpol = sbinfo->mpol;
+ mpol_get(mpol);
+ spin_unlock(&sbinfo->stat_lock);
}
-out:
- /* Restore string for error message */
- if (nodelist)
- *--nodelist = ':';
- return err;
+ return mpol;
}
+#endif /* CONFIG_TMPFS */
-static struct page *shmem_swapin_async(struct shared_policy *p,
- swp_entry_t entry, unsigned long idx)
+static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
+ struct shmem_inode_info *info, unsigned long idx)
{
- struct page *page;
+ struct mempolicy mpol, *spol;
struct vm_area_struct pvma;
+ struct page *page;
+
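+	/*
+	 * mpol_cond_copy() copies a shared policy onto the stack and drops
+	 * the lookup reference, so the pseudo vma can use it safely.
+	 */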
+ spol = mpol_cond_copy(&mpol,
+ mpol_shared_policy_lookup(&info->policy, idx));
/* Create a pseudo vma that just contains the policy */
- memset(&pvma, 0, sizeof(struct vm_area_struct));
- pvma.vm_end = PAGE_SIZE;
+ pvma.vm_start = 0;
pvma.vm_pgoff = idx;
- pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
- page = read_swap_cache_async(entry, &pvma, 0);
- mpol_free(pvma.vm_policy);
+ pvma.vm_ops = NULL;
+ pvma.vm_policy = spol;
+ page = swapin_readahead(entry, gfp, &pvma, 0);
return page;
}
-struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
- unsigned long idx)
-{
- struct shared_policy *p = &info->policy;
- int i, num;
- struct page *page;
- unsigned long offset;
-
- num = valid_swaphandles(entry, &offset);
- for (i = 0; i < num; offset++, i++) {
- page = shmem_swapin_async(p,
- swp_entry(swp_type(entry), offset), idx);
- if (!page)
- break;
- page_cache_release(page);
- }
- lru_add_drain(); /* Push any new pages onto the LRU now */
- return shmem_swapin_async(p, entry, idx);
-}
-
-static struct page *
-shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
- unsigned long idx)
+static struct page *shmem_alloc_page(gfp_t gfp,
+ struct shmem_inode_info *info, unsigned long idx)
{
struct vm_area_struct pvma;
- struct page *page;
- memset(&pvma, 0, sizeof(struct vm_area_struct));
- pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+ /* Create a pseudo vma that just contains the policy */
+ pvma.vm_start = 0;
pvma.vm_pgoff = idx;
- pvma.vm_end = PAGE_SIZE;
- page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
- mpol_free(pvma.vm_policy);
- return page;
+ pvma.vm_ops = NULL;
+ pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+
+ /*
+ * alloc_page_vma() will drop the shared policy reference
+ */
+ return alloc_page_vma(gfp, &pvma, 0);
}
-#else
-static inline int shmem_parse_mpol(char *value, int *policy,
- nodemask_t *policy_nodes)
+#else /* !CONFIG_NUMA */
+#ifdef CONFIG_TMPFS
+static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
{
- return 1;
}
+#endif /* CONFIG_TMPFS */
-static inline struct page *
-shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
+static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
+ struct shmem_inode_info *info, unsigned long idx)
{
- swapin_readahead(entry, 0, NULL);
- return read_swap_cache_async(entry, NULL, 0);
+ return swapin_readahead(entry, gfp, NULL, 0);
}
-static inline struct page *
-shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
+static inline struct page *shmem_alloc_page(gfp_t gfp,
+ struct shmem_inode_info *info, unsigned long idx)
{
- return alloc_page(gfp | __GFP_ZERO);
+ return alloc_page(gfp);
+}
+#endif /* CONFIG_NUMA */
+
+#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
+static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
+{
+ return NULL;
}
#endif
struct page *swappage;
swp_entry_t *entry;
swp_entry_t swap;
+ gfp_t gfp;
int error;
if (idx >= SHMEM_MAX_INDEX)
* Normally, filepage is NULL on entry, and either found
* uptodate immediately, or allocated and zeroed, or read
* in under swappage, which is then assigned to filepage.
- * But shmem_readpage and shmem_write_begin pass in a locked
+ * But shmem_readpage (required for splice) passes in a locked
* filepage, which may be found not uptodate by other callers
* too, and may need to be copied from the swappage read in.
*/
if (filepage && PageUptodate(filepage))
goto done;
error = 0;
- if (sgp == SGP_QUICK)
- goto failed;
+ gfp = mapping_gfp_mask(mapping);
+ if (!filepage) {
+ /*
+ * Try to preload while we can wait, to not make a habit of
+ * draining atomic reserves; but don't latch on to this cpu.
+ */
+ error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
+ if (error)
+ goto failed;
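+		/*
+		 * End preload at once: we only wanted to top up this cpu's
+		 * node pool; the later GFP_NOWAIT insertion copes if the
+		 * pool has been drained meanwhile.
+		 */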
+ radix_tree_preload_end();
+ }
spin_lock(&info->lock);
shmem_recalc_inode(inode);
*type |= VM_FAULT_MAJOR;
}
spin_unlock(&info->lock);
- swappage = shmem_swapin(info, swap, idx);
+ swappage = shmem_swapin(swap, gfp, info, idx);
if (!swappage) {
spin_lock(&info->lock);
entry = shmem_swp_alloc(info, idx, sgp);
}
/* We have to do this with page locked to prevent races */
- if (TestSetPageLocked(swappage)) {
+ if (!trylock_page(swappage)) {
shmem_swp_unmap(entry);
spin_unlock(&info->lock);
wait_on_page_locked(swappage);
SetPageUptodate(filepage);
set_page_dirty(filepage);
swap_free(swap);
- } else if (!(error = move_from_swap_cache(
- swappage, idx, mapping))) {
+ } else if (!(error = add_to_page_cache_locked(swappage, mapping,
+ idx, GFP_NOWAIT))) {
info->flags |= SHMEM_PAGEIN;
shmem_swp_set(info, entry, 0);
shmem_swp_unmap(entry);
+ delete_from_swap_cache(swappage);
spin_unlock(&info->lock);
filepage = swappage;
+ set_page_dirty(filepage);
swap_free(swap);
} else {
shmem_swp_unmap(entry);
spin_unlock(&info->lock);
- unlock_page(swappage);
- page_cache_release(swappage);
if (error == -ENOMEM) {
- /* let kswapd refresh zone for GFP_ATOMICs */
- congestion_wait(WRITE, HZ/50);
+ /*
+ * reclaim from proper memory cgroup and
+ * call memcg's OOM if needed.
+ */
+ error = mem_cgroup_shmem_charge_fallback(
+ swappage,
+ current->mm,
+ gfp);
+ if (error) {
+ unlock_page(swappage);
+ page_cache_release(swappage);
+ goto failed;
+ }
}
+ unlock_page(swappage);
+ page_cache_release(swappage);
goto repeat;
}
} else if (sgp == SGP_READ && !filepage) {
shmem_swp_unmap(entry);
filepage = find_get_page(mapping, idx);
if (filepage &&
- (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
+ (!PageUptodate(filepage) || !trylock_page(filepage))) {
spin_unlock(&info->lock);
wait_on_page_locked(filepage);
page_cache_release(filepage);
}
if (!filepage) {
+ int ret;
+
spin_unlock(&info->lock);
- filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
- info,
- idx);
+ filepage = shmem_alloc_page(gfp, info, idx);
if (!filepage) {
shmem_unacct_blocks(info->flags, 1);
shmem_free_blocks(inode, 1);
error = -ENOMEM;
goto failed;
}
+ SetPageSwapBacked(filepage);
+
+ /* Precharge page while we can wait, compensate after */
+ error = mem_cgroup_cache_charge(filepage, current->mm,
+ GFP_KERNEL);
+ if (error) {
+ page_cache_release(filepage);
+ shmem_unacct_blocks(info->flags, 1);
+ shmem_free_blocks(inode, 1);
+ filepage = NULL;
+ goto failed;
+ }
spin_lock(&info->lock);
entry = shmem_swp_alloc(info, idx, sgp);
swap = *entry;
shmem_swp_unmap(entry);
}
- if (error || swap.val || 0 != add_to_page_cache_lru(
- filepage, mapping, idx, GFP_ATOMIC)) {
+ ret = error || swap.val;
+ if (ret)
+ mem_cgroup_uncharge_cache_page(filepage);
+ else
+ ret = add_to_page_cache_lru(filepage, mapping,
+ idx, GFP_NOWAIT);
+ /*
+ * At add_to_page_cache_lru() failure, uncharge will
+ * be done automatically.
+ */
+ if (ret) {
spin_unlock(&info->lock);
page_cache_release(filepage);
shmem_unacct_blocks(info->flags, 1);
info->alloced++;
spin_unlock(&info->lock);
+ clear_highpage(filepage);
flush_dcache_page(filepage);
SetPageUptodate(filepage);
+ if (sgp == SGP_DIRTY)
+ set_page_dirty(filepage);
}
done:
- if (*pagep != filepage) {
- *pagep = filepage;
- if (sgp != SGP_FAULT)
- unlock_page(filepage);
-
- }
+ *pagep = filepage;
return 0;
failed:
if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
return VM_FAULT_SIGBUS;
- error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_FAULT, &ret);
+ error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
if (error)
return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
- mark_page_accessed(vmf->page);
return ret | VM_FAULT_LOCKED;
}
#ifdef CONFIG_NUMA
-int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
+static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
struct inode *i = vma->vm_file->f_path.dentry->d_inode;
return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}
-struct mempolicy *
-shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
+static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
+ unsigned long addr)
{
struct inode *i = vma->vm_file->f_path.dentry->d_inode;
unsigned long idx;
if (!user_shm_lock(inode->i_size, user))
goto out_nomem;
info->flags |= VM_LOCKED;
+ mapping_set_unevictable(file->f_mapping);
}
if (!lock && (info->flags & VM_LOCKED) && user) {
user_shm_unlock(inode->i_size, user);
info->flags &= ~VM_LOCKED;
+ mapping_clear_unevictable(file->f_mapping);
+ scan_mapping_unevictable_pages(file->f_mapping);
}
retval = 0;
+
out_nomem:
spin_unlock(&info->lock);
return retval;
return 0;
}
-static struct inode *
-shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
+static struct inode *shmem_get_inode(struct super_block *sb, int mode,
+ dev_t dev, unsigned long flags)
{
struct inode *inode;
struct shmem_inode_info *info;
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
- if (sbinfo->max_inodes) {
- spin_lock(&sbinfo->stat_lock);
- if (!sbinfo->free_inodes) {
- spin_unlock(&sbinfo->stat_lock);
- return NULL;
- }
- sbinfo->free_inodes--;
- spin_unlock(&sbinfo->stat_lock);
- }
+ if (shmem_reserve_inode(sb))
+ return NULL;
inode = new_inode(sb);
if (inode) {
inode->i_mode = mode;
- inode->i_uid = current->fsuid;
- inode->i_gid = current->fsgid;
+ inode->i_uid = current_fsuid();
+ inode->i_gid = current_fsgid();
inode->i_blocks = 0;
- inode->i_mapping->a_ops = &shmem_aops;
inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->i_generation = get_seconds();
info = SHMEM_I(inode);
memset(info, 0, (char *)inode - (char *)info);
spin_lock_init(&info->lock);
+ info->flags = flags & VM_NORESERVE;
INIT_LIST_HEAD(&info->swaplist);
+ cache_no_acl(inode);
switch (mode & S_IFMT) {
default:
init_special_inode(inode, mode, dev);
break;
case S_IFREG:
+ inode->i_mapping->a_ops = &shmem_aops;
inode->i_op = &shmem_inode_operations;
inode->i_fop = &shmem_file_operations;
- mpol_shared_policy_init(&info->policy, sbinfo->policy,
- &sbinfo->policy_nodes);
+ mpol_shared_policy_init(&info->policy,
+ shmem_get_sbmpol(sbinfo));
break;
case S_IFDIR:
inc_nlink(inode);
* Must not load anything in the rbtree,
* mpol_free_shared_policy will not be called.
*/
- mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
- NULL);
+ mpol_shared_policy_init(&info->policy, NULL);
break;
}
- } else if (sbinfo->max_inodes) {
- spin_lock(&sbinfo->stat_lock);
- sbinfo->free_inodes++;
- spin_unlock(&sbinfo->stat_lock);
- }
+ } else
+ shmem_free_inode(sb);
return inode;
}
{
struct inode *inode = mapping->host;
+ if (pos + copied > inode->i_size)
+ i_size_write(inode, pos + copied);
+
set_page_dirty(page);
+ unlock_page(page);
page_cache_release(page);
- if (pos+copied > inode->i_size)
- i_size_write(inode, pos+copied);
-
return copied;
}
-static ssize_t
-shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
-{
- struct inode *inode = file->f_path.dentry->d_inode;
- loff_t pos;
- unsigned long written;
- ssize_t err;
-
- if ((ssize_t) count < 0)
- return -EINVAL;
-
- if (!access_ok(VERIFY_READ, buf, count))
- return -EFAULT;
-
- mutex_lock(&inode->i_mutex);
-
- pos = *ppos;
- written = 0;
-
- err = generic_write_checks(file, &pos, &count, 0);
- if (err || !count)
- goto out;
-
- err = remove_suid(file->f_path.dentry);
- if (err)
- goto out;
-
- inode->i_ctime = inode->i_mtime = CURRENT_TIME;
-
- do {
- struct page *page = NULL;
- unsigned long bytes, index, offset;
- char *kaddr;
- int left;
-
- offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
- index = pos >> PAGE_CACHE_SHIFT;
- bytes = PAGE_CACHE_SIZE - offset;
- if (bytes > count)
- bytes = count;
-
- /*
- * We don't hold page lock across copy from user -
- * what would it guard against? - so no deadlock here.
- * But it still may be a good idea to prefault below.
- */
-
- err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
- if (err)
- break;
-
- left = bytes;
- if (PageHighMem(page)) {
- volatile unsigned char dummy;
- __get_user(dummy, buf);
- __get_user(dummy, buf + bytes - 1);
-
- kaddr = kmap_atomic(page, KM_USER0);
- left = __copy_from_user_inatomic(kaddr + offset,
- buf, bytes);
- kunmap_atomic(kaddr, KM_USER0);
- }
- if (left) {
- kaddr = kmap(page);
- left = __copy_from_user(kaddr + offset, buf, bytes);
- kunmap(page);
- }
-
- written += bytes;
- count -= bytes;
- pos += bytes;
- buf += bytes;
- if (pos > inode->i_size)
- i_size_write(inode, pos);
-
- flush_dcache_page(page);
- set_page_dirty(page);
- mark_page_accessed(page);
- page_cache_release(page);
-
- if (left) {
- pos -= left;
- written -= left;
- err = -EFAULT;
- break;
- }
-
- /*
- * Our dirty pages are not counted in nr_dirty,
- * and we do not attempt to balance dirty pages.
- */
-
- cond_resched();
- } while (count);
-
- *ppos = pos;
- if (written)
- err = written;
-out:
- mutex_unlock(&inode->i_mutex);
- return err;
-}
-
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
struct inode *inode = filp->f_path.dentry->d_inode;
struct address_space *mapping = inode->i_mapping;
unsigned long index, offset;
+ enum sgp_type sgp = SGP_READ;
+
+ /*
+ * Might this read be for a stacking filesystem? Then when reading
+ * holes of a sparse file, we actually need to allocate those pages,
+ * and even mark them dirty, since they will need to be written out.
+ */
+ if (segment_eq(get_fs(), KERNEL_DS))
+ sgp = SGP_DIRTY;
index = *ppos >> PAGE_CACHE_SHIFT;
offset = *ppos & ~PAGE_CACHE_MASK;
break;
}
- desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
+ desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
if (desc->error) {
if (desc->error == -EINVAL)
desc->error = 0;
break;
}
+ if (page)
+ unlock_page(page);
/*
* We must evaluate after, since reads (unlike writes)
file_accessed(filp);
}
-static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
+static ssize_t shmem_file_aio_read(struct kiocb *iocb,
+ const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
- read_descriptor_t desc;
+ struct file *filp = iocb->ki_filp;
+ ssize_t retval;
+ unsigned long seg;
+ size_t count;
+ loff_t *ppos = &iocb->ki_pos;
- if ((ssize_t) count < 0)
- return -EINVAL;
- if (!access_ok(VERIFY_WRITE, buf, count))
- return -EFAULT;
- if (!count)
- return 0;
+ retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
+ if (retval)
+ return retval;
- desc.written = 0;
- desc.count = count;
- desc.arg.buf = buf;
- desc.error = 0;
+ for (seg = 0; seg < nr_segs; seg++) {
+ read_descriptor_t desc;
- do_shmem_file_read(filp, ppos, &desc, file_read_actor);
- if (desc.written)
- return desc.written;
- return desc.error;
+ desc.written = 0;
+ desc.arg.buf = iov[seg].iov_base;
+ desc.count = iov[seg].iov_len;
+ if (desc.count == 0)
+ continue;
+ desc.error = 0;
+ do_shmem_file_read(filp, ppos, &desc, file_read_actor);
+ retval += desc.written;
+ if (desc.error) {
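+			/* report bytes already copied if any, else the error */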
+ retval = retval ?: desc.error;
+ break;
+ }
+ if (desc.count > 0)
+ break;
+ }
+ return retval;
}
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
- struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
+ struct inode *inode;
int error = -ENOSPC;
+ inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE);
if (inode) {
error = security_inode_init_security(inode, dir, NULL, NULL,
NULL);
return error;
}
}
- error = shmem_acl_init(inode, dir);
+#ifdef CONFIG_TMPFS_POSIX_ACL
+ error = generic_acl_init(inode, dir);
if (error) {
iput(inode);
return error;
}
+#else
+ error = 0;
+#endif
if (dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
if (S_ISDIR(mode))
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
struct inode *inode = old_dentry->d_inode;
- struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+ int ret;
/*
* No ordinary (disk based) filesystem counts links as inodes;
* but each new link needs a new dentry, pinning lowmem, and
* tmpfs dentries cannot be pruned until they are unlinked.
*/
- if (sbinfo->max_inodes) {
- spin_lock(&sbinfo->stat_lock);
- if (!sbinfo->free_inodes) {
- spin_unlock(&sbinfo->stat_lock);
- return -ENOSPC;
- }
- sbinfo->free_inodes--;
- spin_unlock(&sbinfo->stat_lock);
- }
+ ret = shmem_reserve_inode(inode->i_sb);
+ if (ret)
+ goto out;
dir->i_size += BOGO_DIRENT_SIZE;
inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
atomic_inc(&inode->i_count); /* New dentry reference */
dget(dentry); /* Extra pinning count for the created dentry */
d_instantiate(dentry, inode);
- return 0;
+out:
+ return ret;
}
static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
- if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
- struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
- if (sbinfo->max_inodes) {
- spin_lock(&sbinfo->stat_lock);
- sbinfo->free_inodes++;
- spin_unlock(&sbinfo->stat_lock);
- }
- }
+ if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
+ shmem_free_inode(inode->i_sb);
dir->i_size -= BOGO_DIRENT_SIZE;
inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
if (len > PAGE_CACHE_SIZE)
return -ENAMETOOLONG;
- inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
+ inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
if (!inode)
return -ENOSPC;
iput(inode);
return error;
}
+ inode->i_mapping->a_ops = &shmem_aops;
inode->i_op = &shmem_symlink_inode_operations;
kaddr = kmap_atomic(page, KM_USER0);
memcpy(kaddr, symname, len);
kunmap_atomic(kaddr, KM_USER0);
set_page_dirty(page);
+ unlock_page(page);
page_cache_release(page);
}
if (dir->i_mode & S_ISGID)
struct page *page = NULL;
int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
+ if (page)
+ unlock_page(page);
return page;
}
};
#ifdef CONFIG_TMPFS_POSIX_ACL
-/**
+/*
* Superblocks without xattr inode operations will get security.* xattr
* support from the VFS "for free". As soon as we have any other xattrs
* like ACLs, we also need to implement the security.* handlers at
* filesystem level, though.
*/
-static size_t shmem_xattr_security_list(struct inode *inode, char *list,
+static size_t shmem_xattr_security_list(struct dentry *dentry, char *list,
size_t list_len, const char *name,
- size_t name_len)
+ size_t name_len, int handler_flags)
{
- return security_inode_listsecurity(inode, list, list_len);
+ return security_inode_listsecurity(dentry->d_inode, list, list_len);
}
-static int shmem_xattr_security_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
+static int shmem_xattr_security_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int handler_flags)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return security_inode_getsecurity(inode, name, buffer, size,
- -EOPNOTSUPP);
+ return xattr_getsecurity(dentry->d_inode, name, buffer, size);
}
-static int shmem_xattr_security_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+static int shmem_xattr_security_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int handler_flags)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return security_inode_setsecurity(inode, name, value, size, flags);
+ return security_inode_setsecurity(dentry->d_inode, name, value,
+ size, flags);
}
static struct xattr_handler shmem_xattr_security_handler = {
};
static struct xattr_handler *shmem_xattr_handlers[] = {
- &shmem_xattr_acl_access_handler,
- &shmem_xattr_acl_default_handler,
+ &generic_acl_access_handler,
+ &generic_acl_default_handler,
&shmem_xattr_security_handler,
NULL
};
return ino->i_ino == inum && fh[0] == ino->i_generation;
}
-static struct dentry *shmem_get_dentry(struct super_block *sb, void *vfh)
+static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len, int fh_type)
{
- struct dentry *de = NULL;
struct inode *inode;
- __u32 *fh = vfh;
- __u64 inum = fh[2];
- inum = (inum << 32) | fh[1];
+	struct dentry *dentry = NULL;
+	u64 inum;
+
+	/* validate the handle length before reading fid->raw[] */
+	if (fh_len < 3)
+		return NULL;
+
+	inum = fid->raw[2];
+	inum = (inum << 32) | fid->raw[1];
- inode = ilookup5(sb, (unsigned long)(inum+fh[0]), shmem_match, vfh);
+ inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
+ shmem_match, fid->raw);
if (inode) {
- de = d_find_alias(inode);
+ dentry = d_find_alias(inode);
iput(inode);
}
- return de? de: ERR_PTR(-ESTALE);
-}
-
-static struct dentry *shmem_decode_fh(struct super_block *sb, __u32 *fh,
- int len, int type,
- int (*acceptable)(void *context, struct dentry *de),
- void *context)
-{
- if (len < 3)
- return ERR_PTR(-ESTALE);
-
- return sb->s_export_op->find_exported_dentry(sb, fh, NULL, acceptable,
- context);
+ return dentry;
}
static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
return 1;
}
-static struct export_operations shmem_export_ops = {
+static const struct export_operations shmem_export_ops = {
.get_parent = shmem_get_parent,
- .get_dentry = shmem_get_dentry,
.encode_fh = shmem_encode_fh,
- .decode_fh = shmem_decode_fh,
+ .fh_to_dentry = shmem_fh_to_dentry,
};
-static int shmem_parse_options(char *options, int *mode, uid_t *uid,
- gid_t *gid, unsigned long *blocks, unsigned long *inodes,
- int *policy, nodemask_t *policy_nodes)
+static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
+ bool remount)
{
char *this_char, *value, *rest;
}
if (*rest)
goto bad_val;
- *blocks = size >> PAGE_CACHE_SHIFT;
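+			/* round up: a trailing partial page still costs a block */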
+ sbinfo->max_blocks =
+ DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
} else if (!strcmp(this_char,"nr_blocks")) {
- *blocks = memparse(value,&rest);
+ sbinfo->max_blocks = memparse(value, &rest);
if (*rest)
goto bad_val;
} else if (!strcmp(this_char,"nr_inodes")) {
- *inodes = memparse(value,&rest);
+ sbinfo->max_inodes = memparse(value, &rest);
if (*rest)
goto bad_val;
} else if (!strcmp(this_char,"mode")) {
- if (!mode)
+ if (remount)
continue;
- *mode = simple_strtoul(value,&rest,8);
+ sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
if (*rest)
goto bad_val;
} else if (!strcmp(this_char,"uid")) {
- if (!uid)
+ if (remount)
continue;
- *uid = simple_strtoul(value,&rest,0);
+ sbinfo->uid = simple_strtoul(value, &rest, 0);
if (*rest)
goto bad_val;
} else if (!strcmp(this_char,"gid")) {
- if (!gid)
+ if (remount)
continue;
- *gid = simple_strtoul(value,&rest,0);
+ sbinfo->gid = simple_strtoul(value, &rest, 0);
if (*rest)
goto bad_val;
} else if (!strcmp(this_char,"mpol")) {
- if (shmem_parse_mpol(value,policy,policy_nodes))
+ if (mpol_parse_str(value, &sbinfo->mpol, 1))
goto bad_val;
} else {
printk(KERN_ERR "tmpfs: Bad mount option %s\n",
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
- unsigned long max_blocks = sbinfo->max_blocks;
- unsigned long max_inodes = sbinfo->max_inodes;
- int policy = sbinfo->policy;
- nodemask_t policy_nodes = sbinfo->policy_nodes;
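+	/* parse into a scratch copy, so a failing remount leaves sbinfo intact */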
+ struct shmem_sb_info config = *sbinfo;
unsigned long blocks;
unsigned long inodes;
int error = -EINVAL;
- if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
- &max_inodes, &policy, &policy_nodes))
+ if (shmem_parse_options(data, &config, true))
return error;
spin_lock(&sbinfo->stat_lock);
blocks = sbinfo->max_blocks - sbinfo->free_blocks;
inodes = sbinfo->max_inodes - sbinfo->free_inodes;
- if (max_blocks < blocks)
+ if (config.max_blocks < blocks)
goto out;
- if (max_inodes < inodes)
+ if (config.max_inodes < inodes)
goto out;
/*
* Those tests also disallow limited->unlimited while any are in
* but we must separately disallow unlimited->limited, because
* in that case we have no record of how much is already in use.
*/
- if (max_blocks && !sbinfo->max_blocks)
+ if (config.max_blocks && !sbinfo->max_blocks)
goto out;
- if (max_inodes && !sbinfo->max_inodes)
+ if (config.max_inodes && !sbinfo->max_inodes)
goto out;
error = 0;
- sbinfo->max_blocks = max_blocks;
- sbinfo->free_blocks = max_blocks - blocks;
- sbinfo->max_inodes = max_inodes;
- sbinfo->free_inodes = max_inodes - inodes;
- sbinfo->policy = policy;
- sbinfo->policy_nodes = policy_nodes;
+ sbinfo->max_blocks = config.max_blocks;
+ sbinfo->free_blocks = config.max_blocks - blocks;
+ sbinfo->max_inodes = config.max_inodes;
+ sbinfo->free_inodes = config.max_inodes - inodes;
+
+ mpol_put(sbinfo->mpol);
+ sbinfo->mpol = config.mpol; /* transfers initial ref */
out:
spin_unlock(&sbinfo->stat_lock);
return error;
}
-#endif
+
+static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
+{
+ struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
+
+ if (sbinfo->max_blocks != shmem_default_max_blocks())
+ seq_printf(seq, ",size=%luk",
+ sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
+ if (sbinfo->max_inodes != shmem_default_max_inodes())
+ seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
+ if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
+ seq_printf(seq, ",mode=%03o", sbinfo->mode);
+ if (sbinfo->uid != 0)
+ seq_printf(seq, ",uid=%u", sbinfo->uid);
+ if (sbinfo->gid != 0)
+ seq_printf(seq, ",gid=%u", sbinfo->gid);
+ shmem_show_mpol(seq, sbinfo->mpol);
+ return 0;
+}
+#endif /* CONFIG_TMPFS */
static void shmem_put_super(struct super_block *sb)
{
sb->s_fs_info = NULL;
}
-static int shmem_fill_super(struct super_block *sb,
- void *data, int silent)
+int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *inode;
struct dentry *root;
- int mode = S_IRWXUGO | S_ISVTX;
- uid_t uid = current->fsuid;
- gid_t gid = current->fsgid;
- int err = -ENOMEM;
struct shmem_sb_info *sbinfo;
- unsigned long blocks = 0;
- unsigned long inodes = 0;
- int policy = MPOL_DEFAULT;
- nodemask_t policy_nodes = node_states[N_HIGH_MEMORY];
+ int err = -ENOMEM;
+
+ /* Round up to L1_CACHE_BYTES to resist false sharing */
+ sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
+ L1_CACHE_BYTES), GFP_KERNEL);
+ if (!sbinfo)
+ return -ENOMEM;
+
+ sbinfo->mode = S_IRWXUGO | S_ISVTX;
+ sbinfo->uid = current_fsuid();
+ sbinfo->gid = current_fsgid();
+ sb->s_fs_info = sbinfo;
#ifdef CONFIG_TMPFS
/*
* but the internal instance is left unlimited.
*/
if (!(sb->s_flags & MS_NOUSER)) {
- blocks = totalram_pages / 2;
- inodes = totalram_pages - totalhigh_pages;
- if (inodes > blocks)
- inodes = blocks;
- if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
- &inodes, &policy, &policy_nodes))
- return -EINVAL;
+ sbinfo->max_blocks = shmem_default_max_blocks();
+ sbinfo->max_inodes = shmem_default_max_inodes();
+ if (shmem_parse_options(data, sbinfo, false)) {
+ err = -EINVAL;
+ goto failed;
+ }
}
sb->s_export_op = &shmem_export_ops;
#else
sb->s_flags |= MS_NOUSER;
#endif
- /* Round up to L1_CACHE_BYTES to resist false sharing */
- sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
- L1_CACHE_BYTES), GFP_KERNEL);
- if (!sbinfo)
- return -ENOMEM;
-
spin_lock_init(&sbinfo->stat_lock);
- sbinfo->max_blocks = blocks;
- sbinfo->free_blocks = blocks;
- sbinfo->max_inodes = inodes;
- sbinfo->free_inodes = inodes;
- sbinfo->policy = policy;
- sbinfo->policy_nodes = policy_nodes;
+ sbinfo->free_blocks = sbinfo->max_blocks;
+ sbinfo->free_inodes = sbinfo->max_inodes;
- sb->s_fs_info = sbinfo;
sb->s_maxbytes = SHMEM_MAX_BYTES;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_flags |= MS_POSIXACL;
#endif
- inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
+ inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
if (!inode)
goto failed;
- inode->i_uid = uid;
- inode->i_gid = gid;
+ inode->i_uid = sbinfo->uid;
+ inode->i_gid = sbinfo->gid;
root = d_alloc_root(inode);
if (!root)
goto failed_iput;
/* only struct inode is valid if it's an inline symlink */
mpol_free_shared_policy(&SHMEM_I(inode)->policy);
}
- shmem_acl_destroy_inode(inode);
kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}
-static void init_once(void *foo, struct kmem_cache *cachep,
- unsigned long flags)
+static void init_once(void *foo)
{
struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
inode_init_once(&p->vfs_inode);
-#ifdef CONFIG_TMPFS_POSIX_ACL
- p->i_acl = NULL;
- p->i_default_acl = NULL;
-#endif
}
static int init_inodecache(void)
{
shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
sizeof(struct shmem_inode_info),
- 0, 0, init_once);
- if (shmem_inode_cachep == NULL)
- return -ENOMEM;
+ 0, SLAB_PANIC, init_once);
return 0;
}
.write_end = shmem_write_end,
#endif
.migratepage = migrate_page,
+ .error_remove_page = generic_error_remove_page,
};
static const struct file_operations shmem_file_operations = {
.mmap = shmem_mmap,
#ifdef CONFIG_TMPFS
.llseek = generic_file_llseek,
- .read = shmem_file_read,
- .write = shmem_file_write,
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = shmem_file_aio_read,
+ .aio_write = generic_file_aio_write,
.fsync = simple_sync_file,
.splice_read = generic_file_splice_read,
.splice_write = generic_file_splice_write,
.getxattr = generic_getxattr,
.listxattr = generic_listxattr,
.removexattr = generic_removexattr,
- .permission = shmem_permission,
+ .check_acl = generic_check_acl,
#endif
};
.getxattr = generic_getxattr,
.listxattr = generic_listxattr,
.removexattr = generic_removexattr,
- .permission = shmem_permission,
+ .check_acl = generic_check_acl,
#endif
};
.getxattr = generic_getxattr,
.listxattr = generic_listxattr,
.removexattr = generic_removexattr,
- .permission = shmem_permission,
+ .check_acl = generic_check_acl,
#endif
};
#ifdef CONFIG_TMPFS
.statfs = shmem_statfs,
.remount_fs = shmem_remount_fs,
+ .show_options = shmem_show_options,
#endif
.delete_inode = shmem_delete_inode,
.drop_inode = generic_delete_inode,
.put_super = shmem_put_super,
};
-static struct vm_operations_struct shmem_vm_ops = {
+static const struct vm_operations_struct shmem_vm_ops = {
.fault = shmem_fault,
#ifdef CONFIG_NUMA
.set_policy = shmem_set_policy,
.get_sb = shmem_get_sb,
.kill_sb = kill_litter_super,
};
-static struct vfsmount *shm_mnt;
-static int __init init_tmpfs(void)
+int __init init_tmpfs(void)
{
int error;
+ error = bdi_init(&shmem_backing_dev_info);
+ if (error)
+ goto out4;
+
error = init_inodecache();
if (error)
goto out3;
out2:
destroy_inodecache();
out3:
+ bdi_destroy(&shmem_backing_dev_info);
+out4:
shm_mnt = ERR_PTR(error);
return error;
}
-module_init(init_tmpfs)
+
+#else /* !CONFIG_SHMEM */
/*
- * shmem_file_setup - get an unlinked file living in tmpfs
+ * tiny-shmem: simple shmemfs and tmpfs using ramfs code
*
+ * This is intended for small systems where the benefits of the full
+ * shmem code (swap-backed and resource-limited) are outweighed by
+ * its complexity. On systems without swap this code should be
+ * effectively equivalent, but much lighter weight.
+ */
+
+#include <linux/ramfs.h>
+
+static struct file_system_type tmpfs_fs_type = {
+ .name = "tmpfs",
+ .get_sb = ramfs_get_sb,
+ .kill_sb = kill_litter_super,
+};
+
+int __init init_tmpfs(void)
+{
+ BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);
+
+ shm_mnt = kern_mount(&tmpfs_fs_type);
+ BUG_ON(IS_ERR(shm_mnt));
+
+ return 0;
+}
+
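+/*
+ * Without CONFIG_SHMEM there is no swap backing, so the entry points
+ * used by swapoff and SysV SHM locking become no-op stubs.
+ */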
+int shmem_unuse(swp_entry_t entry, struct page *page)
+{
+ return 0;
+}
+
+int shmem_lock(struct file *file, int lock, struct user_struct *user)
+{
+ return 0;
+}
+
+#define shmem_vm_ops generic_file_vm_ops
+#define shmem_file_operations ramfs_file_operations
+#define shmem_get_inode(sb, mode, dev, flags) ramfs_get_inode(sb, mode, dev)
+#define shmem_acct_size(flags, size) 0
+#define shmem_unacct_size(flags, size) do {} while (0)
+#define SHMEM_MAX_BYTES MAX_LFS_FILESIZE
+
+#endif /* CONFIG_SHMEM */
+
+/* common code */
+
+/**
+ * shmem_file_setup - get an unlinked file living in tmpfs
* @name: name for dentry (to be seen in /proc/<pid>/maps)
* @size: size to be set for the file
- *
+ * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
*/
-struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
+struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
int error;
struct file *file;
struct inode *inode;
- struct dentry *dentry, *root;
+ struct path path;
+ struct dentry *root;
struct qstr this;
if (IS_ERR(shm_mnt))
this.len = strlen(name);
this.hash = 0; /* will go */
root = shm_mnt->mnt_root;
- dentry = d_alloc(root, &this);
- if (!dentry)
+ path.dentry = d_alloc(root, &this);
+ if (!path.dentry)
goto put_memory;
-
- error = -ENFILE;
- file = get_empty_filp();
- if (!file)
- goto put_dentry;
+ path.mnt = mntget(shm_mnt);
error = -ENOSPC;
- inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
+ inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags);
if (!inode)
- goto close_file;
+ goto put_dentry;
- SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
- d_instantiate(dentry, inode);
+ d_instantiate(path.dentry, inode);
inode->i_size = size;
inode->i_nlink = 0; /* It is unlinked */
- file->f_path.mnt = mntget(shm_mnt);
- file->f_path.dentry = dentry;
- file->f_mapping = inode->i_mapping;
- file->f_op = &shmem_file_operations;
- file->f_mode = FMODE_WRITE | FMODE_READ;
+#ifndef CONFIG_MMU
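+	/* nommu: preallocate the backing pages so the file can be mmap'd */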
+ error = ramfs_nommu_expand_for_mapping(inode, size);
+ if (error)
+ goto put_dentry;
+#endif
+
+ error = -ENFILE;
+ file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
+ &shmem_file_operations);
+ if (!file)
+ goto put_dentry;
+
return file;
-close_file:
- put_filp(file);
put_dentry:
- dput(dentry);
+ path_put(&path);
put_memory:
shmem_unacct_size(flags, size);
return ERR_PTR(error);
}
+EXPORT_SYMBOL_GPL(shmem_file_setup);
-/*
+/**
* shmem_zero_setup - setup a shared anonymous mapping
- *
* @vma: the vma to be mmapped is prepared by do_mmap_pgoff
*/
int shmem_zero_setup(struct vm_area_struct *vma)