X-Git-Url: http://ftp.safe.ca/?p=safe%2Fjmp%2Flinux-2.6;a=blobdiff_plain;f=mm%2Fshmem.c;h=eef4ebea51581f9aa8a6d2244f8ce5fa57f99c22;hp=2e03d6031c244dbc213514bfb95cb862d4583975;hb=d5aa407f59f5b83d2c50ec88f5bf56d40f1f8978;hpb=cb5f7b9a47963d9238398cd0c2676473e3c6896d

diff --git a/mm/shmem.c b/mm/shmem.c
index 2e03d60..eef4ebe 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -14,33 +14,41 @@
  * Copyright (c) 2004, Luke Kenneth Casson Leighton
  * Copyright (c) 2004 Red Hat, Inc., James Morris
  *
+ * tiny-shmem:
+ * Copyright (c) 2004, 2008 Matt Mackall
+ *
  * This file is released under the GPL.
  */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+static struct vfsmount *shm_mnt;
+
+#ifdef CONFIG_SHMEM
 /*
  * This virtual memory filesystem is heavily based on the ramfs. It
  * extends ramfs by the ability to use swap and honor resource limits
  * which makes it a completely usable filesystem.
  */
-#include
-#include
-#include
 #include
 #include
+#include
 #include
-#include
 #include
-#include
-#include
-#include
 #include
 #include
 #include
 #include
-#include
 #include
-#include
 #include
 #include
 #include
@@ -49,21 +57,35 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
-/* This magic number is used in glibc for posix shared memory */
-#define TMPFS_MAGIC 0x01021994
-
+/*
+ * The maximum size of a shmem/tmpfs file is limited by the maximum size of
+ * its triple-indirect swap vector - see illustration at shmem_swp_entry().
+ *
+ * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
+ * but one eighth of that on a 64-bit kernel. With 8kB page size, maximum
+ * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
+ * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
+ *
+ * We use / and * instead of shifts in the definitions below, so that the swap
+ * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
+ */
 #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
-#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
-#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512)
+#define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
-#define SHMEM_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
-#define SHMEM_MAX_BYTES ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
+#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
+#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)
+#define SHMEM_MAX_BYTES min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
+#define SHMEM_MAX_INDEX ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))
+
+#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512)
 #define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
 /* info->flags needs VM_flags to handle pagein/truncate races efficiently */
@@ -84,6 +106,18 @@ enum sgp_type {
         SGP_WRITE,      /* may exceed i_size, may allocate page */
 };
+#ifdef CONFIG_TMPFS
+static unsigned long shmem_default_max_blocks(void)
+{
+        return totalram_pages / 2;
+}
+
+static unsigned long shmem_default_max_inodes(void)
+{
+        return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
+}
+#endif
+
 static int shmem_getpage(struct inode *inode, unsigned long idx,
                          struct page **pagep, enum sgp_type sgp, int *type);
@@ -150,13 +184,13 @@ static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
  */
 static inline int shmem_acct_size(unsigned long flags, loff_t size)
 {
-        return (flags & VM_ACCOUNT)?
-                security_vm_enough_memory(VM_ACCT(size)): 0;
+        return (flags & VM_NORESERVE) ?
+                0 : security_vm_enough_memory_kern(VM_ACCT(size));
 }
 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
 {
-        if (flags & VM_ACCOUNT)
+        if (!(flags & VM_NORESERVE))
                 vm_unacct_memory(VM_ACCT(size));
 }
@@ -168,13 +202,13 @@ static inline void shmem_unacct_size(unsigned long flags, loff_t size)
  */
 static inline int shmem_acct_block(unsigned long flags)
 {
-        return (flags & VM_ACCOUNT)?
-                0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
+        return (flags & VM_NORESERVE) ?
+ security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0; } static inline void shmem_unacct_blocks(unsigned long flags, long pages) { - if (!(flags & VM_ACCOUNT)) + if (flags & VM_NORESERVE) vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE)); } @@ -184,11 +218,11 @@ static const struct file_operations shmem_file_operations; static const struct inode_operations shmem_inode_operations; static const struct inode_operations shmem_dir_inode_operations; static const struct inode_operations shmem_special_inode_operations; -static struct vm_operations_struct shmem_vm_ops; +static const struct vm_operations_struct shmem_vm_ops; static struct backing_dev_info shmem_backing_dev_info __read_mostly = { .ra_pages = 0, /* No readahead */ - .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK, + .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED, .unplug_io_fn = default_unplug_io_fn, }; @@ -231,9 +265,8 @@ static void shmem_free_inode(struct super_block *sb) } } -/* +/** * shmem_recalc_inode - recalculate the size of an inode - * * @inode: inode to recalc * * We have to calculate the free blocks since the mm can drop @@ -257,9 +290,8 @@ static void shmem_recalc_inode(struct inode *inode) } } -/* +/** * shmem_swp_entry - find the swap vector position in the info structure - * * @info: info structure for the inode * @index: index of the page to find * @page: optional page to add to the structure. Has to be preset to @@ -361,13 +393,13 @@ static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, uns } } -/* +/** * shmem_swp_alloc - get the position of the swap entry for the page. - * If it does not exist allocate the entry. - * * @info: info structure for the inode * @index: index of the page to find * @sgp: check and recheck i_size? skip allocation? + * + * If the entry does not exist, allocate it. 
*/ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp) { @@ -427,9 +459,8 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long return entry; } -/* +/** * shmem_free_swp - free some swap entries in a directory - * * @dir: pointer to the directory * @edir: pointer after last entry of the directory * @punch_lock: pointer to spinlock when needed for the holepunch case @@ -779,7 +810,7 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr) error = inode_setattr(inode, attr); #ifdef CONFIG_TMPFS_POSIX_ACL if (!error && (attr->ia_valid & ATTR_MODE)) - error = generic_acl_chmod(inode, &shmem_acl_ops); + error = generic_acl_chmod(inode); #endif if (page) page_cache_release(page); @@ -833,15 +864,17 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s idx = 0; ptr = info->i_direct; spin_lock(&info->lock); + if (!info->swapped) { + list_del_init(&info->swaplist); + goto lost2; + } limit = info->next_index; size = limit; if (size > SHMEM_NR_DIRECT) size = SHMEM_NR_DIRECT; offset = shmem_find_swp(entry, ptr, ptr+size); - if (offset >= 0) { - shmem_swp_balance_unmap(); + if (offset >= 0) goto found; - } if (!info->i_indirect) goto lost2; @@ -879,11 +912,11 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s if (size > ENTRIES_PER_PAGE) size = ENTRIES_PER_PAGE; offset = shmem_find_swp(entry, ptr, ptr+size); + shmem_swp_unmap(ptr); if (offset >= 0) { shmem_dir_unmap(dir); goto found; } - shmem_swp_unmap(ptr); } } lost1: @@ -893,10 +926,50 @@ lost2: return 0; found: idx += offset; - inode = &info->vfs_inode; - error = add_to_page_cache(page, inode->i_mapping, idx, GFP_ATOMIC); + inode = igrab(&info->vfs_inode); + spin_unlock(&info->lock); + + /* + * Move _head_ to start search for next from here. + * But be careful: shmem_delete_inode checks list_empty without taking + * mutex, and there's an instant in list_move_tail when info->swaplist + * would appear empty, if it were the only one on shmem_swaplist. We + * could avoid doing it if inode NULL; or use this minor optimization. + */ + if (shmem_swaplist.next != &info->swaplist) + list_move_tail(&shmem_swaplist, &info->swaplist); + mutex_unlock(&shmem_swaplist_mutex); + + error = 1; + if (!inode) + goto out; + /* + * Charge page using GFP_KERNEL while we can wait. + * Charged back to the user(not to caller) when swap account is used. + * add_to_page_cache() will be called with GFP_NOWAIT. 
+ */ + error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); + if (error) + goto out; + error = radix_tree_preload(GFP_KERNEL); + if (error) { + mem_cgroup_uncharge_cache_page(page); + goto out; + } + error = 1; + + spin_lock(&info->lock); + ptr = shmem_swp_entry(info, idx, NULL); + if (ptr && ptr->val == entry.val) { + error = add_to_page_cache_locked(page, inode->i_mapping, + idx, GFP_NOWAIT); + /* does mem_cgroup_uncharge_cache_page on error */ + } else /* we must compensate for our precharge above */ + mem_cgroup_uncharge_cache_page(page); + if (error == -EEXIST) { struct page *filepage = find_get_page(inode->i_mapping, idx); + error = 1; if (filepage) { /* * There might be a more uptodate page coming down @@ -911,16 +984,19 @@ found: delete_from_swap_cache(page); set_page_dirty(page); info->flags |= SHMEM_PAGEIN; - shmem_swp_set(info, ptr + offset, 0); + shmem_swp_set(info, ptr, 0); + swap_free(entry); + error = 1; /* not an error, but entry was found */ } - shmem_swp_unmap(ptr); + if (ptr) + shmem_swp_unmap(ptr); spin_unlock(&info->lock); - /* - * Decrement swap count even when the entry is left behind: - * try_to_unuse will skip over mms, then reincrement count. - */ - swap_free(entry); - return 1; + radix_tree_preload_end(); +out: + unlock_page(page); + page_cache_release(page); + iput(inode); /* allows for NULL */ + return error; } /* @@ -935,18 +1011,20 @@ int shmem_unuse(swp_entry_t entry, struct page *page) mutex_lock(&shmem_swaplist_mutex); list_for_each_safe(p, next, &shmem_swaplist) { info = list_entry(p, struct shmem_inode_info, swaplist); - if (!info->swapped) - list_del_init(&info->swaplist); - else if (shmem_unuse_inode(info, entry, page)) { - /* move head to start search for next from here */ - list_move_tail(&shmem_swaplist, &info->swaplist); - found = 1; - break; - } + found = shmem_unuse_inode(info, entry, page); cond_resched(); + if (found) + goto out; } mutex_unlock(&shmem_swaplist_mutex); - return found; + /* + * Can some race bring us here? We've been holding page lock, + * so I think not; but would rather try again later than BUG() + */ + unlock_page(page); + page_cache_release(page); +out: + return (found < 0) ? found : 0; } /* @@ -975,8 +1053,9 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) * sync from ever calling shmem_writepage; but a stacking filesystem * may use the ->writepage of its underlying filesystem, in which case * tmpfs should write out to swap only in response to memory pressure, - * and not for pdflush or sync. However, in those cases, we do still - * want to check if there's a redundant swappage to be discarded. + * and not for the writeback threads or sync. However, in those cases, + * we do still want to check if there's a redundant swappage to be + * discarded. 
*/ if (wbc->for_reclaim) swap = get_swap_page(); @@ -1003,25 +1082,33 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) remove_from_page_cache(page); shmem_swp_set(info, entry, swap.val); shmem_swp_unmap(entry); + if (list_empty(&info->swaplist)) + inode = igrab(inode); + else + inode = NULL; spin_unlock(&info->lock); - if (list_empty(&info->swaplist)) { + swap_shmem_alloc(swap); + BUG_ON(page_mapped(page)); + page_cache_release(page); /* pagecache ref */ + swap_writepage(page, wbc); + if (inode) { mutex_lock(&shmem_swaplist_mutex); /* move instead of add in case we're racing */ list_move_tail(&info->swaplist, &shmem_swaplist); mutex_unlock(&shmem_swaplist_mutex); + iput(inode); } - swap_duplicate(swap); - BUG_ON(page_mapped(page)); - page_cache_release(page); /* pagecache ref */ - set_page_dirty(page); - unlock_page(page); return 0; } shmem_swp_unmap(entry); unlock: spin_unlock(&info->lock); - swap_free(swap); + /* + * add_to_swap_cache() doesn't return -EEXIST, so we can safely + * clear SWAP_HAS_CACHE flag. + */ + swapcache_free(swap, NULL); redirty: set_page_dirty(page); if (wbc->for_reclaim) @@ -1031,68 +1118,48 @@ redirty: } #ifdef CONFIG_NUMA -static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes) +#ifdef CONFIG_TMPFS +static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) { - char *nodelist = strchr(value, ':'); - int err = 1; + char buffer[64]; - if (nodelist) { - /* NUL-terminate policy string */ - *nodelist++ = '\0'; - if (nodelist_parse(nodelist, *policy_nodes)) - goto out; - if (!nodes_subset(*policy_nodes, node_states[N_HIGH_MEMORY])) - goto out; - } - if (!strcmp(value, "default")) { - *policy = MPOL_DEFAULT; - /* Don't allow a nodelist */ - if (!nodelist) - err = 0; - } else if (!strcmp(value, "prefer")) { - *policy = MPOL_PREFERRED; - /* Insist on a nodelist of one node only */ - if (nodelist) { - char *rest = nodelist; - while (isdigit(*rest)) - rest++; - if (!*rest) - err = 0; - } - } else if (!strcmp(value, "bind")) { - *policy = MPOL_BIND; - /* Insist on a nodelist */ - if (nodelist) - err = 0; - } else if (!strcmp(value, "interleave")) { - *policy = MPOL_INTERLEAVE; - /* - * Default to online nodes with memory if no nodelist - */ - if (!nodelist) - *policy_nodes = node_states[N_HIGH_MEMORY]; - err = 0; + if (!mpol || mpol->mode == MPOL_DEFAULT) + return; /* show nothing */ + + mpol_to_str(buffer, sizeof(buffer), mpol, 1); + + seq_printf(seq, ",mpol=%s", buffer); +} + +static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) +{ + struct mempolicy *mpol = NULL; + if (sbinfo->mpol) { + spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ + mpol = sbinfo->mpol; + mpol_get(mpol); + spin_unlock(&sbinfo->stat_lock); } -out: - /* Restore string for error message */ - if (nodelist) - *--nodelist = ':'; - return err; + return mpol; } +#endif /* CONFIG_TMPFS */ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, struct shmem_inode_info *info, unsigned long idx) { + struct mempolicy mpol, *spol; struct vm_area_struct pvma; struct page *page; + spol = mpol_cond_copy(&mpol, + mpol_shared_policy_lookup(&info->policy, idx)); + /* Create a pseudo vma that just contains the policy */ pvma.vm_start = 0; pvma.vm_pgoff = idx; pvma.vm_ops = NULL; - pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx); + pvma.vm_policy = spol; page = swapin_readahead(entry, gfp, &pvma, 0); - mpol_free(pvma.vm_policy); return page; } @@ -1100,23 +1167,24 @@ static struct page 
*shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, unsigned long idx) { struct vm_area_struct pvma; - struct page *page; /* Create a pseudo vma that just contains the policy */ pvma.vm_start = 0; pvma.vm_pgoff = idx; pvma.vm_ops = NULL; pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx); - page = alloc_page_vma(gfp, &pvma, 0); - mpol_free(pvma.vm_policy); - return page; + + /* + * alloc_page_vma() will drop the shared policy reference + */ + return alloc_page_vma(gfp, &pvma, 0); } -#else -static inline int shmem_parse_mpol(char *value, int *policy, - nodemask_t *policy_nodes) +#else /* !CONFIG_NUMA */ +#ifdef CONFIG_TMPFS +static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p) { - return 1; } +#endif /* CONFIG_TMPFS */ static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, struct shmem_inode_info *info, unsigned long idx) @@ -1129,6 +1197,13 @@ static inline struct page *shmem_alloc_page(gfp_t gfp, { return alloc_page(gfp); } +#endif /* CONFIG_NUMA */ + +#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS) +static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) +{ + return NULL; +} #endif /* @@ -1172,6 +1247,16 @@ repeat: goto done; error = 0; gfp = mapping_gfp_mask(mapping); + if (!filepage) { + /* + * Try to preload while we can wait, to not make a habit of + * draining atomic reserves; but don't latch on to this cpu. + */ + error = radix_tree_preload(gfp & ~__GFP_HIGHMEM); + if (error) + goto failed; + radix_tree_preload_end(); + } spin_lock(&info->lock); shmem_recalc_inode(inode); @@ -1216,7 +1301,7 @@ repeat: } /* We have to do this with page locked to prevent races */ - if (TestSetPageLocked(swappage)) { + if (!trylock_page(swappage)) { shmem_swp_unmap(entry); spin_unlock(&info->lock); wait_on_page_locked(swappage); @@ -1252,8 +1337,8 @@ repeat: SetPageUptodate(filepage); set_page_dirty(filepage); swap_free(swap); - } else if (!(error = add_to_page_cache( - swappage, mapping, idx, GFP_ATOMIC))) { + } else if (!(error = add_to_page_cache_locked(swappage, mapping, + idx, GFP_NOWAIT))) { info->flags |= SHMEM_PAGEIN; shmem_swp_set(info, entry, 0); shmem_swp_unmap(entry); @@ -1265,19 +1350,30 @@ repeat: } else { shmem_swp_unmap(entry); spin_unlock(&info->lock); - unlock_page(swappage); - page_cache_release(swappage); if (error == -ENOMEM) { - /* let kswapd refresh zone for GFP_ATOMICs */ - congestion_wait(WRITE, HZ/50); + /* + * reclaim from proper memory cgroup and + * call memcg's OOM if needed. 
+ */ + error = mem_cgroup_shmem_charge_fallback( + swappage, + current->mm, + gfp); + if (error) { + unlock_page(swappage); + page_cache_release(swappage); + goto failed; + } } + unlock_page(swappage); + page_cache_release(swappage); goto repeat; } } else if (sgp == SGP_READ && !filepage) { shmem_swp_unmap(entry); filepage = find_get_page(mapping, idx); if (filepage && - (!PageUptodate(filepage) || TestSetPageLocked(filepage))) { + (!PageUptodate(filepage) || !trylock_page(filepage))) { spin_unlock(&info->lock); wait_on_page_locked(filepage); page_cache_release(filepage); @@ -1307,6 +1403,8 @@ repeat: } if (!filepage) { + int ret; + spin_unlock(&info->lock); filepage = shmem_alloc_page(gfp, info, idx); if (!filepage) { @@ -1315,6 +1413,18 @@ repeat: error = -ENOMEM; goto failed; } + SetPageSwapBacked(filepage); + + /* Precharge page while we can wait, compensate after */ + error = mem_cgroup_cache_charge(filepage, current->mm, + GFP_KERNEL); + if (error) { + page_cache_release(filepage); + shmem_unacct_blocks(info->flags, 1); + shmem_free_blocks(inode, 1); + filepage = NULL; + goto failed; + } spin_lock(&info->lock); entry = shmem_swp_alloc(info, idx, sgp); @@ -1324,8 +1434,17 @@ repeat: swap = *entry; shmem_swp_unmap(entry); } - if (error || swap.val || 0 != add_to_page_cache_lru( - filepage, mapping, idx, GFP_ATOMIC)) { + ret = error || swap.val; + if (ret) + mem_cgroup_uncharge_cache_page(filepage); + else + ret = add_to_page_cache_lru(filepage, mapping, + idx, GFP_NOWAIT); + /* + * At add_to_page_cache_lru() failure, uncharge will + * be done automatically. + */ + if (ret) { spin_unlock(&info->lock); page_cache_release(filepage); shmem_unacct_blocks(info->flags, 1); @@ -1371,7 +1490,6 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (error) return ((error == -ENOMEM) ? 
VM_FAULT_OOM : VM_FAULT_SIGBUS); - mark_page_accessed(vmf->page); return ret | VM_FAULT_LOCKED; } @@ -1404,12 +1522,16 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user) if (!user_shm_lock(inode->i_size, user)) goto out_nomem; info->flags |= VM_LOCKED; + mapping_set_unevictable(file->f_mapping); } if (!lock && (info->flags & VM_LOCKED) && user) { user_shm_unlock(inode->i_size, user); info->flags &= ~VM_LOCKED; + mapping_clear_unevictable(file->f_mapping); + scan_mapping_unevictable_pages(file->f_mapping); } retval = 0; + out_nomem: spin_unlock(&info->lock); return retval; @@ -1423,8 +1545,8 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma) return 0; } -static struct inode * -shmem_get_inode(struct super_block *sb, int mode, dev_t dev) +static struct inode *shmem_get_inode(struct super_block *sb, int mode, + dev_t dev, unsigned long flags) { struct inode *inode; struct shmem_inode_info *info; @@ -1436,17 +1558,18 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev) inode = new_inode(sb); if (inode) { inode->i_mode = mode; - inode->i_uid = current->fsuid; - inode->i_gid = current->fsgid; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); inode->i_blocks = 0; - inode->i_mapping->a_ops = &shmem_aops; inode->i_mapping->backing_dev_info = &shmem_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inode->i_generation = get_seconds(); info = SHMEM_I(inode); memset(info, 0, (char *)inode - (char *)info); spin_lock_init(&info->lock); + info->flags = flags & VM_NORESERVE; INIT_LIST_HEAD(&info->swaplist); + cache_no_acl(inode); switch (mode & S_IFMT) { default: @@ -1454,10 +1577,11 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev) init_special_inode(inode, mode, dev); break; case S_IFREG: + inode->i_mapping->a_ops = &shmem_aops; inode->i_op = &shmem_inode_operations; inode->i_fop = &shmem_file_operations; - mpol_shared_policy_init(&info->policy, sbinfo->policy, - &sbinfo->policy_nodes); + mpol_shared_policy_init(&info->policy, + shmem_get_sbmpol(sbinfo)); break; case S_IFDIR: inc_nlink(inode); @@ -1471,8 +1595,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev) * Must not load anything in the rbtree, * mpol_free_shared_policy will not be called. 
*/ - mpol_shared_policy_init(&info->policy, MPOL_DEFAULT, - NULL); + mpol_shared_policy_init(&info->policy, NULL); break; } } else @@ -1518,8 +1641,8 @@ shmem_write_end(struct file *file, struct address_space *mapping, if (pos + copied > inode->i_size) i_size_write(inode, pos + copied); - unlock_page(page); set_page_dirty(page); + unlock_page(page); page_cache_release(page); return copied; @@ -1627,26 +1750,38 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_ file_accessed(filp); } -static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) +static ssize_t shmem_file_aio_read(struct kiocb *iocb, + const struct iovec *iov, unsigned long nr_segs, loff_t pos) { - read_descriptor_t desc; + struct file *filp = iocb->ki_filp; + ssize_t retval; + unsigned long seg; + size_t count; + loff_t *ppos = &iocb->ki_pos; - if ((ssize_t) count < 0) - return -EINVAL; - if (!access_ok(VERIFY_WRITE, buf, count)) - return -EFAULT; - if (!count) - return 0; + retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE); + if (retval) + return retval; - desc.written = 0; - desc.count = count; - desc.arg.buf = buf; - desc.error = 0; + for (seg = 0; seg < nr_segs; seg++) { + read_descriptor_t desc; - do_shmem_file_read(filp, ppos, &desc, file_read_actor); - if (desc.written) - return desc.written; - return desc.error; + desc.written = 0; + desc.arg.buf = iov[seg].iov_base; + desc.count = iov[seg].iov_len; + if (desc.count == 0) + continue; + desc.error = 0; + do_shmem_file_read(filp, ppos, &desc, file_read_actor); + retval += desc.written; + if (desc.error) { + retval = retval ?: desc.error; + break; + } + if (desc.count > 0) + break; + } + return retval; } static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) @@ -1676,9 +1811,10 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) static int shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) { - struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev); + struct inode *inode; int error = -ENOSPC; + inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE); if (inode) { error = security_inode_init_security(inode, dir, NULL, NULL, NULL); @@ -1688,11 +1824,15 @@ shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) return error; } } - error = shmem_acl_init(inode, dir); +#ifdef CONFIG_TMPFS_POSIX_ACL + error = generic_acl_init(inode, dir); if (error) { iput(inode); return error; } +#else + error = 0; +#endif if (dir->i_mode & S_ISGID) { inode->i_gid = dir->i_gid; if (S_ISDIR(mode)) @@ -1817,7 +1957,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s if (len > PAGE_CACHE_SIZE) return -ENAMETOOLONG; - inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0); + inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); if (!inode) return -ENOSPC; @@ -1843,12 +1983,13 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s iput(inode); return error; } - unlock_page(page); + inode->i_mapping->a_ops = &shmem_aops; inode->i_op = &shmem_symlink_inode_operations; kaddr = kmap_atomic(page, KM_USER0); memcpy(kaddr, symname, len); kunmap_atomic(kaddr, KM_USER0); set_page_dirty(page); + unlock_page(page); page_cache_release(page); } if (dir->i_mode & S_ISGID) @@ -1899,35 +2040,35 @@ static const struct inode_operations shmem_symlink_inode_operations = { }; #ifdef CONFIG_TMPFS_POSIX_ACL -/** +/* * Superblocks without xattr inode operations will get 
security.* xattr * support from the VFS "for free". As soon as we have any other xattrs * like ACLs, we also need to implement the security.* handlers at * filesystem level, though. */ -static size_t shmem_xattr_security_list(struct inode *inode, char *list, +static size_t shmem_xattr_security_list(struct dentry *dentry, char *list, size_t list_len, const char *name, - size_t name_len) + size_t name_len, int handler_flags) { - return security_inode_listsecurity(inode, list, list_len); + return security_inode_listsecurity(dentry->d_inode, list, list_len); } -static int shmem_xattr_security_get(struct inode *inode, const char *name, - void *buffer, size_t size) +static int shmem_xattr_security_get(struct dentry *dentry, const char *name, + void *buffer, size_t size, int handler_flags) { if (strcmp(name, "") == 0) return -EINVAL; - return security_inode_getsecurity(inode, name, buffer, size, - -EOPNOTSUPP); + return xattr_getsecurity(dentry->d_inode, name, buffer, size); } -static int shmem_xattr_security_set(struct inode *inode, const char *name, - const void *value, size_t size, int flags) +static int shmem_xattr_security_set(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags, int handler_flags) { if (strcmp(name, "") == 0) return -EINVAL; - return security_inode_setsecurity(inode, name, value, size, flags); + return security_inode_setsecurity(dentry->d_inode, name, value, + size, flags); } static struct xattr_handler shmem_xattr_security_handler = { @@ -1938,8 +2079,8 @@ static struct xattr_handler shmem_xattr_security_handler = { }; static struct xattr_handler *shmem_xattr_handlers[] = { - &shmem_xattr_acl_access_handler, - &shmem_xattr_acl_default_handler, + &generic_acl_access_handler, + &generic_acl_default_handler, &shmem_xattr_security_handler, NULL }; @@ -2015,9 +2156,8 @@ static const struct export_operations shmem_export_ops = { .fh_to_dentry = shmem_fh_to_dentry, }; -static int shmem_parse_options(char *options, int *mode, uid_t *uid, - gid_t *gid, unsigned long *blocks, unsigned long *inodes, - int *policy, nodemask_t *policy_nodes) +static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, + bool remount) { char *this_char, *value, *rest; @@ -2060,35 +2200,36 @@ static int shmem_parse_options(char *options, int *mode, uid_t *uid, } if (*rest) goto bad_val; - *blocks = DIV_ROUND_UP(size, PAGE_CACHE_SIZE); + sbinfo->max_blocks = + DIV_ROUND_UP(size, PAGE_CACHE_SIZE); } else if (!strcmp(this_char,"nr_blocks")) { - *blocks = memparse(value,&rest); + sbinfo->max_blocks = memparse(value, &rest); if (*rest) goto bad_val; } else if (!strcmp(this_char,"nr_inodes")) { - *inodes = memparse(value,&rest); + sbinfo->max_inodes = memparse(value, &rest); if (*rest) goto bad_val; } else if (!strcmp(this_char,"mode")) { - if (!mode) + if (remount) continue; - *mode = simple_strtoul(value,&rest,8); + sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; if (*rest) goto bad_val; } else if (!strcmp(this_char,"uid")) { - if (!uid) + if (remount) continue; - *uid = simple_strtoul(value,&rest,0); + sbinfo->uid = simple_strtoul(value, &rest, 0); if (*rest) goto bad_val; } else if (!strcmp(this_char,"gid")) { - if (!gid) + if (remount) continue; - *gid = simple_strtoul(value,&rest,0); + sbinfo->gid = simple_strtoul(value, &rest, 0); if (*rest) goto bad_val; } else if (!strcmp(this_char,"mpol")) { - if (shmem_parse_mpol(value,policy,policy_nodes)) + if (mpol_parse_str(value, &sbinfo->mpol, 1)) goto bad_val; } else { printk(KERN_ERR "tmpfs: Bad mount 
option %s\n", @@ -2108,24 +2249,20 @@ bad_val: static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) { struct shmem_sb_info *sbinfo = SHMEM_SB(sb); - unsigned long max_blocks = sbinfo->max_blocks; - unsigned long max_inodes = sbinfo->max_inodes; - int policy = sbinfo->policy; - nodemask_t policy_nodes = sbinfo->policy_nodes; + struct shmem_sb_info config = *sbinfo; unsigned long blocks; unsigned long inodes; int error = -EINVAL; - if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks, - &max_inodes, &policy, &policy_nodes)) + if (shmem_parse_options(data, &config, true)) return error; spin_lock(&sbinfo->stat_lock); blocks = sbinfo->max_blocks - sbinfo->free_blocks; inodes = sbinfo->max_inodes - sbinfo->free_inodes; - if (max_blocks < blocks) + if (config.max_blocks < blocks) goto out; - if (max_inodes < inodes) + if (config.max_inodes < inodes) goto out; /* * Those tests also disallow limited->unlimited while any are in @@ -2133,23 +2270,43 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) * but we must separately disallow unlimited->limited, because * in that case we have no record of how much is already in use. */ - if (max_blocks && !sbinfo->max_blocks) + if (config.max_blocks && !sbinfo->max_blocks) goto out; - if (max_inodes && !sbinfo->max_inodes) + if (config.max_inodes && !sbinfo->max_inodes) goto out; error = 0; - sbinfo->max_blocks = max_blocks; - sbinfo->free_blocks = max_blocks - blocks; - sbinfo->max_inodes = max_inodes; - sbinfo->free_inodes = max_inodes - inodes; - sbinfo->policy = policy; - sbinfo->policy_nodes = policy_nodes; + sbinfo->max_blocks = config.max_blocks; + sbinfo->free_blocks = config.max_blocks - blocks; + sbinfo->max_inodes = config.max_inodes; + sbinfo->free_inodes = config.max_inodes - inodes; + + mpol_put(sbinfo->mpol); + sbinfo->mpol = config.mpol; /* transfers initial ref */ out: spin_unlock(&sbinfo->stat_lock); return error; } -#endif + +static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs) +{ + struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb); + + if (sbinfo->max_blocks != shmem_default_max_blocks()) + seq_printf(seq, ",size=%luk", + sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10)); + if (sbinfo->max_inodes != shmem_default_max_inodes()) + seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); + if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) + seq_printf(seq, ",mode=%03o", sbinfo->mode); + if (sbinfo->uid != 0) + seq_printf(seq, ",uid=%u", sbinfo->uid); + if (sbinfo->gid != 0) + seq_printf(seq, ",gid=%u", sbinfo->gid); + shmem_show_mpol(seq, sbinfo->mpol); + return 0; +} +#endif /* CONFIG_TMPFS */ static void shmem_put_super(struct super_block *sb) { @@ -2157,20 +2314,23 @@ static void shmem_put_super(struct super_block *sb) sb->s_fs_info = NULL; } -static int shmem_fill_super(struct super_block *sb, - void *data, int silent) +int shmem_fill_super(struct super_block *sb, void *data, int silent) { struct inode *inode; struct dentry *root; - int mode = S_IRWXUGO | S_ISVTX; - uid_t uid = current->fsuid; - gid_t gid = current->fsgid; - int err = -ENOMEM; struct shmem_sb_info *sbinfo; - unsigned long blocks = 0; - unsigned long inodes = 0; - int policy = MPOL_DEFAULT; - nodemask_t policy_nodes = node_states[N_HIGH_MEMORY]; + int err = -ENOMEM; + + /* Round up to L1_CACHE_BYTES to resist false sharing */ + sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), + L1_CACHE_BYTES), GFP_KERNEL); + if (!sbinfo) + return -ENOMEM; + + sbinfo->mode = S_IRWXUGO | S_ISVTX; + sbinfo->uid = 
current_fsuid(); + sbinfo->gid = current_fsgid(); + sb->s_fs_info = sbinfo; #ifdef CONFIG_TMPFS /* @@ -2179,34 +2339,22 @@ static int shmem_fill_super(struct super_block *sb, * but the internal instance is left unlimited. */ if (!(sb->s_flags & MS_NOUSER)) { - blocks = totalram_pages / 2; - inodes = totalram_pages - totalhigh_pages; - if (inodes > blocks) - inodes = blocks; - if (shmem_parse_options(data, &mode, &uid, &gid, &blocks, - &inodes, &policy, &policy_nodes)) - return -EINVAL; + sbinfo->max_blocks = shmem_default_max_blocks(); + sbinfo->max_inodes = shmem_default_max_inodes(); + if (shmem_parse_options(data, sbinfo, false)) { + err = -EINVAL; + goto failed; + } } sb->s_export_op = &shmem_export_ops; #else sb->s_flags |= MS_NOUSER; #endif - /* Round up to L1_CACHE_BYTES to resist false sharing */ - sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info), - L1_CACHE_BYTES), GFP_KERNEL); - if (!sbinfo) - return -ENOMEM; - spin_lock_init(&sbinfo->stat_lock); - sbinfo->max_blocks = blocks; - sbinfo->free_blocks = blocks; - sbinfo->max_inodes = inodes; - sbinfo->free_inodes = inodes; - sbinfo->policy = policy; - sbinfo->policy_nodes = policy_nodes; + sbinfo->free_blocks = sbinfo->max_blocks; + sbinfo->free_inodes = sbinfo->max_inodes; - sb->s_fs_info = sbinfo; sb->s_maxbytes = SHMEM_MAX_BYTES; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; @@ -2218,11 +2366,11 @@ static int shmem_fill_super(struct super_block *sb, sb->s_flags |= MS_POSIXACL; #endif - inode = shmem_get_inode(sb, S_IFDIR | mode, 0); + inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); if (!inode) goto failed; - inode->i_uid = uid; - inode->i_gid = gid; + inode->i_uid = sbinfo->uid; + inode->i_gid = sbinfo->gid; root = d_alloc_root(inode); if (!root) goto failed_iput; @@ -2253,19 +2401,14 @@ static void shmem_destroy_inode(struct inode *inode) /* only struct inode is valid if it's an inline symlink */ mpol_free_shared_policy(&SHMEM_I(inode)->policy); } - shmem_acl_destroy_inode(inode); kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct shmem_inode_info *p = (struct shmem_inode_info *) foo; inode_init_once(&p->vfs_inode); -#ifdef CONFIG_TMPFS_POSIX_ACL - p->i_acl = NULL; - p->i_default_acl = NULL; -#endif } static int init_inodecache(void) @@ -2290,14 +2433,16 @@ static const struct address_space_operations shmem_aops = { .write_end = shmem_write_end, #endif .migratepage = migrate_page, + .error_remove_page = generic_error_remove_page, }; static const struct file_operations shmem_file_operations = { .mmap = shmem_mmap, #ifdef CONFIG_TMPFS .llseek = generic_file_llseek, - .read = shmem_file_read, + .read = do_sync_read, .write = do_sync_write, + .aio_read = shmem_file_aio_read, .aio_write = generic_file_aio_write, .fsync = simple_sync_file, .splice_read = generic_file_splice_read, @@ -2314,7 +2459,7 @@ static const struct inode_operations shmem_inode_operations = { .getxattr = generic_getxattr, .listxattr = generic_listxattr, .removexattr = generic_removexattr, - .permission = shmem_permission, + .check_acl = generic_check_acl, #endif }; @@ -2337,7 +2482,7 @@ static const struct inode_operations shmem_dir_inode_operations = { .getxattr = generic_getxattr, .listxattr = generic_listxattr, .removexattr = generic_removexattr, - .permission = shmem_permission, + .check_acl = generic_check_acl, #endif }; @@ -2348,7 +2493,7 @@ static const struct inode_operations 
shmem_special_inode_operations = { .getxattr = generic_getxattr, .listxattr = generic_listxattr, .removexattr = generic_removexattr, - .permission = shmem_permission, + .check_acl = generic_check_acl, #endif }; @@ -2358,13 +2503,14 @@ static const struct super_operations shmem_ops = { #ifdef CONFIG_TMPFS .statfs = shmem_statfs, .remount_fs = shmem_remount_fs, + .show_options = shmem_show_options, #endif .delete_inode = shmem_delete_inode, .drop_inode = generic_delete_inode, .put_super = shmem_put_super, }; -static struct vm_operations_struct shmem_vm_ops = { +static const struct vm_operations_struct shmem_vm_ops = { .fault = shmem_fault, #ifdef CONFIG_NUMA .set_policy = shmem_set_policy, @@ -2385,9 +2531,8 @@ static struct file_system_type tmpfs_fs_type = { .get_sb = shmem_get_sb, .kill_sb = kill_litter_super, }; -static struct vfsmount *shm_mnt; -static int __init init_tmpfs(void) +int __init init_tmpfs(void) { int error; @@ -2424,21 +2569,70 @@ out4: shm_mnt = ERR_PTR(error); return error; } -module_init(init_tmpfs) + +#else /* !CONFIG_SHMEM */ /* - * shmem_file_setup - get an unlinked file living in tmpfs + * tiny-shmem: simple shmemfs and tmpfs using ramfs code * + * This is intended for small system where the benefits of the full + * shmem code (swap-backed and resource-limited) are outweighed by + * their complexity. On systems without swap this code should be + * effectively equivalent, but much lighter weight. + */ + +#include + +static struct file_system_type tmpfs_fs_type = { + .name = "tmpfs", + .get_sb = ramfs_get_sb, + .kill_sb = kill_litter_super, +}; + +int __init init_tmpfs(void) +{ + BUG_ON(register_filesystem(&tmpfs_fs_type) != 0); + + shm_mnt = kern_mount(&tmpfs_fs_type); + BUG_ON(IS_ERR(shm_mnt)); + + return 0; +} + +int shmem_unuse(swp_entry_t entry, struct page *page) +{ + return 0; +} + +int shmem_lock(struct file *file, int lock, struct user_struct *user) +{ + return 0; +} + +#define shmem_vm_ops generic_file_vm_ops +#define shmem_file_operations ramfs_file_operations +#define shmem_get_inode(sb, mode, dev, flags) ramfs_get_inode(sb, mode, dev) +#define shmem_acct_size(flags, size) 0 +#define shmem_unacct_size(flags, size) do {} while (0) +#define SHMEM_MAX_BYTES MAX_LFS_FILESIZE + +#endif /* CONFIG_SHMEM */ + +/* common code */ + +/** + * shmem_file_setup - get an unlinked file living in tmpfs * @name: name for dentry (to be seen in /proc//maps * @size: size to be set for the file - * + * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size */ -struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags) +struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) { int error; struct file *file; struct inode *inode; - struct dentry *dentry, *root; + struct path path; + struct dentry *root; struct qstr this; if (IS_ERR(shm_mnt)) @@ -2455,40 +2649,43 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags) this.len = strlen(name); this.hash = 0; /* will go */ root = shm_mnt->mnt_root; - dentry = d_alloc(root, &this); - if (!dentry) + path.dentry = d_alloc(root, &this); + if (!path.dentry) goto put_memory; - - error = -ENFILE; - file = get_empty_filp(); - if (!file) - goto put_dentry; + path.mnt = mntget(shm_mnt); error = -ENOSPC; - inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0); + inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags); if (!inode) - goto close_file; + goto put_dentry; - SHMEM_I(inode)->flags = flags & VM_ACCOUNT; - d_instantiate(dentry, inode); + 
+        d_instantiate(path.dentry, inode);
         inode->i_size = size;
         inode->i_nlink = 0;     /* It is unlinked */
-        init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
-                        &shmem_file_operations);
+#ifndef CONFIG_MMU
+        error = ramfs_nommu_expand_for_mapping(inode, size);
+        if (error)
+                goto put_dentry;
+#endif
+
+        error = -ENFILE;
+        file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
+                  &shmem_file_operations);
+        if (!file)
+                goto put_dentry;
+        return file;
-close_file:
-        put_filp(file);
 put_dentry:
-        dput(dentry);
+        path_put(&path);
 put_memory:
         shmem_unacct_size(flags, size);
         return ERR_PTR(error);
 }
+EXPORT_SYMBOL_GPL(shmem_file_setup);
-/*
+/**
  * shmem_zero_setup - setup a shared anonymous mapping
- *
  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
  */
 int shmem_zero_setup(struct vm_area_struct *vma)