X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=mm%2Fshmem.c;h=4fb41c83daca8d7c1096c380cc0f579f04908e1a;hb=57f9fd7d25ac9a0d7e3a4ced580e780ab4524e3b;hp=4103a239ce843326faca51c61c5b0b105797b24b;hpb=0b0a0806b0d8635e046bf533225a25903b1cddce;p=safe%2Fjmp%2Flinux-2.6

diff --git a/mm/shmem.c b/mm/shmem.c
index 4103a23..4fb41c8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -24,10 +24,12 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
+#include 
 
 static struct vfsmount *shm_mnt;
 
@@ -42,13 +44,11 @@ static struct vfsmount *shm_mnt;
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -64,13 +64,28 @@ static struct vfsmount *shm_mnt;
 #include 
 #include 
 
+/*
+ * The maximum size of a shmem/tmpfs file is limited by the maximum size of
+ * its triple-indirect swap vector - see illustration at shmem_swp_entry().
+ *
+ * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
+ * but one eighth of that on a 64-bit kernel.  With 8kB page size, maximum
+ * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
+ * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
+ *
+ * We use / and * instead of shifts in the definitions below, so that the swap
+ * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
+ */
 #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
-#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
-#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
+#define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
 
-#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
-#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
+#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
+#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)
+#define SHMEM_MAX_BYTES  min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
+#define SHMEM_MAX_INDEX  ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))
+
+#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
 #define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
 
 /* info->flags needs VM_flags to handle pagein/truncate races efficiently */
@@ -203,7 +218,7 @@ static const struct file_operations shmem_file_operations;
 static const struct inode_operations shmem_inode_operations;
 static const struct inode_operations shmem_dir_inode_operations;
 static const struct inode_operations shmem_special_inode_operations;
-static struct vm_operations_struct shmem_vm_ops;
+static const struct vm_operations_struct shmem_vm_ops;
 
 static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
 	.ra_pages	= 0,	/* No readahead */
@@ -1002,7 +1017,14 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
 			goto out;
 	}
 	mutex_unlock(&shmem_swaplist_mutex);
-out:	return found;	/* 0 or 1 or -ENOMEM */
+	/*
+	 * Can some race bring us here?  We've been holding page lock,
+	 * so I think not; but would rather try again later than BUG()
+	 */
+	unlock_page(page);
+	page_cache_release(page);
+out:
+	return (found < 0) ? found : 0;
 }
 
 /*
@@ -1031,8 +1053,9 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	 * sync from ever calling shmem_writepage; but a stacking filesystem
 	 * may use the ->writepage of its underlying filesystem, in which case
 	 * tmpfs should write out to swap only in response to memory pressure,
-	 * and not for pdflush or sync.  However, in those cases, we do still
-	 * want to check if there's a redundant swappage to be discarded.
+	 * and not for the writeback threads or sync.  However, in those cases,
+	 * we do still want to check if there's a redundant swappage to be
+	 * discarded.
 	 */
 	if (wbc->for_reclaim)
 		swap = get_swap_page();
@@ -1064,11 +1087,10 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	else
 		inode = NULL;
 	spin_unlock(&info->lock);
-	swap_duplicate(swap);
+	swap_shmem_alloc(swap);
 	BUG_ON(page_mapped(page));
 	page_cache_release(page);	/* pagecache ref */
-	set_page_dirty(page);
-	unlock_page(page);
+	swap_writepage(page, wbc);
 	if (inode) {
 		mutex_lock(&shmem_swaplist_mutex);
 		/* move instead of add in case we're racing */
@@ -1082,7 +1104,11 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	shmem_swp_unmap(entry);
 unlock:
 	spin_unlock(&info->lock);
-	swap_free(swap);
+	/*
+	 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
+	 * clear SWAP_HAS_CACHE flag.
+	 */
+	swapcache_free(swap, NULL);
 redirty:
 	set_page_dirty(page);
 	if (wbc->for_reclaim)
@@ -1325,8 +1351,12 @@ repeat:
 			shmem_swp_unmap(entry);
 			spin_unlock(&info->lock);
 			if (error == -ENOMEM) {
-				/* allow reclaim from this memory cgroup */
-				error = mem_cgroup_shrink_usage(swappage,
+				/*
+				 * reclaim from proper memory cgroup and
+				 * call memcg's OOM if needed.
+				 */
+				error = mem_cgroup_shmem_charge_fallback(
+								swappage,
 								current->mm,
 								gfp);
 				if (error) {
@@ -1539,6 +1569,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, int mode,
 		spin_lock_init(&info->lock);
 		info->flags = flags & VM_NORESERVE;
 		INIT_LIST_HEAD(&info->swaplist);
+		cache_no_acl(inode);
 
 		switch (mode & S_IFMT) {
 		default:
@@ -1610,8 +1641,8 @@ shmem_write_end(struct file *file, struct address_space *mapping,
 	if (pos + copied > inode->i_size)
 		i_size_write(inode, pos + copied);
 
-	unlock_page(page);
 	set_page_dirty(page);
+	unlock_page(page);
 	page_cache_release(page);
 
 	return copied;
@@ -1948,13 +1979,13 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
 			iput(inode);
 			return error;
 		}
-		unlock_page(page);
 		inode->i_mapping->a_ops = &shmem_aops;
 		inode->i_op = &shmem_symlink_inode_operations;
 		kaddr = kmap_atomic(page, KM_USER0);
 		memcpy(kaddr, symname, len);
 		kunmap_atomic(kaddr, KM_USER0);
 		set_page_dirty(page);
+		unlock_page(page);
 		page_cache_release(page);
 	}
 	if (dir->i_mode & S_ISGID)
@@ -2278,8 +2309,7 @@ static void shmem_put_super(struct super_block *sb)
 	sb->s_fs_info = NULL;
 }
 
-static int shmem_fill_super(struct super_block *sb,
-			    void *data, int silent)
+int shmem_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct inode *inode;
 	struct dentry *root;
@@ -2287,17 +2317,14 @@ static int shmem_fill_super(struct super_block *sb,
 	int err = -ENOMEM;
 
 	/* Round up to L1_CACHE_BYTES to resist false sharing */
-	sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
+	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
 				L1_CACHE_BYTES), GFP_KERNEL);
 	if (!sbinfo)
 		return -ENOMEM;
 
-	sbinfo->max_blocks = 0;
-	sbinfo->max_inodes = 0;
 	sbinfo->mode = S_IRWXUGO | S_ISVTX;
 	sbinfo->uid = current_fsuid();
 	sbinfo->gid = current_fsgid();
-	sbinfo->mpol = NULL;
 	sb->s_fs_info = sbinfo;
 
 #ifdef CONFIG_TMPFS
@@ -2369,7 +2396,6 @@ static void shmem_destroy_inode(struct inode *inode)
 		/* only struct inode is valid if it's an inline symlink */
 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
 	}
-	shmem_acl_destroy_inode(inode);
 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
 }
 
@@ -2378,10 +2404,6 @@ static void init_once(void *foo)
 	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
 
 	inode_init_once(&p->vfs_inode);
-#ifdef CONFIG_TMPFS_POSIX_ACL
-	p->i_acl = NULL;
-	p->i_default_acl = NULL;
-#endif
 }
 
 static int init_inodecache(void)
@@ -2406,6 +2428,7 @@ static const struct address_space_operations shmem_aops = {
 	.write_end	= shmem_write_end,
 #endif
 	.migratepage	= migrate_page,
+	.error_remove_page = generic_error_remove_page,
 };
 
 static const struct file_operations shmem_file_operations = {
@@ -2431,7 +2454,7 @@ static const struct inode_operations shmem_inode_operations = {
 	.getxattr	= generic_getxattr,
 	.listxattr	= generic_listxattr,
 	.removexattr	= generic_removexattr,
-	.permission	= shmem_permission,
+	.check_acl	= shmem_check_acl,
 #endif
 };
 
@@ -2454,7 +2477,7 @@ static const struct inode_operations shmem_dir_inode_operations = {
 	.getxattr	= generic_getxattr,
 	.listxattr	= generic_listxattr,
 	.removexattr	= generic_removexattr,
-	.permission	= shmem_permission,
+	.check_acl	= shmem_check_acl,
 #endif
 };
 
@@ -2465,7 +2488,7 @@ static const struct inode_operations shmem_special_inode_operations = {
 	.getxattr	= generic_getxattr,
 	.listxattr	= generic_listxattr,
 	.removexattr	= generic_removexattr,
-	.permission	= shmem_permission,
+	.check_acl	= shmem_check_acl,
 #endif
 };
 
@@ -2482,7 +2505,7 @@ static const struct super_operations shmem_ops = {
 	.put_super	= shmem_put_super,
 };
 
-static struct vm_operations_struct shmem_vm_ops = {
+static const struct vm_operations_struct shmem_vm_ops = {
 	.fault		= shmem_fault,
 #ifdef CONFIG_NUMA
 	.set_policy	= shmem_set_policy,
@@ -2504,7 +2527,7 @@ static struct file_system_type tmpfs_fs_type = {
 	.kill_sb	= kill_litter_super,
 };
 
-static int __init init_tmpfs(void)
+int __init init_tmpfs(void)
 {
 	int error;
 
@@ -2561,7 +2584,7 @@ static struct file_system_type tmpfs_fs_type = {
 	.kill_sb	= kill_litter_super,
 };
 
-static int __init init_tmpfs(void)
+int __init init_tmpfs(void)
 {
 	BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);
 
@@ -2576,12 +2599,17 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
 	return 0;
 }
 
+int shmem_lock(struct file *file, int lock, struct user_struct *user)
+{
+	return 0;
+}
+
 #define shmem_vm_ops				generic_file_vm_ops
 #define shmem_file_operations			ramfs_file_operations
 #define shmem_get_inode(sb, mode, dev, flags)	ramfs_get_inode(sb, mode, dev)
 #define shmem_acct_size(flags, size)		0
 #define shmem_unacct_size(flags, size)		do {} while (0)
-#define SHMEM_MAX_BYTES				LLONG_MAX
+#define SHMEM_MAX_BYTES				MAX_LFS_FILESIZE
 
 #endif /* CONFIG_SHMEM */
 
@@ -2593,7 +2621,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
  * @size: size to be set for the file
  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
  */
-struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
+struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
 {
 	int error;
 	struct file *file;
@@ -2640,6 +2668,7 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 	if (error)
 		goto close_file;
 #endif
+	ima_counts_get(file);
 	return file;
 
 close_file:
@@ -2671,5 +2700,3 @@ int shmem_zero_setup(struct vm_area_struct *vma)
 	vma->vm_ops = &shmem_vm_ops;
 	return 0;
 }
-
-module_init(init_tmpfs)
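
Note on the new SHMSWP_*/SHMEM_MAX_* arithmetic in the hunk at lines 64-91: the figures quoted in the added comment (just over 2TB with 4kB pages on a 32-bit kernel, one eighth of that on a 64-bit kernel) follow directly from the macro definitions. The sketch below is not part of the patch; it is a small userspace program that mirrors the same arithmetic so the numbers can be checked outside the kernel, under the assumptions PAGE_CACHE_SIZE = 4096, SHMEM_NR_DIRECT = 16 (as in include/linux/shmem_fs.h of this era), and the 32-bit/64-bit MAX_LFS_FILESIZE definitions from include/linux/fs.h.

/*
 * Userspace sketch (not kernel code): mirrors the new SHMSWP_MAX_INDEX /
 * SHMSWP_MAX_BYTES / SHMEM_MAX_BYTES arithmetic from the patch above.
 * Assumed constants: PAGE_CACHE_SIZE = 4096, SHMEM_NR_DIRECT = 16.
 */
#include <stdio.h>

static void report(const char *kernel, unsigned long long entry_size,
		   unsigned long long max_lfs)
{
	unsigned long long page = 4096, nr_direct = 16;
	unsigned long long entries = page / entry_size;	/* ENTRIES_PER_PAGE */
	unsigned long long epp = entries * entries;	/* ENTRIES_PER_PAGEPAGE */
	unsigned long long swp_index = nr_direct + (epp / 2) * (entries + 1);
	unsigned long long swp_bytes = swp_index * page;	/* SHMSWP_MAX_BYTES */
	unsigned long long max_bytes = swp_bytes < max_lfs ? swp_bytes : max_lfs;

	printf("%s: SHMSWP_MAX_BYTES = %llu GB, SHMEM_MAX_BYTES = %llu GB\n",
	       kernel, swp_bytes >> 30, max_bytes >> 30);
}

int main(void)
{
	/* 32-bit: swap-vector entry is 4 bytes, MAX_LFS_FILESIZE = (4096ULL << 31) - 1 */
	report("32-bit", 4, (4096ULL << 31) - 1);
	/* 64-bit: swap-vector entry is 8 bytes, MAX_LFS_FILESIZE = 0x7fffffffffffffff */
	report("64-bit", 8, 0x7fffffffffffffffULL);
	return 0;
}

With 4-byte swap-vector entries the triple-indirect vector covers about 2050GB, well under the 32-bit MAX_LFS_FILESIZE of 8TB, so the swap vector is the limiting factor; with 8-byte entries it covers about 256GB, one eighth of that. Doing the whole chain in unsigned long long is also what the cast in the new ENTRIES_PER_PAGEPAGE achieves: it keeps SHMSWP_MAX_BYTES from overflowing a 32-bit calculation once shifted by PAGE_CACHE_SHIFT.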
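
The shmem_file_setup() prototype change to const char *name and the constification of shmem_vm_ops are visible to code that backs a VMA with an unlinked tmpfs file. As an illustration only, the sketch below follows the shmem_zero_setup() pattern whose tail appears in the final hunk; the function name is made up, and since shmem_vm_ops is file-local, such a caller would have to live in mm/shmem.c itself.

/*
 * Illustrative sketch, not part of the patch: a caller inside mm/shmem.c
 * backing a VMA with an unlinked tmpfs file, following shmem_zero_setup().
 */
#include <linux/err.h>
#include <linux/file.h>
#include <linux/mm.h>

static int shmem_backed_setup_example(struct vm_area_struct *vma)
{
	loff_t size = vma->vm_end - vma->vm_start;
	struct file *file;

	/* name is now const char *, so string literals need no cast */
	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);	/* drop any previously attached file */
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;	/* now a const struct vm_operations_struct */
	return 0;
}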