/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
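
/*
 * Worked example (illustrative; sizes assumed, not taken from this source):
 * with a 4K PAGE_CACHE_SIZE and an 8-byte unsigned long, ENTRIES_PER_PAGE
 * is 4096/8 = 512 and ENTRIES_PER_PAGEPAGE is 512*512 = 262144, so
 * SHMEM_MAX_INDEX is SHMEM_NR_DIRECT + 131072*513, about 67 million pages,
 * making SHMEM_MAX_BYTES roughly 256GB per file.
 */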

#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN     VM_READ
#define SHMEM_TRUNCATE   VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT    64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
        SGP_READ,       /* don't exceed i_size, don't allocate page */
        SGP_CACHE,      /* don't exceed i_size, may allocate page */
        SGP_DIRTY,      /* like SGP_CACHE, but set new page dirty */
        SGP_WRITE,      /* may exceed i_size, may allocate page */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
        return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
        return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif
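
/*
 * Illustration with assumed numbers: on a machine with 2GB of RAM and 4K
 * pages (totalram_pages = 524288), a default tmpfs mount is capped at
 * 262144 blocks (half of RAM) and, with no highmem configured, at
 * min(524288 - 0, 262144) = 262144 inodes as well.
 */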

static int shmem_getpage(struct inode *inode, unsigned long idx,
                         struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
        /*
         * The above definition of ENTRIES_PER_PAGE, and the use of
         * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
         * might be reconsidered if it ever diverges from PAGE_SIZE.
         *
         * Mobility flags are masked out as swap vectors cannot move
         */
        return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
                                PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
        __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
        return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
        kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
        return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
        /*
         * When passing a pointer to an i_direct entry, to code which
         * also handles indirect entries and so will shmem_swp_unmap,
         * we must arrange for the preempt count to remain in balance.
         * What kmap_atomic of a lowmem page does depends on config
         * and architecture, so pretend to kmap_atomic some lowmem page.
         */
        (void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
        kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
        return (flags & VM_NORESERVE) ?
                0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
        if (!(flags & VM_NORESERVE))
                vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
        return (flags & VM_NORESERVE) ?
                security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
        if (flags & VM_NORESERVE)
                vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
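
/*
 * Illustration with assumed sizes: an 18000-byte shared anonymous mapping
 * is pre-accounted in full at setup time, shmem_acct_size charging
 * VM_ACCT(18000) = 5 pages of 4K; an 18000-byte tmpfs file instead carries
 * VM_NORESERVE and is charged one page at a time by shmem_acct_block as
 * pages are actually allocated, so a sparse file costs only what it touches.
 */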

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
        .ra_pages       = 0,    /* No readahead */
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
        .unplug_io_fn   = default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static void shmem_free_blocks(struct inode *inode, long pages)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        if (sbinfo->max_blocks) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_blocks += pages;
                inode->i_blocks -= pages*BLOCKS_PER_PAGE;
                spin_unlock(&sbinfo->stat_lock);
        }
}

static int shmem_reserve_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                if (!sbinfo->free_inodes) {
                        spin_unlock(&sbinfo->stat_lock);
                        return -ENOSPC;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }
        return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}

/**
 * shmem_recalc_inode - recalculate the size of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        long freed;

        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
                info->alloced -= freed;
                shmem_unacct_blocks(info->flags, freed);
                shmem_free_blocks(inode, freed);
        }
}
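
/*
 * Numerical illustration (assumed values): with info->alloced == 10,
 * info->swapped == 2 and nrpages == 5, three clean hole pages must have
 * been reclaimed behind our back; freed == 10 - 2 - 5 == 3, and those
 * three blocks are unaccounted and returned to the superblock counts.
 */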

/**
 * shmem_swp_entry - find the swap vector position in the info structure
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *            |      +-> 20-23
 *            |
 *            +-->dir2 --> 24-27
 *            |        +-> 28-31
 *            |        +-> 32-35
 *            |        +-> 36-39
 *            |
 *            +-->dir3 --> 40-43
 *                     +-> 44-47
 *                     +-> 48-51
 *                     +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
        unsigned long offset;
        struct page **dir;
        struct page *subdir;

        if (index < SHMEM_NR_DIRECT) {
                shmem_swp_balance_unmap();
                return info->i_direct+index;
        }
        if (!info->i_indirect) {
                if (page) {
                        info->i_indirect = *page;
                        *page = NULL;
                }
                return NULL;                    /* need another page */
        }

        index -= SHMEM_NR_DIRECT;
        offset = index % ENTRIES_PER_PAGE;
        index /= ENTRIES_PER_PAGE;
        dir = shmem_dir_map(info->i_indirect);

        if (index >= ENTRIES_PER_PAGE/2) {
                index -= ENTRIES_PER_PAGE/2;
                dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
                index %= ENTRIES_PER_PAGE;
                subdir = *dir;
                if (!subdir) {
                        if (page) {
                                *dir = *page;
                                *page = NULL;
                        }
                        shmem_dir_unmap(dir);
                        return NULL;            /* need another page */
                }
                shmem_dir_unmap(dir);
                dir = shmem_dir_map(subdir);
        }

        dir += index;
        subdir = *dir;
        if (!subdir) {
                if (!page || !(subdir = *page)) {
                        shmem_dir_unmap(dir);
                        return NULL;            /* need a page */
                }
                *dir = subdir;
                *page = NULL;
        }
        shmem_dir_unmap(dir);
        return shmem_swp_map(subdir) + offset;
}
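
/*
 * Tracing the layout diagram above (same artificial ENTRIES_PER_PAGE = 4
 * and SHMEM_NR_DIRECT == 16): for index 41, index becomes 41 - 16 = 25,
 * offset = 25 % 4 = 1, index = 25 / 4 = 6.  Since 6 >= 4/2, the triple
 * indirect half is taken: index drops to 4, dir advances to slot
 * 2 + 4/4 = 3 of the top page (dir3), and index %= 4 leaves 0, so slot 0
 * of dir3 (entries 40-43) is mapped and offset 1 picks out entry 41.
 */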

static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
        long incdec = value? 1: -1;

        entry->val = value;
        info->swapped += incdec;
        if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
                struct page *page = kmap_atomic_to_page(entry);
                set_page_private(page, page_private(page) + incdec);
        }
}

/**
 * shmem_swp_alloc - get the position of the swap entry for the page.
 * @info:       info structure for the inode
 * @index:      index of the page to find
 * @sgp:        check and recheck i_size? skip allocation?
 *
 * If the entry does not exist, allocate it.
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
        struct inode *inode = &info->vfs_inode;
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        struct page *page = NULL;
        swp_entry_t *entry;

        if (sgp != SGP_WRITE &&
            ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
                return ERR_PTR(-EINVAL);

        while (!(entry = shmem_swp_entry(info, index, &page))) {
                if (sgp == SGP_READ)
                        return shmem_swp_map(ZERO_PAGE(0));
                /*
                 * Test free_blocks against 1 not 0, since we have 1 data
                 * page (and perhaps indirect index pages) yet to allocate:
                 * a waste to allocate index if we cannot allocate data.
                 */
                if (sbinfo->max_blocks) {
                        spin_lock(&sbinfo->stat_lock);
                        if (sbinfo->free_blocks <= 1) {
                                spin_unlock(&sbinfo->stat_lock);
                                return ERR_PTR(-ENOSPC);
                        }
                        sbinfo->free_blocks--;
                        inode->i_blocks += BLOCKS_PER_PAGE;
                        spin_unlock(&sbinfo->stat_lock);
                }

                spin_unlock(&info->lock);
                page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
                if (page)
                        set_page_private(page, 0);
                spin_lock(&info->lock);

                if (!page) {
                        shmem_free_blocks(inode, 1);
                        return ERR_PTR(-ENOMEM);
                }
                if (sgp != SGP_WRITE &&
                    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
                        entry = ERR_PTR(-EINVAL);
                        break;
                }
                if (info->next_index <= index)
                        info->next_index = index + 1;
        }
        if (page) {
                /* another task gave its page, or truncated the file */
                shmem_free_blocks(inode, 1);
                shmem_dir_free(page);
        }
        if (info->next_index <= index && !IS_ERR(entry))
                info->next_index = index + 1;
        return entry;
}

/**
 * shmem_free_swp - free some swap entries in a directory
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
                                                spinlock_t *punch_lock)
{
        spinlock_t *punch_unlock = NULL;
        swp_entry_t *ptr;
        int freed = 0;

        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val) {
                        if (unlikely(punch_lock)) {
                                punch_unlock = punch_lock;
                                punch_lock = NULL;
                                spin_lock(punch_unlock);
                                if (!ptr->val)
                                        continue;
                        }
                        free_swap_and_cache(*ptr);
                        *ptr = (swp_entry_t){0};
                        freed++;
                }
        }
        if (punch_unlock)
                spin_unlock(punch_unlock);
        return freed;
}

static int shmem_map_and_free_swp(struct page *subdir, int offset,
                int limit, struct page ***dir, spinlock_t *punch_lock)
{
        swp_entry_t *ptr;
        int freed = 0;

        ptr = shmem_swp_map(subdir);
        for (; offset < limit; offset += LATENCY_LIMIT) {
                int size = limit - offset;
                if (size > LATENCY_LIMIT)
                        size = LATENCY_LIMIT;
                freed += shmem_free_swp(ptr+offset, ptr+offset+size,
                                                        punch_lock);
                if (need_resched()) {
                        shmem_swp_unmap(ptr);
                        if (*dir) {
                                shmem_dir_unmap(*dir);
                                *dir = NULL;
                        }
                        cond_resched();
                        ptr = shmem_swp_map(subdir);
                }
        }
        shmem_swp_unmap(ptr);
        return freed;
}

static void shmem_free_pages(struct list_head *next)
{
        struct page *page;
        int freed = 0;

        do {
                page = container_of(next, struct page, lru);
                next = next->next;
                shmem_dir_free(page);
                freed++;
                if (freed >= LATENCY_LIMIT) {
                        cond_resched();
                        freed = 0;
                }
        } while (next);
}

static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
        unsigned long stage;
        unsigned long diroff;
        struct page **dir;
        struct page *topdir;
        struct page *middir;
        struct page *subdir;
        swp_entry_t *ptr;
        LIST_HEAD(pages_to_free);
        long nr_pages_to_free = 0;
        long nr_swaps_freed = 0;
        int offset;
        int freed;
        int punch_hole;
        spinlock_t *needs_lock;
        spinlock_t *punch_lock;
        unsigned long upper_limit;

        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (idx >= info->next_index)
                return;

        spin_lock(&info->lock);
        info->flags |= SHMEM_TRUNCATE;
        if (likely(end == (loff_t) -1)) {
                limit = info->next_index;
                upper_limit = SHMEM_MAX_INDEX;
                info->next_index = idx;
                needs_lock = NULL;
                punch_hole = 0;
        } else {
                if (end + 1 >= inode->i_size) { /* we may free a little more */
                        limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
                                                        PAGE_CACHE_SHIFT;
                        upper_limit = SHMEM_MAX_INDEX;
                } else {
                        limit = (end + 1) >> PAGE_CACHE_SHIFT;
                        upper_limit = limit;
                }
                needs_lock = &info->lock;
                punch_hole = 1;
        }

        topdir = info->i_indirect;
        if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
                info->i_indirect = NULL;
                nr_pages_to_free++;
                list_add(&topdir->lru, &pages_to_free);
        }
        spin_unlock(&info->lock);

        if (info->swapped && idx < SHMEM_NR_DIRECT) {
                ptr = info->i_direct;
                size = limit;
                if (size > SHMEM_NR_DIRECT)
                        size = SHMEM_NR_DIRECT;
                nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
        }

        /*
         * If there are no indirect blocks or we are punching a hole
         * below indirect blocks, nothing to be done.
         */
        if (!topdir || limit <= SHMEM_NR_DIRECT)
                goto done2;

        /*
         * The truncation case has already dropped info->lock, and we're safe
         * because i_size and next_index have already been lowered, preventing
         * access beyond.  But in the punch_hole case, we still need to take
         * the lock when updating the swap directory, because there might be
         * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
         * shmem_writepage.  However, whenever we find we can remove a whole
         * directory page (not at the misaligned start or end of the range),
         * we first NULLify its pointer in the level above, and then have no
         * need to take the lock when updating its contents: needs_lock and
         * punch_lock (either pointing to info->lock or NULL) manage this.
         */

        upper_limit -= SHMEM_NR_DIRECT;
        limit -= SHMEM_NR_DIRECT;
        idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
        offset = idx % ENTRIES_PER_PAGE;
        idx -= offset;

        dir = shmem_dir_map(topdir);
        stage = ENTRIES_PER_PAGEPAGE/2;
        if (idx < ENTRIES_PER_PAGEPAGE/2) {
                middir = topdir;
                diroff = idx/ENTRIES_PER_PAGE;
        } else {
                dir += ENTRIES_PER_PAGE/2;
                dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
                while (stage <= idx)
                        stage += ENTRIES_PER_PAGEPAGE;
                middir = *dir;
                if (*dir) {
                        diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
                                ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
                        if (!diroff && !offset && upper_limit >= stage) {
                                if (needs_lock) {
                                        spin_lock(needs_lock);
                                        *dir = NULL;
                                        spin_unlock(needs_lock);
                                        needs_lock = NULL;
                                } else
                                        *dir = NULL;
                                nr_pages_to_free++;
                                list_add(&middir->lru, &pages_to_free);
                        }
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(middir);
                } else {
                        diroff = 0;
                        offset = 0;
                        idx = stage;
                }
        }

        for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
                if (unlikely(idx == stage)) {
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(topdir) +
                            ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
                        while (!*dir) {
                                dir++;
                                idx += ENTRIES_PER_PAGEPAGE;
                                if (idx >= limit)
                                        goto done1;
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        middir = *dir;
                        if (punch_hole)
                                needs_lock = &info->lock;
                        if (upper_limit >= stage) {
                                if (needs_lock) {
                                        spin_lock(needs_lock);
                                        *dir = NULL;
                                        spin_unlock(needs_lock);
                                        needs_lock = NULL;
                                } else
                                        *dir = NULL;
                                nr_pages_to_free++;
                                list_add(&middir->lru, &pages_to_free);
                        }
                        shmem_dir_unmap(dir);
                        cond_resched();
                        dir = shmem_dir_map(middir);
                        diroff = 0;
                }
                punch_lock = needs_lock;
                subdir = dir[diroff];
                if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
                        if (needs_lock) {
                                spin_lock(needs_lock);
                                dir[diroff] = NULL;
                                spin_unlock(needs_lock);
                                punch_lock = NULL;
                        } else
                                dir[diroff] = NULL;
                        nr_pages_to_free++;
                        list_add(&subdir->lru, &pages_to_free);
                }
                if (subdir && page_private(subdir) /* has swap entries */) {
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        freed = shmem_map_and_free_swp(subdir,
                                        offset, size, &dir, punch_lock);
                        if (!dir)
                                dir = shmem_dir_map(middir);
                        nr_swaps_freed += freed;
                        if (offset || punch_lock) {
                                spin_lock(&info->lock);
                                set_page_private(subdir,
                                        page_private(subdir) - freed);
                                spin_unlock(&info->lock);
                        } else
                                BUG_ON(page_private(subdir) != freed);
                }
                offset = 0;
        }
done1:
        shmem_dir_unmap(dir);
done2:
        if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
                /*
                 * Call truncate_inode_pages again: racing shmem_unuse_inode
                 * may have swizzled a page in from swap since vmtruncate or
                 * generic_delete_inode did it, before we lowered next_index.
                 * Also, though shmem_getpage checks i_size before adding to
                 * cache, no recheck after: so fix the narrow window there too.
                 *
                 * Recalling truncate_inode_pages_range and unmap_mapping_range
                 * every time for punch_hole (which never got a chance to clear
                 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
                 * yet hardly ever necessary: try to optimize them out later.
                 */
                truncate_inode_pages_range(inode->i_mapping, start, end);
                if (punch_hole)
                        unmap_mapping_range(inode->i_mapping, start,
                                                        end - start, 1);
        }

        spin_lock(&info->lock);
        info->flags &= ~SHMEM_TRUNCATE;
        info->swapped -= nr_swaps_freed;
        if (nr_pages_to_free)
                shmem_free_blocks(inode, nr_pages_to_free);
        shmem_recalc_inode(inode);
        spin_unlock(&info->lock);

        /*
         * Empty swap vector directory pages to be freed?
         */
        if (!list_empty(&pages_to_free)) {
                pages_to_free.prev->next = NULL;
                shmem_free_pages(pages_to_free.next);
        }
}

static void shmem_truncate(struct inode *inode)
{
        shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        struct page *page = NULL;
        int error;

        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
                if (attr->ia_size < inode->i_size) {
                        /*
                         * If truncating down to a partial page, then
                         * if that page is already allocated, hold it
                         * in memory until the truncation is over, so
                         * truncate_partial_page cannot miss it were
                         * it assigned to swap.
                         */
                        if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
                                (void) shmem_getpage(inode,
                                        attr->ia_size>>PAGE_CACHE_SHIFT,
                                                &page, SGP_READ, NULL);
                                if (page)
                                        unlock_page(page);
                        }
                        /*
                         * Reset SHMEM_PAGEIN flag so that shmem_truncate can
                         * detect if any pages might have been added to cache
                         * after truncate_inode_pages.  But we needn't bother
                         * if it's being fully truncated to zero-length: the
                         * nrpages check is efficient enough in that case.
                         */
                        if (attr->ia_size) {
                                struct shmem_inode_info *info = SHMEM_I(inode);
                                spin_lock(&info->lock);
                                info->flags &= ~SHMEM_PAGEIN;
                                spin_unlock(&info->lock);
                        }
                }
        }

        error = inode_change_ok(inode, attr);
        if (!error)
                error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
        if (!error && (attr->ia_valid & ATTR_MODE))
                error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
        if (page)
                page_cache_release(page);
        return error;
}

static void shmem_delete_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);

        if (inode->i_op->truncate == shmem_truncate) {
                truncate_inode_pages(inode->i_mapping, 0);
                shmem_unacct_size(info->flags, inode->i_size);
                inode->i_size = 0;
                shmem_truncate(inode);
                if (!list_empty(&info->swaplist)) {
                        mutex_lock(&shmem_swaplist_mutex);
                        list_del_init(&info->swaplist);
                        mutex_unlock(&shmem_swaplist_mutex);
                }
        }
        BUG_ON(inode->i_blocks);
        shmem_free_inode(inode->i_sb);
        clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
        swp_entry_t *ptr;

        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val == entry.val)
                        return ptr - dir;
        }
        return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
        struct inode *inode;
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
        unsigned long stage;
        struct page **dir;
        struct page *subdir;
        swp_entry_t *ptr;
        int offset;
        int error;

        idx = 0;
        ptr = info->i_direct;
        spin_lock(&info->lock);
        if (!info->swapped) {
                list_del_init(&info->swaplist);
                goto lost2;
        }
        limit = info->next_index;
        size = limit;
        if (size > SHMEM_NR_DIRECT)
                size = SHMEM_NR_DIRECT;
        offset = shmem_find_swp(entry, ptr, ptr+size);
        if (offset >= 0)
                goto found;
        if (!info->i_indirect)
                goto lost2;

        dir = shmem_dir_map(info->i_indirect);
        stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

        for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
                if (unlikely(idx == stage)) {
                        shmem_dir_unmap(dir-1);
                        if (cond_resched_lock(&info->lock)) {
                                /* check it has not been truncated */
                                if (limit > info->next_index) {
                                        limit = info->next_index;
                                        if (idx >= limit)
                                                goto lost2;
                                }
                        }
                        dir = shmem_dir_map(info->i_indirect) +
                            ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
                        while (!*dir) {
                                dir++;
                                idx += ENTRIES_PER_PAGEPAGE;
                                if (idx >= limit)
                                        goto lost1;
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        subdir = *dir;
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(subdir);
                }
                subdir = *dir;
                if (subdir && page_private(subdir)) {
                        ptr = shmem_swp_map(subdir);
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        offset = shmem_find_swp(entry, ptr, ptr+size);
                        shmem_swp_unmap(ptr);
                        if (offset >= 0) {
                                shmem_dir_unmap(dir);
                                goto found;
                        }
                }
        }
lost1:
        shmem_dir_unmap(dir-1);
lost2:
        spin_unlock(&info->lock);
        return 0;
found:
        idx += offset;
        inode = igrab(&info->vfs_inode);
        spin_unlock(&info->lock);

        /*
         * Move _head_ to start search for next from here.
         * But be careful: shmem_delete_inode checks list_empty without taking
         * mutex, and there's an instant in list_move_tail when info->swaplist
         * would appear empty, if it were the only one on shmem_swaplist.  We
         * could avoid doing it if inode NULL; or use this minor optimization.
         */
        if (shmem_swaplist.next != &info->swaplist)
                list_move_tail(&shmem_swaplist, &info->swaplist);
        mutex_unlock(&shmem_swaplist_mutex);

        error = 1;
        if (!inode)
                goto out;
        /*
         * Charge page using GFP_KERNEL while we can wait.
         * Charged back to the user (not to the caller) when swap accounting is used.
         * add_to_page_cache() will be called with GFP_NOWAIT.
         */
        error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
        if (error)
                goto out;
        error = radix_tree_preload(GFP_KERNEL);
        if (error) {
                mem_cgroup_uncharge_cache_page(page);
                goto out;
        }
        error = 1;

        spin_lock(&info->lock);
        ptr = shmem_swp_entry(info, idx, NULL);
        if (ptr && ptr->val == entry.val) {
                error = add_to_page_cache_locked(page, inode->i_mapping,
                                                idx, GFP_NOWAIT);
                /* does mem_cgroup_uncharge_cache_page on error */
        } else  /* we must compensate for our precharge above */
                mem_cgroup_uncharge_cache_page(page);

        if (error == -EEXIST) {
                struct page *filepage = find_get_page(inode->i_mapping, idx);
                error = 1;
                if (filepage) {
                        /*
                         * There might be a more uptodate page coming down
                         * from a stacked writepage: forget our swappage if so.
                         */
                        if (PageUptodate(filepage))
                                error = 0;
                        page_cache_release(filepage);
                }
        }
        if (!error) {
                delete_from_swap_cache(page);
                set_page_dirty(page);
                info->flags |= SHMEM_PAGEIN;
                shmem_swp_set(info, ptr, 0);
                swap_free(entry);
                error = 1;      /* not an error, but entry was found */
        }
        if (ptr)
                shmem_swp_unmap(ptr);
        spin_unlock(&info->lock);
        radix_tree_preload_end();
out:
        unlock_page(page);
        page_cache_release(page);
        iput(inode);            /* allows for NULL */
        return error;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
        struct list_head *p, *next;
        struct shmem_inode_info *info;
        int found = 0;

        mutex_lock(&shmem_swaplist_mutex);
        list_for_each_safe(p, next, &shmem_swaplist) {
                info = list_entry(p, struct shmem_inode_info, swaplist);
                found = shmem_unuse_inode(info, entry, page);
                cond_resched();
                if (found)
                        goto out;
        }
        mutex_unlock(&shmem_swaplist_mutex);
out:    return found;   /* 0 or 1 or -ENOMEM */
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
        struct shmem_inode_info *info;
        swp_entry_t *entry, swap;
        struct address_space *mapping;
        unsigned long index;
        struct inode *inode;

        BUG_ON(!PageLocked(page));
        mapping = page->mapping;
        index = page->index;
        inode = mapping->host;
        info = SHMEM_I(inode);
        if (info->flags & VM_LOCKED)
                goto redirty;
        if (!total_swap_pages)
                goto redirty;

        /*
         * shmem_backing_dev_info's capabilities prevent regular writeback or
         * sync from ever calling shmem_writepage; but a stacking filesystem
         * may use the ->writepage of its underlying filesystem, in which case
         * tmpfs should write out to swap only in response to memory pressure,
         * and not for pdflush or sync.  However, in those cases, we do still
         * want to check if there's a redundant swappage to be discarded.
         */
        if (wbc->for_reclaim)
                swap = get_swap_page();
        else
                swap.val = 0;

        spin_lock(&info->lock);
        if (index >= info->next_index) {
                BUG_ON(!(info->flags & SHMEM_TRUNCATE));
                goto unlock;
        }
        entry = shmem_swp_entry(info, index, NULL);
        if (entry->val) {
                /*
                 * The more uptodate page coming down from a stacked
                 * writepage should replace our old swappage.
                 */
                free_swap_and_cache(*entry);
                shmem_swp_set(info, entry, 0);
        }
        shmem_recalc_inode(inode);

        if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
                remove_from_page_cache(page);
                shmem_swp_set(info, entry, swap.val);
                shmem_swp_unmap(entry);
                if (list_empty(&info->swaplist))
                        inode = igrab(inode);
                else
                        inode = NULL;
                spin_unlock(&info->lock);
                swap_duplicate(swap);
                BUG_ON(page_mapped(page));
                page_cache_release(page);       /* pagecache ref */
                set_page_dirty(page);
                unlock_page(page);
                if (inode) {
                        mutex_lock(&shmem_swaplist_mutex);
                        /* move instead of add in case we're racing */
                        list_move_tail(&info->swaplist, &shmem_swaplist);
                        mutex_unlock(&shmem_swaplist_mutex);
                        iput(inode);
                }
                return 0;
        }

        shmem_swp_unmap(entry);
unlock:
        spin_unlock(&info->lock);
        swap_free(swap);
redirty:
        set_page_dirty(page);
        if (wbc->for_reclaim)
                return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
        unlock_page(page);
        return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
        char buffer[64];

        if (!mpol || mpol->mode == MPOL_DEFAULT)
                return;         /* show nothing */

        mpol_to_str(buffer, sizeof(buffer), mpol, 1);

        seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
        struct mempolicy *mpol = NULL;
        if (sbinfo->mpol) {
                spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
                mpol = sbinfo->mpol;
                mpol_get(mpol);
                spin_unlock(&sbinfo->stat_lock);
        }
        return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
                        struct shmem_inode_info *info, unsigned long idx)
{
        struct mempolicy mpol, *spol;
        struct vm_area_struct pvma;
        struct page *page;

        spol = mpol_cond_copy(&mpol,
                                mpol_shared_policy_lookup(&info->policy, idx));

        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
        pvma.vm_pgoff = idx;
        pvma.vm_ops = NULL;
        pvma.vm_policy = spol;
        page = swapin_readahead(entry, gfp, &pvma, 0);
        return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
                        struct shmem_inode_info *info, unsigned long idx)
{
        struct vm_area_struct pvma;

        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
        pvma.vm_pgoff = idx;
        pvma.vm_ops = NULL;
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);

        /*
         * alloc_page_vma() will drop the shared policy reference
         */
        return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
                        struct shmem_inode_info *info, unsigned long idx)
{
        return swapin_readahead(entry, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
                        struct shmem_inode_info *info, unsigned long idx)
{
        return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
        return NULL;
}
#endif

/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty: that's up to the
 * vm. If we swap it in we mark it dirty, and we also free the swap
 * entry, since a page cannot live in both the swap and page cache.
 */
1190 static int shmem_getpage(struct inode *inode, unsigned long idx,
1191                         struct page **pagep, enum sgp_type sgp, int *type)
1192 {
1193         struct address_space *mapping = inode->i_mapping;
1194         struct shmem_inode_info *info = SHMEM_I(inode);
1195         struct shmem_sb_info *sbinfo;
1196         struct page *filepage = *pagep;
1197         struct page *swappage;
1198         swp_entry_t *entry;
1199         swp_entry_t swap;
1200         gfp_t gfp;
1201         int error;
1202
1203         if (idx >= SHMEM_MAX_INDEX)
1204                 return -EFBIG;
1205
1206         if (type)
1207                 *type = 0;
1208
1209         /*
1210          * Normally, filepage is NULL on entry, and either found
1211          * uptodate immediately, or allocated and zeroed, or read
1212          * in under swappage, which is then assigned to filepage.
1213          * But shmem_readpage (required for splice) passes in a locked
1214          * filepage, which may be found not uptodate by other callers
1215          * too, and may need to be copied from the swappage read in.
1216          */
1217 repeat:
1218         if (!filepage)
1219                 filepage = find_lock_page(mapping, idx);
1220         if (filepage && PageUptodate(filepage))
1221                 goto done;
1222         error = 0;
1223         gfp = mapping_gfp_mask(mapping);
1224         if (!filepage) {
1225                 /*
1226                  * Try to preload while we can wait, to not make a habit of
1227                  * draining atomic reserves; but don't latch on to this cpu.
1228                  */
1229                 error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
1230                 if (error)
1231                         goto failed;
1232                 radix_tree_preload_end();
1233         }
1234
1235         spin_lock(&info->lock);
1236         shmem_recalc_inode(inode);
1237         entry = shmem_swp_alloc(info, idx, sgp);
1238         if (IS_ERR(entry)) {
1239                 spin_unlock(&info->lock);
1240                 error = PTR_ERR(entry);
1241                 goto failed;
1242         }
1243         swap = *entry;
1244
1245         if (swap.val) {
1246                 /* Look it up and read it in.. */
1247                 swappage = lookup_swap_cache(swap);
1248                 if (!swappage) {
1249                         shmem_swp_unmap(entry);
1250                         /* here we actually do the io */
1251                         if (type && !(*type & VM_FAULT_MAJOR)) {
1252                                 __count_vm_event(PGMAJFAULT);
1253                                 *type |= VM_FAULT_MAJOR;
1254                         }
1255                         spin_unlock(&info->lock);
1256                         swappage = shmem_swapin(swap, gfp, info, idx);
1257                         if (!swappage) {
1258                                 spin_lock(&info->lock);
1259                                 entry = shmem_swp_alloc(info, idx, sgp);
1260                                 if (IS_ERR(entry))
1261                                         error = PTR_ERR(entry);
1262                                 else {
1263                                         if (entry->val == swap.val)
1264                                                 error = -ENOMEM;
1265                                         shmem_swp_unmap(entry);
1266                                 }
1267                                 spin_unlock(&info->lock);
1268                                 if (error)
1269                                         goto failed;
1270                                 goto repeat;
1271                         }
1272                         wait_on_page_locked(swappage);
1273                         page_cache_release(swappage);
1274                         goto repeat;
1275                 }
1276
1277                 /* We have to do this with page locked to prevent races */
1278                 if (!trylock_page(swappage)) {
1279                         shmem_swp_unmap(entry);
1280                         spin_unlock(&info->lock);
1281                         wait_on_page_locked(swappage);
1282                         page_cache_release(swappage);
1283                         goto repeat;
1284                 }
1285                 if (PageWriteback(swappage)) {
1286                         shmem_swp_unmap(entry);
1287                         spin_unlock(&info->lock);
1288                         wait_on_page_writeback(swappage);
1289                         unlock_page(swappage);
1290                         page_cache_release(swappage);
1291                         goto repeat;
1292                 }
1293                 if (!PageUptodate(swappage)) {
1294                         shmem_swp_unmap(entry);
1295                         spin_unlock(&info->lock);
1296                         unlock_page(swappage);
1297                         page_cache_release(swappage);
1298                         error = -EIO;
1299                         goto failed;
1300                 }
1301
1302                 if (filepage) {
1303                         shmem_swp_set(info, entry, 0);
1304                         shmem_swp_unmap(entry);
1305                         delete_from_swap_cache(swappage);
1306                         spin_unlock(&info->lock);
1307                         copy_highpage(filepage, swappage);
1308                         unlock_page(swappage);
1309                         page_cache_release(swappage);
1310                         flush_dcache_page(filepage);
1311                         SetPageUptodate(filepage);
1312                         set_page_dirty(filepage);
1313                         swap_free(swap);
1314                 } else if (!(error = add_to_page_cache_locked(swappage, mapping,
1315                                         idx, GFP_NOWAIT))) {
1316                         info->flags |= SHMEM_PAGEIN;
1317                         shmem_swp_set(info, entry, 0);
1318                         shmem_swp_unmap(entry);
1319                         delete_from_swap_cache(swappage);
1320                         spin_unlock(&info->lock);
1321                         filepage = swappage;
1322                         set_page_dirty(filepage);
1323                         swap_free(swap);
1324                 } else {
1325                         shmem_swp_unmap(entry);
1326                         spin_unlock(&info->lock);
1327                         if (error == -ENOMEM) {
1328                                 /* allow reclaim from this memory cgroup */
1329                                 error = mem_cgroup_shrink_usage(swappage,
1330                                                                 current->mm,
1331                                                                 gfp);
1332                                 if (error) {
1333                                         unlock_page(swappage);
1334                                         page_cache_release(swappage);
1335                                         goto failed;
1336                                 }
1337                         }
1338                         unlock_page(swappage);
1339                         page_cache_release(swappage);
1340                         goto repeat;
1341                 }
1342         } else if (sgp == SGP_READ && !filepage) {
1343                 shmem_swp_unmap(entry);
1344                 filepage = find_get_page(mapping, idx);
1345                 if (filepage &&
1346                     (!PageUptodate(filepage) || !trylock_page(filepage))) {
1347                         spin_unlock(&info->lock);
1348                         wait_on_page_locked(filepage);
1349                         page_cache_release(filepage);
1350                         filepage = NULL;
1351                         goto repeat;
1352                 }
1353                 spin_unlock(&info->lock);
1354         } else {
1355                 shmem_swp_unmap(entry);
1356                 sbinfo = SHMEM_SB(inode->i_sb);
1357                 if (sbinfo->max_blocks) {
1358                         spin_lock(&sbinfo->stat_lock);
1359                         if (sbinfo->free_blocks == 0 ||
1360                             shmem_acct_block(info->flags)) {
1361                                 spin_unlock(&sbinfo->stat_lock);
1362                                 spin_unlock(&info->lock);
1363                                 error = -ENOSPC;
1364                                 goto failed;
1365                         }
1366                         sbinfo->free_blocks--;
1367                         inode->i_blocks += BLOCKS_PER_PAGE;
1368                         spin_unlock(&sbinfo->stat_lock);
1369                 } else if (shmem_acct_block(info->flags)) {
1370                         spin_unlock(&info->lock);
1371                         error = -ENOSPC;
1372                         goto failed;
1373                 }
1374
1375                 if (!filepage) {
1376                         int ret;
1377
1378                         spin_unlock(&info->lock);
1379                         filepage = shmem_alloc_page(gfp, info, idx);
1380                         if (!filepage) {
1381                                 shmem_unacct_blocks(info->flags, 1);
1382                                 shmem_free_blocks(inode, 1);
1383                                 error = -ENOMEM;
1384                                 goto failed;
1385                         }
1386                         SetPageSwapBacked(filepage);
1387
1388                         /* Precharge page while we can wait, compensate after */
1389                         error = mem_cgroup_cache_charge(filepage, current->mm,
1390                                         GFP_KERNEL);
1391                         if (error) {
1392                                 page_cache_release(filepage);
1393                                 shmem_unacct_blocks(info->flags, 1);
1394                                 shmem_free_blocks(inode, 1);
1395                                 filepage = NULL;
1396                                 goto failed;
1397                         }
1398
1399                         spin_lock(&info->lock);
1400                         entry = shmem_swp_alloc(info, idx, sgp);
1401                         if (IS_ERR(entry))
1402                                 error = PTR_ERR(entry);
1403                         else {
1404                                 swap = *entry;
1405                                 shmem_swp_unmap(entry);
1406                         }
1407                         ret = error || swap.val;
1408                         if (ret)
1409                                 mem_cgroup_uncharge_cache_page(filepage);
1410                         else
1411                                 ret = add_to_page_cache_lru(filepage, mapping,
1412                                                 idx, GFP_NOWAIT);
1413                         /*
1414                          * At add_to_page_cache_lru() failure, uncharge will
1415                          * be done automatically.
1416                          */
1417                         if (ret) {
1418                                 spin_unlock(&info->lock);
1419                                 page_cache_release(filepage);
1420                                 shmem_unacct_blocks(info->flags, 1);
1421                                 shmem_free_blocks(inode, 1);
1422                                 filepage = NULL;
1423                                 if (error)
1424                                         goto failed;
1425                                 goto repeat;
1426                         }
1427                         info->flags |= SHMEM_PAGEIN;
1428                 }
1429
1430                 info->alloced++;
1431                 spin_unlock(&info->lock);
1432                 clear_highpage(filepage);
1433                 flush_dcache_page(filepage);
1434                 SetPageUptodate(filepage);
1435                 if (sgp == SGP_DIRTY)
1436                         set_page_dirty(filepage);
1437         }
1438 done:
1439         *pagep = filepage;
1440         return 0;
1441
1442 failed:
1443         if (*pagep != filepage) {
1444                 unlock_page(filepage);
1445                 page_cache_release(filepage);
1446         }
1447         return error;
1448 }
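/*
 * Illustrative recap of the sgp_type contract implemented above (a
 * sketch of typical use): SGP_READ may hand back *pagep == NULL over
 * a hole, SGP_CACHE/SGP_DIRTY allocate within i_size, and SGP_WRITE
 * may extend beyond it.  A typical caller:
 *
 *	struct page *page = NULL;
 *	error = shmem_getpage(inode, idx, &page, SGP_READ, NULL);
 *	if (!error && page)
 *		unlock_page(page);
 *
 * A non-error NULL page means a sparse hole; a non-NULL page is
 * returned locked, as the unlock_page() calls in the readers show.
 */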
1449
1450 static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1451 {
1452         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1453         int error;
1454         int ret;
1455
1456         if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
1457                 return VM_FAULT_SIGBUS;
1458
1459         error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1460         if (error)
1461                 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1462
1463         return ret | VM_FAULT_LOCKED;
1464 }
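/*
 * Sketch of the fault contract above: the page in vmf->page is returned
 * locked (hence VM_FAULT_LOCKED), and "ret" may carry VM_FAULT_MAJOR
 * when shmem_getpage() had to go to swap.  A hypothetical caller:
 *
 *	struct vm_fault vmf = { .pgoff = pgoff };
 *	int ret = shmem_fault(vma, &vmf);
 *	if (!(ret & (VM_FAULT_OOM | VM_FAULT_SIGBUS)))
 *		BUG_ON(!PageLocked(vmf.page));
 */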
1465
1466 #ifdef CONFIG_NUMA
1467 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
1468 {
1469         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1470         return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
1471 }
1472
1473 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
1474                                           unsigned long addr)
1475 {
1476         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1477         unsigned long idx;
1478
1479         idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1480         return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
1481 }
1482 #endif
1483
1484 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1485 {
1486         struct inode *inode = file->f_path.dentry->d_inode;
1487         struct shmem_inode_info *info = SHMEM_I(inode);
1488         int retval = -ENOMEM;
1489
1490         spin_lock(&info->lock);
1491         if (lock && !(info->flags & VM_LOCKED)) {
1492                 if (!user_shm_lock(inode->i_size, user))
1493                         goto out_nomem;
1494                 info->flags |= VM_LOCKED;
1495                 mapping_set_unevictable(file->f_mapping);
1496         }
1497         if (!lock && (info->flags & VM_LOCKED) && user) {
1498                 user_shm_unlock(inode->i_size, user);
1499                 info->flags &= ~VM_LOCKED;
1500                 mapping_clear_unevictable(file->f_mapping);
1501                 scan_mapping_unevictable_pages(file->f_mapping);
1502         }
1503         retval = 0;
1504
1505 out_nomem:
1506         spin_unlock(&info->lock);
1507         return retval;
1508 }
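/*
 * shmem_lock() is reached from the SysV SHM_LOCK/SHM_UNLOCK paths in
 * ipc/shm.c; a minimal userspace sketch of the effect (assuming the
 * caller satisfies the RLIMIT_MEMLOCK check in user_shm_lock()):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);
 *	shmctl(id, SHM_UNLOCK, NULL);
 *
 * SHM_LOCK marks the mapping unevictable; SHM_UNLOCK rescans it so the
 * pages become reclaimable again.
 */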
1509
1510 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1511 {
1512         file_accessed(file);
1513         vma->vm_ops = &shmem_vm_ops;
1514         vma->vm_flags |= VM_CAN_NONLINEAR;
1515         return 0;
1516 }
1517
1518 static struct inode *shmem_get_inode(struct super_block *sb, int mode,
1519                                         dev_t dev, unsigned long flags)
1520 {
1521         struct inode *inode;
1522         struct shmem_inode_info *info;
1523         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1524
1525         if (shmem_reserve_inode(sb))
1526                 return NULL;
1527
1528         inode = new_inode(sb);
1529         if (inode) {
1530                 inode->i_mode = mode;
1531                 inode->i_uid = current_fsuid();
1532                 inode->i_gid = current_fsgid();
1533                 inode->i_blocks = 0;
1534                 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1535                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1536                 inode->i_generation = get_seconds();
1537                 info = SHMEM_I(inode);
1538                 memset(info, 0, (char *)inode - (char *)info);
1539                 spin_lock_init(&info->lock);
1540                 info->flags = flags & VM_NORESERVE;
1541                 INIT_LIST_HEAD(&info->swaplist);
1542
1543                 switch (mode & S_IFMT) {
1544                 default:
1545                         inode->i_op = &shmem_special_inode_operations;
1546                         init_special_inode(inode, mode, dev);
1547                         break;
1548                 case S_IFREG:
1549                         inode->i_mapping->a_ops = &shmem_aops;
1550                         inode->i_op = &shmem_inode_operations;
1551                         inode->i_fop = &shmem_file_operations;
1552                         mpol_shared_policy_init(&info->policy,
1553                                                  shmem_get_sbmpol(sbinfo));
1554                         break;
1555                 case S_IFDIR:
1556                         inc_nlink(inode);
1557                         /* Some things misbehave if size == 0 on a directory */
1558                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1559                         inode->i_op = &shmem_dir_inode_operations;
1560                         inode->i_fop = &simple_dir_operations;
1561                         break;
1562                 case S_IFLNK:
1563                         /*
1564                          * Must not load anything into the rbtree:
1565                          * mpol_free_shared_policy will not be called.
1566                          */
1567                         mpol_shared_policy_init(&info->policy, NULL);
1568                         break;
1569                 }
1570         } else
1571                 shmem_free_inode(sb);
1572         return inode;
1573 }
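/*
 * Sketch of typical use (compare shmem_mknod() below): the full mode
 * selects the S_IFMT case above, and a NULL return means the
 * superblock's inode quota (nr_inodes) is exhausted:
 *
 *	inode = shmem_get_inode(sb, S_IFREG | 0600, 0, VM_NORESERVE);
 *	if (!inode)
 *		return -ENOSPC;
 */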
1574
1575 #ifdef CONFIG_TMPFS
1576 static const struct inode_operations shmem_symlink_inode_operations;
1577 static const struct inode_operations shmem_symlink_inline_operations;
1578
1579 /*
1580  * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
1581  * but providing them allows a tmpfs file to be used for splice, sendfile,
1582  * and beneath the loop driver, in the generic fashion that many filesystems support.
1583  */
1584 static int shmem_readpage(struct file *file, struct page *page)
1585 {
1586         struct inode *inode = page->mapping->host;
1587         int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
1588         unlock_page(page);
1589         return error;
1590 }
1591
1592 static int
1593 shmem_write_begin(struct file *file, struct address_space *mapping,
1594                         loff_t pos, unsigned len, unsigned flags,
1595                         struct page **pagep, void **fsdata)
1596 {
1597         struct inode *inode = mapping->host;
1598         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1599         *pagep = NULL;
1600         return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
1601 }
1602
1603 static int
1604 shmem_write_end(struct file *file, struct address_space *mapping,
1605                         loff_t pos, unsigned len, unsigned copied,
1606                         struct page *page, void *fsdata)
1607 {
1608         struct inode *inode = mapping->host;
1609
1610         if (pos + copied > inode->i_size)
1611                 i_size_write(inode, pos + copied);
1612
1613         unlock_page(page);
1614         set_page_dirty(page);
1615         page_cache_release(page);
1616
1617         return copied;
1618 }
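/*
 * These two hooks are driven in a begin/copy/end cycle by the generic
 * write path; a simplified (not verbatim) sketch of that loop:
 *
 *	a_ops->write_begin(file, mapping, pos, bytes, flags, &page, &fsdata);
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	a_ops->write_end(file, mapping, pos, bytes, copied, page, fsdata);
 *
 * shmem_write_begin() hands the page back locked; shmem_write_end()
 * grows i_size if needed, then unlocks, dirties and releases the page.
 */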
1619
1620 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1621 {
1622         struct inode *inode = filp->f_path.dentry->d_inode;
1623         struct address_space *mapping = inode->i_mapping;
1624         unsigned long index, offset;
1625         enum sgp_type sgp = SGP_READ;
1626
1627         /*
1628          * Might this read be for a stacking filesystem?  Then when reading
1629          * holes of a sparse file, we actually need to allocate those pages,
1630          * and even mark them dirty, so such reads cannot exceed the max_blocks limit.
1631          */
1632         if (segment_eq(get_fs(), KERNEL_DS))
1633                 sgp = SGP_DIRTY;
1634
1635         index = *ppos >> PAGE_CACHE_SHIFT;
1636         offset = *ppos & ~PAGE_CACHE_MASK;
1637
1638         for (;;) {
1639                 struct page *page = NULL;
1640                 unsigned long end_index, nr, ret;
1641                 loff_t i_size = i_size_read(inode);
1642
1643                 end_index = i_size >> PAGE_CACHE_SHIFT;
1644                 if (index > end_index)
1645                         break;
1646                 if (index == end_index) {
1647                         nr = i_size & ~PAGE_CACHE_MASK;
1648                         if (nr <= offset)
1649                                 break;
1650                 }
1651
1652                 desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
1653                 if (desc->error) {
1654                         if (desc->error == -EINVAL)
1655                                 desc->error = 0;
1656                         break;
1657                 }
1658                 if (page)
1659                         unlock_page(page);
1660
1661                 /*
1662                  * We must re-evaluate i_size after the getpage, since reads
1663                  * (unlike writes) run without i_mutex protection against truncate.
1664                  */
1665                 nr = PAGE_CACHE_SIZE;
1666                 i_size = i_size_read(inode);
1667                 end_index = i_size >> PAGE_CACHE_SHIFT;
1668                 if (index == end_index) {
1669                         nr = i_size & ~PAGE_CACHE_MASK;
1670                         if (nr <= offset) {
1671                                 if (page)
1672                                         page_cache_release(page);
1673                                 break;
1674                         }
1675                 }
1676                 nr -= offset;
1677
1678                 if (page) {
1679                         /*
1680                          * If users can be writing to this page using arbitrary
1681                          * virtual addresses, take care about potential aliasing
1682                          * before reading the page on the kernel side.
1683                          */
1684                         if (mapping_writably_mapped(mapping))
1685                                 flush_dcache_page(page);
1686                         /*
1687                          * Mark the page accessed if we read the beginning.
1688                          */
1689                         if (!offset)
1690                                 mark_page_accessed(page);
1691                 } else {
1692                         page = ZERO_PAGE(0);
1693                         page_cache_get(page);
1694                 }
1695
1696                 /*
1697                  * Ok, we have the page, and it's up-to-date, so
1698                  * now we can copy it to user space...
1699                  *
1700                  * The actor routine returns how many bytes were actually used.
1701                  * NOTE! This may not be the same as how much of a user buffer
1702                  * we filled up (we may be padding, etc.), so we can only update
1703                  * "pos" here (the actor routine has to update the user buffer
1704                  * pointers and the remaining count).
1705                  */
1706                 ret = actor(desc, page, offset, nr);
1707                 offset += ret;
1708                 index += offset >> PAGE_CACHE_SHIFT;
1709                 offset &= ~PAGE_CACHE_MASK;
1710
1711                 page_cache_release(page);
1712                 if (ret != nr || !desc->count)
1713                         break;
1714
1715                 cond_resched();
1716         }
1717
1718         *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1719         file_accessed(filp);
1720 }
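/*
 * Worked example of the index/offset arithmetic above, assuming 4K
 * pages: *ppos == 6000 gives index 1, offset 1904, so nr == 2192; if
 * the actor consumes all of it, offset becomes 4096, index advances
 * to 2 and offset wraps back to 0 for the next iteration.
 */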
1721
1722 static ssize_t shmem_file_aio_read(struct kiocb *iocb,
1723                 const struct iovec *iov, unsigned long nr_segs, loff_t pos)
1724 {
1725         struct file *filp = iocb->ki_filp;
1726         ssize_t retval;
1727         unsigned long seg;
1728         size_t count;
1729         loff_t *ppos = &iocb->ki_pos;
1730
1731         retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1732         if (retval)
1733                 return retval;
1734
1735         for (seg = 0; seg < nr_segs; seg++) {
1736                 read_descriptor_t desc;
1737
1738                 desc.written = 0;
1739                 desc.arg.buf = iov[seg].iov_base;
1740                 desc.count = iov[seg].iov_len;
1741                 if (desc.count == 0)
1742                         continue;
1743                 desc.error = 0;
1744                 do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1745                 retval += desc.written;
1746                 if (desc.error) {
1747                         retval = retval ?: desc.error;
1748                         break;
1749                 }
1750                 if (desc.count > 0)
1751                         break;
1752         }
1753         return retval;
1754 }
1755
1756 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1757 {
1758         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1759
1760         buf->f_type = TMPFS_MAGIC;
1761         buf->f_bsize = PAGE_CACHE_SIZE;
1762         buf->f_namelen = NAME_MAX;
1763         spin_lock(&sbinfo->stat_lock);
1764         if (sbinfo->max_blocks) {
1765                 buf->f_blocks = sbinfo->max_blocks;
1766                 buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
1767         }
1768         if (sbinfo->max_inodes) {
1769                 buf->f_files = sbinfo->max_inodes;
1770                 buf->f_ffree = sbinfo->free_inodes;
1771         }
1772         /* else leave those fields 0 like simple_statfs */
1773         spin_unlock(&sbinfo->stat_lock);
1774         return 0;
1775 }
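/*
 * Example of the mapping above: a tmpfs mounted with size=1g on a
 * 4K-page machine reports f_bsize 4096 and f_blocks 262144; when
 * mounted without limits (max_blocks == 0) those fields stay zero,
 * just as with simple_statfs.
 */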
1776
1777 /*
1778  * File creation. Allocate an inode, and we're done.
1779  */
1780 static int
1781 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1782 {
1783         struct inode *inode;
1784         int error = -ENOSPC;
1785
1786         inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE);
1787         if (inode) {
1788                 error = security_inode_init_security(inode, dir, NULL, NULL,
1789                                                      NULL);
1790                 if (error) {
1791                         if (error != -EOPNOTSUPP) {
1792                                 iput(inode);
1793                                 return error;
1794                         }
1795                 }
1796                 error = shmem_acl_init(inode, dir);
1797                 if (error) {
1798                         iput(inode);
1799                         return error;
1800                 }
1801                 if (dir->i_mode & S_ISGID) {
1802                         inode->i_gid = dir->i_gid;
1803                         if (S_ISDIR(mode))
1804                                 inode->i_mode |= S_ISGID;
1805                 }
1806                 dir->i_size += BOGO_DIRENT_SIZE;
1807                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1808                 d_instantiate(dentry, inode);
1809                 dget(dentry); /* Extra count - pin the dentry in core */
1810         }
1811         return error;
1812 }
1813
1814 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1815 {
1816         int error;
1817
1818         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1819                 return error;
1820         inc_nlink(dir);
1821         return 0;
1822 }
1823
1824 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1825                 struct nameidata *nd)
1826 {
1827         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1828 }
1829
1830 /*
1831  * Link a file.
1832  */
1833 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1834 {
1835         struct inode *inode = old_dentry->d_inode;
1836         int ret;
1837
1838         /*
1839          * No ordinary (disk based) filesystem counts links as inodes;
1840          * but each new link needs a new dentry, pinning lowmem, and
1841          * tmpfs dentries cannot be pruned until they are unlinked.
1842          */
1843         ret = shmem_reserve_inode(inode->i_sb);
1844         if (ret)
1845                 goto out;
1846
1847         dir->i_size += BOGO_DIRENT_SIZE;
1848         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1849         inc_nlink(inode);
1850         atomic_inc(&inode->i_count);    /* New dentry reference */
1851         dget(dentry);           /* Extra pinning count for the created dentry */
1852         d_instantiate(dentry, inode);
1853 out:
1854         return ret;
1855 }
1856
1857 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1858 {
1859         struct inode *inode = dentry->d_inode;
1860
1861         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
1862                 shmem_free_inode(inode->i_sb);
1863
1864         dir->i_size -= BOGO_DIRENT_SIZE;
1865         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1866         drop_nlink(inode);
1867         dput(dentry);   /* Undo the count from "create" - this does all the work */
1868         return 0;
1869 }
1870
1871 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1872 {
1873         if (!simple_empty(dentry))
1874                 return -ENOTEMPTY;
1875
1876         drop_nlink(dentry->d_inode);
1877         drop_nlink(dir);
1878         return shmem_unlink(dir, dentry);
1879 }
1880
1881 /*
1882  * The VFS layer already does all the dentry stuff for rename;
1883  * we just have to decrement the usage count for the target if
1884  * it exists, so that the VFS layer correctly frees it when it
1885  * gets overwritten.
1886  */
1887 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1888 {
1889         struct inode *inode = old_dentry->d_inode;
1890         int they_are_dirs = S_ISDIR(inode->i_mode);
1891
1892         if (!simple_empty(new_dentry))
1893                 return -ENOTEMPTY;
1894
1895         if (new_dentry->d_inode) {
1896                 (void) shmem_unlink(new_dir, new_dentry);
1897                 if (they_are_dirs)
1898                         drop_nlink(old_dir);
1899         } else if (they_are_dirs) {
1900                 drop_nlink(old_dir);
1901                 inc_nlink(new_dir);
1902         }
1903
1904         old_dir->i_size -= BOGO_DIRENT_SIZE;
1905         new_dir->i_size += BOGO_DIRENT_SIZE;
1906         old_dir->i_ctime = old_dir->i_mtime =
1907         new_dir->i_ctime = new_dir->i_mtime =
1908         inode->i_ctime = CURRENT_TIME;
1909         return 0;
1910 }
1911
1912 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1913 {
1914         int error;
1915         int len;
1916         struct inode *inode;
1917         struct page *page = NULL;
1918         char *kaddr;
1919         struct shmem_inode_info *info;
1920
1921         len = strlen(symname) + 1;
1922         if (len > PAGE_CACHE_SIZE)
1923                 return -ENAMETOOLONG;
1924
1925         inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
1926         if (!inode)
1927                 return -ENOSPC;
1928
1929         error = security_inode_init_security(inode, dir, NULL, NULL,
1930                                              NULL);
1931         if (error) {
1932                 if (error != -EOPNOTSUPP) {
1933                         iput(inode);
1934                         return error;
1935                 }
1936                 error = 0;
1937         }
1938
1939         info = SHMEM_I(inode);
1940         inode->i_size = len-1;
1941         if (len <= (char *)inode - (char *)info) {
1942                 /* do it inline */
1943                 memcpy(info, symname, len);
1944                 inode->i_op = &shmem_symlink_inline_operations;
1945         } else {
1946                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1947                 if (error) {
1948                         iput(inode);
1949                         return error;
1950                 }
1951                 unlock_page(page);
1952                 inode->i_mapping->a_ops = &shmem_aops;
1953                 inode->i_op = &shmem_symlink_inode_operations;
1954                 kaddr = kmap_atomic(page, KM_USER0);
1955                 memcpy(kaddr, symname, len);
1956                 kunmap_atomic(kaddr, KM_USER0);
1957                 set_page_dirty(page);
1958                 page_cache_release(page);
1959         }
1960         if (dir->i_mode & S_ISGID)
1961                 inode->i_gid = dir->i_gid;
1962         dir->i_size += BOGO_DIRENT_SIZE;
1963         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1964         d_instantiate(dentry, inode);
1965         dget(dentry);
1966         return 0;
1967 }
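/*
 * The inline/paged split above, as a sketch: a target short enough to
 * fit in the shmem_inode_info area preceding the embedded vfs_inode is
 * memcpy'd there and served by shmem_follow_link_inline(); anything
 * longer is written into page 0 of the mapping via SGP_WRITE and
 * kmap()'d back in shmem_follow_link() below.
 */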
1968
1969 static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
1970 {
1971         nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
1972         return NULL;
1973 }
1974
1975 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1976 {
1977         struct page *page = NULL;
1978         int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1979         nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
1980         if (page)
1981                 unlock_page(page);
1982         return page;
1983 }
1984
1985 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1986 {
1987         if (!IS_ERR(nd_get_link(nd))) {
1988                 struct page *page = cookie;
1989                 kunmap(page);
1990                 mark_page_accessed(page);
1991                 page_cache_release(page);
1992         }
1993 }
1994
1995 static const struct inode_operations shmem_symlink_inline_operations = {
1996         .readlink       = generic_readlink,
1997         .follow_link    = shmem_follow_link_inline,
1998 };
1999
2000 static const struct inode_operations shmem_symlink_inode_operations = {
2001         .truncate       = shmem_truncate,
2002         .readlink       = generic_readlink,
2003         .follow_link    = shmem_follow_link,
2004         .put_link       = shmem_put_link,
2005 };
2006
2007 #ifdef CONFIG_TMPFS_POSIX_ACL
2008 /*
2009  * Superblocks without xattr inode operations will get security.* xattr
2010  * support from the VFS "for free". But as soon as we have any other
2011  * xattrs like ACLs, we must also implement the security.* handlers
2012  * at the filesystem level.
2013  */
2014
2015 static size_t shmem_xattr_security_list(struct inode *inode, char *list,
2016                                         size_t list_len, const char *name,
2017                                         size_t name_len)
2018 {
2019         return security_inode_listsecurity(inode, list, list_len);
2020 }
2021
2022 static int shmem_xattr_security_get(struct inode *inode, const char *name,
2023                                     void *buffer, size_t size)
2024 {
2025         if (strcmp(name, "") == 0)
2026                 return -EINVAL;
2027         return xattr_getsecurity(inode, name, buffer, size);
2028 }
2029
2030 static int shmem_xattr_security_set(struct inode *inode, const char *name,
2031                                     const void *value, size_t size, int flags)
2032 {
2033         if (strcmp(name, "") == 0)
2034                 return -EINVAL;
2035         return security_inode_setsecurity(inode, name, value, size, flags);
2036 }
2037
2038 static struct xattr_handler shmem_xattr_security_handler = {
2039         .prefix = XATTR_SECURITY_PREFIX,
2040         .list   = shmem_xattr_security_list,
2041         .get    = shmem_xattr_security_get,
2042         .set    = shmem_xattr_security_set,
2043 };
2044
2045 static struct xattr_handler *shmem_xattr_handlers[] = {
2046         &shmem_xattr_acl_access_handler,
2047         &shmem_xattr_acl_default_handler,
2048         &shmem_xattr_security_handler,
2049         NULL
2050 };
2051 #endif
2052
2053 static struct dentry *shmem_get_parent(struct dentry *child)
2054 {
2055         return ERR_PTR(-ESTALE);
2056 }
2057
2058 static int shmem_match(struct inode *ino, void *vfh)
2059 {
2060         __u32 *fh = vfh;
2061         __u64 inum = fh[2];
2062         inum = (inum << 32) | fh[1];
2063         return ino->i_ino == inum && fh[0] == ino->i_generation;
2064 }
2065
2066 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
2067                 struct fid *fid, int fh_len, int fh_type)
2068 {
2069         struct inode *inode;
2070         struct dentry *dentry = NULL;
2071         u64 inum;
2072
2073         if (fh_len < 3)         /* must check before reading fid->raw[] */
2074                 return NULL;
2075
2076         inum = ((u64)fid->raw[2] << 32) | fid->raw[1];
2077         inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
2078                         shmem_match, fid->raw);
2079         if (inode) {
2080                 dentry = d_find_alias(inode);
2081                 iput(inode);
2082         }
2083
2084         return dentry;
2085 }
2086
2087 static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
2088                                 int connectable)
2089 {
2090         struct inode *inode = dentry->d_inode;
2091
2092         if (*len < 3)
2093                 return 255;
2094
2095         if (hlist_unhashed(&inode->i_hash)) {
2096                 /* Unfortunately insert_inode_hash is not idempotent;
2097                  * since we hash inodes here rather than at creation
2098                  * time, we need a lock to ensure we only try
2099                  * to do it once.
2100                  */
2101                 static DEFINE_SPINLOCK(lock);
2102                 spin_lock(&lock);
2103                 if (hlist_unhashed(&inode->i_hash))
2104                         __insert_inode_hash(inode,
2105                                             inode->i_ino + inode->i_generation);
2106                 spin_unlock(&lock);
2107         }
2108
2109         fh[0] = inode->i_generation;
2110         fh[1] = inode->i_ino;
2111         fh[2] = ((__u64)inode->i_ino) >> 32;
2112
2113         *len = 3;
2114         return 1;
2115 }
2116
2117 static const struct export_operations shmem_export_ops = {
2118         .get_parent     = shmem_get_parent,
2119         .encode_fh      = shmem_encode_fh,
2120         .fh_to_dentry   = shmem_fh_to_dentry,
2121 };
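/*
 * Layout of the three-word file handle shared by shmem_encode_fh() and
 * shmem_fh_to_dentry()/shmem_match() above:
 *
 *	fh[0] = i_generation;
 *	fh[1] = low 32 bits of i_ino;
 *	fh[2] = high 32 bits of i_ino (zero where ino_t is 32-bit);
 *
 * with the inode hashed at i_ino + i_generation so that ilookup5()
 * can find it again from the handle alone.
 */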
2122
2123 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2124                                bool remount)
2125 {
2126         char *this_char, *value, *rest;
2127
2128         while (options != NULL) {
2129                 this_char = options;
2130                 for (;;) {
2131                         /*
2132                          * NUL-terminate this option: unfortunately,
2133                          * mount options form a comma-separated list,
2134                          * but mpol's nodelist may also contain commas.
2135                          */
2136                         options = strchr(options, ',');
2137                         if (options == NULL)
2138                                 break;
2139                         options++;
2140                         if (!isdigit(*options)) {
2141                                 options[-1] = '\0';
2142                                 break;
2143                         }
2144                 }
2145                 if (!*this_char)
2146                         continue;
2147                 if ((value = strchr(this_char,'=')) != NULL) {
2148                         *value++ = 0;
2149                 } else {
2150                         printk(KERN_ERR
2151                             "tmpfs: No value for mount option '%s'\n",
2152                             this_char);
2153                         return 1;
2154                 }
2155
2156                 if (!strcmp(this_char,"size")) {
2157                         unsigned long long size;
2158                         size = memparse(value,&rest);
2159                         if (*rest == '%') {
2160                                 size <<= PAGE_SHIFT;
2161                                 size *= totalram_pages;
2162                                 do_div(size, 100);
2163                                 rest++;
2164                         }
2165                         if (*rest)
2166                                 goto bad_val;
2167                         sbinfo->max_blocks =
2168                                 DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
2169                 } else if (!strcmp(this_char,"nr_blocks")) {
2170                         sbinfo->max_blocks = memparse(value, &rest);
2171                         if (*rest)
2172                                 goto bad_val;
2173                 } else if (!strcmp(this_char,"nr_inodes")) {
2174                         sbinfo->max_inodes = memparse(value, &rest);
2175                         if (*rest)
2176                                 goto bad_val;
2177                 } else if (!strcmp(this_char,"mode")) {
2178                         if (remount)
2179                                 continue;
2180                         sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
2181                         if (*rest)
2182                                 goto bad_val;
2183                 } else if (!strcmp(this_char,"uid")) {
2184                         if (remount)
2185                                 continue;
2186                         sbinfo->uid = simple_strtoul(value, &rest, 0);
2187                         if (*rest)
2188                                 goto bad_val;
2189                 } else if (!strcmp(this_char,"gid")) {
2190                         if (remount)
2191                                 continue;
2192                         sbinfo->gid = simple_strtoul(value, &rest, 0);
2193                         if (*rest)
2194                                 goto bad_val;
2195                 } else if (!strcmp(this_char,"mpol")) {
2196                         if (mpol_parse_str(value, &sbinfo->mpol, 1))
2197                                 goto bad_val;
2198                 } else {
2199                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2200                                this_char);
2201                         return 1;
2202                 }
2203         }
2204         return 0;
2205
2206 bad_val:
2207         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2208                value, this_char);
2209         return 1;
2210
2211 }
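/*
 * Example option strings accepted by the parser above, shown for
 * illustration:
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=1m,mode=1777 tmpfs /tmp
 *	mount -o remount,size=2g /tmp
 *
 * size takes k/m/g suffixes via memparse() or a percentage of
 * totalram_pages; the isdigit() lookahead keeps commas inside an
 * mpol nodelist (e.g. mpol=interleave:0,2) from being treated as
 * option separators.
 */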
2212
2213 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2214 {
2215         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2216         struct shmem_sb_info config = *sbinfo;
2217         unsigned long blocks;
2218         unsigned long inodes;
2219         int error = -EINVAL;
2220
2221         if (shmem_parse_options(data, &config, true))
2222                 return error;
2223
2224         spin_lock(&sbinfo->stat_lock);
2225         blocks = sbinfo->max_blocks - sbinfo->free_blocks;
2226         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2227         if (config.max_blocks < blocks)
2228                 goto out;
2229         if (config.max_inodes < inodes)
2230                 goto out;
2231         /*
2232          * Those tests also disallow limited->unlimited while any are in
2233          * use, so i_blocks will always be zero when max_blocks is zero;
2234          * but we must separately disallow unlimited->limited, because
2235          * in that case we have no record of how much is already in use.
2236          */
2237         if (config.max_blocks && !sbinfo->max_blocks)
2238                 goto out;
2239         if (config.max_inodes && !sbinfo->max_inodes)
2240                 goto out;
2241
2242         error = 0;
2243         sbinfo->max_blocks  = config.max_blocks;
2244         sbinfo->free_blocks = config.max_blocks - blocks;
2245         sbinfo->max_inodes  = config.max_inodes;
2246         sbinfo->free_inodes = config.max_inodes - inodes;
2247
2248         mpol_put(sbinfo->mpol);
2249         sbinfo->mpol        = config.mpol;      /* transfers initial ref */
2250 out:
2251         spin_unlock(&sbinfo->stat_lock);
2252         return error;
2253 }
2254
2255 static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
2256 {
2257         struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
2258
2259         if (sbinfo->max_blocks != shmem_default_max_blocks())
2260                 seq_printf(seq, ",size=%luk",
2261                         sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2262         if (sbinfo->max_inodes != shmem_default_max_inodes())
2263                 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2264         if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2265                 seq_printf(seq, ",mode=%03o", sbinfo->mode);
2266         if (sbinfo->uid != 0)
2267                 seq_printf(seq, ",uid=%u", sbinfo->uid);
2268         if (sbinfo->gid != 0)
2269                 seq_printf(seq, ",gid=%u", sbinfo->gid);
2270         shmem_show_mpol(seq, sbinfo->mpol);
2271         return 0;
2272 }
2273 #endif /* CONFIG_TMPFS */
2274
2275 static void shmem_put_super(struct super_block *sb)
2276 {
2277         kfree(sb->s_fs_info);
2278         sb->s_fs_info = NULL;
2279 }
2280
2281 static int shmem_fill_super(struct super_block *sb,
2282                             void *data, int silent)
2283 {
2284         struct inode *inode;
2285         struct dentry *root;
2286         struct shmem_sb_info *sbinfo;
2287         int err = -ENOMEM;
2288
2289         /* Round up to L1_CACHE_BYTES to resist false sharing */
2290         sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
2291                                 L1_CACHE_BYTES), GFP_KERNEL);
2292         if (!sbinfo)
2293                 return -ENOMEM;
2294
2295         sbinfo->max_blocks = 0;
2296         sbinfo->max_inodes = 0;
2297         sbinfo->mode = S_IRWXUGO | S_ISVTX;
2298         sbinfo->uid = current_fsuid();
2299         sbinfo->gid = current_fsgid();
2300         sbinfo->mpol = NULL;
2301         sb->s_fs_info = sbinfo;
2302
2303 #ifdef CONFIG_TMPFS
2304         /*
2305          * By default we only allow half of the physical RAM per
2306          * tmpfs instance, limiting inodes to one per page of lowmem;
2307          * but the internal instance is left unlimited.
2308          */
2309         if (!(sb->s_flags & MS_NOUSER)) {
2310                 sbinfo->max_blocks = shmem_default_max_blocks();
2311                 sbinfo->max_inodes = shmem_default_max_inodes();
2312                 if (shmem_parse_options(data, sbinfo, false)) {
2313                         err = -EINVAL;
2314                         goto failed;
2315                 }
2316         }
2317         sb->s_export_op = &shmem_export_ops;
2318 #else
2319         sb->s_flags |= MS_NOUSER;
2320 #endif
2321
2322         spin_lock_init(&sbinfo->stat_lock);
2323         sbinfo->free_blocks = sbinfo->max_blocks;
2324         sbinfo->free_inodes = sbinfo->max_inodes;
2325
2326         sb->s_maxbytes = SHMEM_MAX_BYTES;
2327         sb->s_blocksize = PAGE_CACHE_SIZE;
2328         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2329         sb->s_magic = TMPFS_MAGIC;
2330         sb->s_op = &shmem_ops;
2331         sb->s_time_gran = 1;
2332 #ifdef CONFIG_TMPFS_POSIX_ACL
2333         sb->s_xattr = shmem_xattr_handlers;
2334         sb->s_flags |= MS_POSIXACL;
2335 #endif
2336
2337         inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
2338         if (!inode)
2339                 goto failed;
2340         inode->i_uid = sbinfo->uid;
2341         inode->i_gid = sbinfo->gid;
2342         root = d_alloc_root(inode);
2343         if (!root)
2344                 goto failed_iput;
2345         sb->s_root = root;
2346         return 0;
2347
2348 failed_iput:
2349         iput(inode);
2350 failed:
2351         shmem_put_super(sb);
2352         return err;
2353 }
2354
2355 static struct kmem_cache *shmem_inode_cachep;
2356
2357 static struct inode *shmem_alloc_inode(struct super_block *sb)
2358 {
2359         struct shmem_inode_info *p;
2360         p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2361         if (!p)
2362                 return NULL;
2363         return &p->vfs_inode;
2364 }
2365
2366 static void shmem_destroy_inode(struct inode *inode)
2367 {
2368         if ((inode->i_mode & S_IFMT) == S_IFREG) {
2369                 /* only struct inode is valid if it's an inline symlink */
2370                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2371         }
2372         shmem_acl_destroy_inode(inode);
2373         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2374 }
2375
2376 static void init_once(void *foo)
2377 {
2378         struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
2379
2380         inode_init_once(&p->vfs_inode);
2381 #ifdef CONFIG_TMPFS_POSIX_ACL
2382         p->i_acl = NULL;
2383         p->i_default_acl = NULL;
2384 #endif
2385 }
2386
2387 static int init_inodecache(void)
2388 {
2389         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2390                                 sizeof(struct shmem_inode_info),
2391                                 0, SLAB_PANIC, init_once);
2392         return 0;
2393 }
2394
2395 static void destroy_inodecache(void)
2396 {
2397         kmem_cache_destroy(shmem_inode_cachep);
2398 }
2399
2400 static const struct address_space_operations shmem_aops = {
2401         .writepage      = shmem_writepage,
2402         .set_page_dirty = __set_page_dirty_no_writeback,
2403 #ifdef CONFIG_TMPFS
2404         .readpage       = shmem_readpage,
2405         .write_begin    = shmem_write_begin,
2406         .write_end      = shmem_write_end,
2407 #endif
2408         .migratepage    = migrate_page,
2409 };
2410
2411 static const struct file_operations shmem_file_operations = {
2412         .mmap           = shmem_mmap,
2413 #ifdef CONFIG_TMPFS
2414         .llseek         = generic_file_llseek,
2415         .read           = do_sync_read,
2416         .write          = do_sync_write,
2417         .aio_read       = shmem_file_aio_read,
2418         .aio_write      = generic_file_aio_write,
2419         .fsync          = simple_sync_file,
2420         .splice_read    = generic_file_splice_read,
2421         .splice_write   = generic_file_splice_write,
2422 #endif
2423 };
2424
2425 static const struct inode_operations shmem_inode_operations = {
2426         .truncate       = shmem_truncate,
2427         .setattr        = shmem_notify_change,
2428         .truncate_range = shmem_truncate_range,
2429 #ifdef CONFIG_TMPFS_POSIX_ACL
2430         .setxattr       = generic_setxattr,
2431         .getxattr       = generic_getxattr,
2432         .listxattr      = generic_listxattr,
2433         .removexattr    = generic_removexattr,
2434         .permission     = shmem_permission,
2435 #endif
2436
2437 };
2438
2439 static const struct inode_operations shmem_dir_inode_operations = {
2440 #ifdef CONFIG_TMPFS
2441         .create         = shmem_create,
2442         .lookup         = simple_lookup,
2443         .link           = shmem_link,
2444         .unlink         = shmem_unlink,
2445         .symlink        = shmem_symlink,
2446         .mkdir          = shmem_mkdir,
2447         .rmdir          = shmem_rmdir,
2448         .mknod          = shmem_mknod,
2449         .rename         = shmem_rename,
2450 #endif
2451 #ifdef CONFIG_TMPFS_POSIX_ACL
2452         .setattr        = shmem_notify_change,
2453         .setxattr       = generic_setxattr,
2454         .getxattr       = generic_getxattr,
2455         .listxattr      = generic_listxattr,
2456         .removexattr    = generic_removexattr,
2457         .permission     = shmem_permission,
2458 #endif
2459 };
2460
2461 static const struct inode_operations shmem_special_inode_operations = {
2462 #ifdef CONFIG_TMPFS_POSIX_ACL
2463         .setattr        = shmem_notify_change,
2464         .setxattr       = generic_setxattr,
2465         .getxattr       = generic_getxattr,
2466         .listxattr      = generic_listxattr,
2467         .removexattr    = generic_removexattr,
2468         .permission     = shmem_permission,
2469 #endif
2470 };
2471
2472 static const struct super_operations shmem_ops = {
2473         .alloc_inode    = shmem_alloc_inode,
2474         .destroy_inode  = shmem_destroy_inode,
2475 #ifdef CONFIG_TMPFS
2476         .statfs         = shmem_statfs,
2477         .remount_fs     = shmem_remount_fs,
2478         .show_options   = shmem_show_options,
2479 #endif
2480         .delete_inode   = shmem_delete_inode,
2481         .drop_inode     = generic_delete_inode,
2482         .put_super      = shmem_put_super,
2483 };
2484
2485 static struct vm_operations_struct shmem_vm_ops = {
2486         .fault          = shmem_fault,
2487 #ifdef CONFIG_NUMA
2488         .set_policy     = shmem_set_policy,
2489         .get_policy     = shmem_get_policy,
2490 #endif
2491 };
2492
2493
2494 static int shmem_get_sb(struct file_system_type *fs_type,
2495         int flags, const char *dev_name, void *data, struct vfsmount *mnt)
2496 {
2497         return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
2498 }
2499
2500 static struct file_system_type tmpfs_fs_type = {
2501         .owner          = THIS_MODULE,
2502         .name           = "tmpfs",
2503         .get_sb         = shmem_get_sb,
2504         .kill_sb        = kill_litter_super,
2505 };
2506
2507 static int __init init_tmpfs(void)
2508 {
2509         int error;
2510
2511         error = bdi_init(&shmem_backing_dev_info);
2512         if (error)
2513                 goto out4;
2514
2515         error = init_inodecache();
2516         if (error)
2517                 goto out3;
2518
2519         error = register_filesystem(&tmpfs_fs_type);
2520         if (error) {
2521                 printk(KERN_ERR "Could not register tmpfs\n");
2522                 goto out2;
2523         }
2524
2525         shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
2526                                 tmpfs_fs_type.name, NULL);
2527         if (IS_ERR(shm_mnt)) {
2528                 error = PTR_ERR(shm_mnt);
2529                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2530                 goto out1;
2531         }
2532         return 0;
2533
2534 out1:
2535         unregister_filesystem(&tmpfs_fs_type);
2536 out2:
2537         destroy_inodecache();
2538 out3:
2539         bdi_destroy(&shmem_backing_dev_info);
2540 out4:
2541         shm_mnt = ERR_PTR(error);
2542         return error;
2543 }
2544
2545 #else /* !CONFIG_SHMEM */
2546
2547 /*
2548  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
2549  *
2550  * This is intended for small systems where the benefits of the full
2551  * shmem code (swap-backed and resource-limited) are outweighed by
2552  * its complexity. On systems without swap this code should be
2553  * effectively equivalent, but much lighter weight.
2554  */
2555
2556 #include <linux/ramfs.h>
2557
2558 static struct file_system_type tmpfs_fs_type = {
2559         .name           = "tmpfs",
2560         .get_sb         = ramfs_get_sb,
2561         .kill_sb        = kill_litter_super,
2562 };
2563
2564 static int __init init_tmpfs(void)
2565 {
2566         BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);
2567
2568         shm_mnt = kern_mount(&tmpfs_fs_type);
2569         BUG_ON(IS_ERR(shm_mnt));
2570
2571         return 0;
2572 }
2573
2574 int shmem_unuse(swp_entry_t entry, struct page *page)
2575 {
2576         return 0;
2577 }
2578
2579 #define shmem_vm_ops                            generic_file_vm_ops
2580 #define shmem_file_operations                   ramfs_file_operations
2581 #define shmem_get_inode(sb, mode, dev, flags)   ramfs_get_inode(sb, mode, dev)
2582 #define shmem_acct_size(flags, size)            0
2583 #define shmem_unacct_size(flags, size)          do {} while (0)
2584 #define SHMEM_MAX_BYTES                         LLONG_MAX
2585
2586 #endif /* CONFIG_SHMEM */
2587
2588 /* common code */
2589
2590 /**
2591  * shmem_file_setup - get an unlinked file living in tmpfs
2592  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2593  * @size: size to be set for the file
2594  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2595  */
2596 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
2597 {
2598         int error;
2599         struct file *file;
2600         struct inode *inode;
2601         struct dentry *dentry, *root;
2602         struct qstr this;
2603
2604         if (IS_ERR(shm_mnt))
2605                 return (void *)shm_mnt;
2606
2607         if (size < 0 || size > SHMEM_MAX_BYTES)
2608                 return ERR_PTR(-EINVAL);
2609
2610         if (shmem_acct_size(flags, size))
2611                 return ERR_PTR(-ENOMEM);
2612
2613         error = -ENOMEM;
2614         this.name = name;
2615         this.len = strlen(name);
2616         this.hash = 0; /* will go */
2617         root = shm_mnt->mnt_root;
2618         dentry = d_alloc(root, &this);
2619         if (!dentry)
2620                 goto put_memory;
2621
2622         error = -ENFILE;
2623         file = get_empty_filp();
2624         if (!file)
2625                 goto put_dentry;
2626
2627         error = -ENOSPC;
2628         inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags);
2629         if (!inode)
2630                 goto close_file;
2631
2632         d_instantiate(dentry, inode);
2633         inode->i_size = size;
2634         inode->i_nlink = 0;     /* It is unlinked */
2635         init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
2636                   &shmem_file_operations);
2637
2638 #ifndef CONFIG_MMU
2639         error = ramfs_nommu_expand_for_mapping(inode, size);
2640         if (error)
2641                 goto close_file;
2642 #endif
2643         return file;
2644
2645 close_file:
2646         put_filp(file);
2647 put_dentry:
2648         dput(dentry);
2649 put_memory:
2650         shmem_unacct_size(flags, size);
2651         return ERR_PTR(error);
2652 }
2653 EXPORT_SYMBOL_GPL(shmem_file_setup);
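/*
 * Minimal in-kernel usage sketch (hypothetical caller, error handling
 * beyond the setup call elided):
 *
 *	struct file *filp = shmem_file_setup("dev/example", 1024 * 1024, 0);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	...
 *	fput(filp);
 *
 * The name "dev/example" is only what will show in /proc/<pid>/maps;
 * the file itself is unlinked from the start.
 */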
2654
2655 /**
2656  * shmem_zero_setup - setup a shared anonymous mapping
2657  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
2658  */
2659 int shmem_zero_setup(struct vm_area_struct *vma)
2660 {
2661         struct file *file;
2662         loff_t size = vma->vm_end - vma->vm_start;
2663
2664         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2665         if (IS_ERR(file))
2666                 return PTR_ERR(file);
2667
2668         if (vma->vm_file)
2669                 fput(vma->vm_file);
2670         vma->vm_file = file;
2671         vma->vm_ops = &shmem_vm_ops;
2672         return 0;
2673 }
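/*
 * shmem_zero_setup() is what backs shared anonymous mappings; the
 * user-visible effect, as a sketch:
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * after which related processes (e.g. across fork()) share the pages
 * through the unlinked "dev/zero" tmpfs file installed above.
 */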
2674
2675 module_init(init_tmpfs)