mm: don't mark_page_accessed in shmem_fault
mm/shmem.c
1 /*
2  * Resizable virtual memory filesystem for Linux.
3  *
4  * Copyright (C) 2000 Linus Torvalds.
5  *               2000 Transmeta Corp.
6  *               2000-2001 Christoph Rohland
7  *               2000-2001 SAP AG
8  *               2002 Red Hat Inc.
9  * Copyright (C) 2002-2005 Hugh Dickins.
10  * Copyright (C) 2002-2005 VERITAS Software Corporation.
11  * Copyright (C) 2004 Andi Kleen, SuSE Labs
12  *
13  * Extended attribute support for tmpfs:
14  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
15  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
16  *
17  * This file is released under the GPL.
18  */
19
20 /*
21  * This virtual memory filesystem is heavily based on the ramfs. It
22  * extends ramfs with the ability to use swap and honor resource limits,
23  * which makes it a completely usable filesystem.
24  */
25
26 #include <linux/module.h>
27 #include <linux/init.h>
28 #include <linux/fs.h>
29 #include <linux/xattr.h>
30 #include <linux/exportfs.h>
31 #include <linux/generic_acl.h>
32 #include <linux/mm.h>
33 #include <linux/mman.h>
34 #include <linux/file.h>
35 #include <linux/swap.h>
36 #include <linux/pagemap.h>
37 #include <linux/string.h>
38 #include <linux/slab.h>
39 #include <linux/backing-dev.h>
40 #include <linux/shmem_fs.h>
41 #include <linux/mount.h>
42 #include <linux/writeback.h>
43 #include <linux/vfs.h>
44 #include <linux/blkdev.h>
45 #include <linux/security.h>
46 #include <linux/swapops.h>
47 #include <linux/mempolicy.h>
48 #include <linux/namei.h>
49 #include <linux/ctype.h>
50 #include <linux/migrate.h>
51 #include <linux/highmem.h>
52 #include <linux/seq_file.h>
53 #include <linux/magic.h>
54
55 #include <asm/uaccess.h>
56 #include <asm/div64.h>
57 #include <asm/pgtable.h>
58
59 #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
60 #define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
61 #define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
62
63 #define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
64 #define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
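/*
 * For example, with 4kB PAGE_CACHE_SIZE, SHMEM_NR_DIRECT == 16 and a
 * 4-byte unsigned long (i386): ENTRIES_PER_PAGE = 1024, so
 * SHMEM_MAX_INDEX = 16 + 512*1024 * 1025, about 5.4e8 pages, and
 * SHMEM_MAX_BYTES comes to roughly 2TB per file.  With an 8-byte
 * unsigned long, ENTRIES_PER_PAGE = 512 and the limit is near 256GB.
 */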
65
66 #define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
67
68 /* info->flags needs VM_flags to handle pagein/truncate races efficiently */
69 #define SHMEM_PAGEIN     VM_READ
70 #define SHMEM_TRUNCATE   VM_WRITE
71
72 /* Definition to limit shmem_truncate's steps between cond_rescheds */
73 #define LATENCY_LIMIT    64
74
75 /* Pretend that each entry is of this size in directory's i_size */
76 #define BOGO_DIRENT_SIZE 20
77
78 /* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
79 enum sgp_type {
80         SGP_READ,       /* don't exceed i_size, don't allocate page */
81         SGP_CACHE,      /* don't exceed i_size, may allocate page */
82         SGP_DIRTY,      /* like SGP_CACHE, but set new page dirty */
83         SGP_WRITE,      /* may exceed i_size, may allocate page */
84 };
85
86 #ifdef CONFIG_TMPFS
87 static unsigned long shmem_default_max_blocks(void)
88 {
89         return totalram_pages / 2;
90 }
91
92 static unsigned long shmem_default_max_inodes(void)
93 {
94         return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
95 }
96 #endif
97
98 static int shmem_getpage(struct inode *inode, unsigned long idx,
99                          struct page **pagep, enum sgp_type sgp, int *type);
100
101 static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
102 {
103         /*
104          * The above definition of ENTRIES_PER_PAGE, and the use of
105          * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
106          * might be reconsidered if it ever diverges from PAGE_SIZE.
107          *
108          * Mobility flags are masked out as swap vectors cannot move
109          */
110         return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
111                                 PAGE_CACHE_SHIFT-PAGE_SHIFT);
112 }
113
114 static inline void shmem_dir_free(struct page *page)
115 {
116         __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
117 }
118
119 static struct page **shmem_dir_map(struct page *page)
120 {
121         return (struct page **)kmap_atomic(page, KM_USER0);
122 }
123
124 static inline void shmem_dir_unmap(struct page **dir)
125 {
126         kunmap_atomic(dir, KM_USER0);
127 }
128
129 static swp_entry_t *shmem_swp_map(struct page *page)
130 {
131         return (swp_entry_t *)kmap_atomic(page, KM_USER1);
132 }
133
134 static inline void shmem_swp_balance_unmap(void)
135 {
136         /*
137          * When passing a pointer to an i_direct entry, to code which
138          * also handles indirect entries and so will shmem_swp_unmap,
139          * we must arrange for the preempt count to remain in balance.
140          * What kmap_atomic of a lowmem page does depends on config
141          * and architecture, so pretend to kmap_atomic some lowmem page.
142          */
143         (void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
144 }
145
146 static inline void shmem_swp_unmap(swp_entry_t *entry)
147 {
148         kunmap_atomic(entry, KM_USER1);
149 }
150
151 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
152 {
153         return sb->s_fs_info;
154 }
155
156 /*
157  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
158  * for shared memory and for shared anonymous (/dev/zero) mappings
159  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
160  * consistent with the pre-accounting of private mappings ...
161  */
162 static inline int shmem_acct_size(unsigned long flags, loff_t size)
163 {
164         return (flags & VM_ACCOUNT) ?
165                 security_vm_enough_memory_kern(VM_ACCT(size)) : 0;
166 }
167
168 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
169 {
170         if (flags & VM_ACCOUNT)
171                 vm_unacct_memory(VM_ACCT(size));
172 }
173
174 /*
175  * ... whereas tmpfs objects are accounted incrementally as
176  * pages are allocated, in order to allow huge sparse files.
177  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
178  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
179  */
180 static inline int shmem_acct_block(unsigned long flags)
181 {
182         return (flags & VM_ACCOUNT) ?
183                 0 : security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE));
184 }
185
186 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
187 {
188         if (!(flags & VM_ACCOUNT))
189                 vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
190 }
191
192 static const struct super_operations shmem_ops;
193 static const struct address_space_operations shmem_aops;
194 static const struct file_operations shmem_file_operations;
195 static const struct inode_operations shmem_inode_operations;
196 static const struct inode_operations shmem_dir_inode_operations;
197 static const struct inode_operations shmem_special_inode_operations;
198 static struct vm_operations_struct shmem_vm_ops;
199
200 static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
201         .ra_pages       = 0,    /* No readahead */
202         .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
203         .unplug_io_fn   = default_unplug_io_fn,
204 };
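/*
 * BDI_CAP_NO_ACCT_AND_WRITEBACK keeps the dirty accounting and regular
 * writeback paths (pdflush, sync) away from tmpfs pages, while
 * BDI_CAP_SWAP_BACKED tells page reclaim to treat them as swap-backed,
 * like anonymous memory, rather than as file cache.
 */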
205
206 static LIST_HEAD(shmem_swaplist);
207 static DEFINE_MUTEX(shmem_swaplist_mutex);
208
209 static void shmem_free_blocks(struct inode *inode, long pages)
210 {
211         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
212         if (sbinfo->max_blocks) {
213                 spin_lock(&sbinfo->stat_lock);
214                 sbinfo->free_blocks += pages;
215                 inode->i_blocks -= pages*BLOCKS_PER_PAGE;
216                 spin_unlock(&sbinfo->stat_lock);
217         }
218 }
219
220 static int shmem_reserve_inode(struct super_block *sb)
221 {
222         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
223         if (sbinfo->max_inodes) {
224                 spin_lock(&sbinfo->stat_lock);
225                 if (!sbinfo->free_inodes) {
226                         spin_unlock(&sbinfo->stat_lock);
227                         return -ENOSPC;
228                 }
229                 sbinfo->free_inodes--;
230                 spin_unlock(&sbinfo->stat_lock);
231         }
232         return 0;
233 }
234
235 static void shmem_free_inode(struct super_block *sb)
236 {
237         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
238         if (sbinfo->max_inodes) {
239                 spin_lock(&sbinfo->stat_lock);
240                 sbinfo->free_inodes++;
241                 spin_unlock(&sbinfo->stat_lock);
242         }
243 }
244
245 /**
246  * shmem_recalc_inode - recalculate the size of an inode
247  * @inode: inode to recalc
248  *
249  * We have to calculate the free blocks since the mm can drop
250  * undirtied hole pages behind our back.
251  *
252  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
253  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
254  *
255  * It has to be called with the spinlock held.
256  */
257 static void shmem_recalc_inode(struct inode *inode)
258 {
259         struct shmem_inode_info *info = SHMEM_I(inode);
260         long freed;
261
262         freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
263         if (freed > 0) {
264                 info->alloced -= freed;
265                 shmem_unacct_blocks(info->flags, freed);
266                 shmem_free_blocks(inode, freed);
267         }
268 }
269
270 /**
271  * shmem_swp_entry - find the swap vector position in the info structure
272  * @info:  info structure for the inode
273  * @index: index of the page to find
274  * @page:  optional page to add to the structure. Has to be preset to
275  *         all zeros
276  *
277  * If there is no space allocated yet it will return NULL when
278  * page is NULL, else it will use the page for the needed block,
279  * setting it to NULL on return to indicate that it has been used.
280  *
281  * The swap vector is organized the following way:
282  *
283  * There are SHMEM_NR_DIRECT entries directly stored in the
284  * shmem_inode_info structure. So small files do not need an additional
285  * allocation.
286  *
287  * For pages with index > SHMEM_NR_DIRECT there is the pointer
288  * i_indirect which points to a page which holds in the first half
289  * doubly indirect blocks, in the second half triple indirect blocks:
290  *
291  * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
292  * following layout (for SHMEM_NR_DIRECT == 16):
293  *
294  * i_indirect -> dir --> 16-19
295  *            |      +-> 20-23
296  *            |
297  *            +-->dir2 --> 24-27
298  *            |        +-> 28-31
299  *            |        +-> 32-35
300  *            |        +-> 36-39
301  *            |
302  *            +-->dir3 --> 40-43
303  *                     +-> 44-47
304  *                     +-> 48-51
305  *                     +-> 52-55
306  */
307 static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
308 {
309         unsigned long offset;
310         struct page **dir;
311         struct page *subdir;
312
313         if (index < SHMEM_NR_DIRECT) {
314                 shmem_swp_balance_unmap();
315                 return info->i_direct+index;
316         }
317         if (!info->i_indirect) {
318                 if (page) {
319                         info->i_indirect = *page;
320                         *page = NULL;
321                 }
322                 return NULL;                    /* need another page */
323         }
324
325         index -= SHMEM_NR_DIRECT;
326         offset = index % ENTRIES_PER_PAGE;
327         index /= ENTRIES_PER_PAGE;
328         dir = shmem_dir_map(info->i_indirect);
329
330         if (index >= ENTRIES_PER_PAGE/2) {
331                 index -= ENTRIES_PER_PAGE/2;
332                 dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
333                 index %= ENTRIES_PER_PAGE;
334                 subdir = *dir;
335                 if (!subdir) {
336                         if (page) {
337                                 *dir = *page;
338                                 *page = NULL;
339                         }
340                         shmem_dir_unmap(dir);
341                         return NULL;            /* need another page */
342                 }
343                 shmem_dir_unmap(dir);
344                 dir = shmem_dir_map(subdir);
345         }
346
347         dir += index;
348         subdir = *dir;
349         if (!subdir) {
350                 if (!page || !(subdir = *page)) {
351                         shmem_dir_unmap(dir);
352                         return NULL;            /* need a page */
353                 }
354                 *dir = subdir;
355                 *page = NULL;
356         }
357         shmem_dir_unmap(dir);
358         return shmem_swp_map(subdir) + offset;
359 }
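/*
 * Worked example of the lookup above, with the artificial
 * ENTRIES_PER_PAGE = 4 of the diagram: for index 30, index becomes 14,
 * offset = 2, and index /= 4 gives 3; that is >= ENTRIES_PER_PAGE/2,
 * so we go triple-indirect: dir lands on dir2, index %= 4 gives 1
 * selecting its second block (28-31), and offset 2 there is page 30.
 */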
360
361 static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
362 {
363         long incdec = value? 1: -1;
364
365         entry->val = value;
366         info->swapped += incdec;
367         if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
368                 struct page *page = kmap_atomic_to_page(entry);
369                 set_page_private(page, page_private(page) + incdec);
370         }
371 }
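/*
 * For indirect entries, page_private() of the swap-vector page counts
 * how many swap entries it currently holds: shmem_truncate_range and
 * shmem_unuse_inode use that count to skip pages holding none.
 */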
372
373 /**
374  * shmem_swp_alloc - get the position of the swap entry for the page.
375  * @info:       info structure for the inode
376  * @index:      index of the page to find
377  * @sgp:        check and recheck i_size? skip allocation?
378  *
379  * If the entry does not exist, allocate it.
380  */
381 static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
382 {
383         struct inode *inode = &info->vfs_inode;
384         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
385         struct page *page = NULL;
386         swp_entry_t *entry;
387
388         if (sgp != SGP_WRITE &&
389             ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
390                 return ERR_PTR(-EINVAL);
391
392         while (!(entry = shmem_swp_entry(info, index, &page))) {
393                 if (sgp == SGP_READ)
394                         return shmem_swp_map(ZERO_PAGE(0));
395                 /*
396                  * Test free_blocks against 1 not 0, since we have 1 data
397                  * page (and perhaps indirect index pages) yet to allocate:
398                  * a waste to allocate index if we cannot allocate data.
399                  */
400                 if (sbinfo->max_blocks) {
401                         spin_lock(&sbinfo->stat_lock);
402                         if (sbinfo->free_blocks <= 1) {
403                                 spin_unlock(&sbinfo->stat_lock);
404                                 return ERR_PTR(-ENOSPC);
405                         }
406                         sbinfo->free_blocks--;
407                         inode->i_blocks += BLOCKS_PER_PAGE;
408                         spin_unlock(&sbinfo->stat_lock);
409                 }
410
411                 spin_unlock(&info->lock);
412                 page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
413                 if (page)
414                         set_page_private(page, 0);
415                 spin_lock(&info->lock);
416
417                 if (!page) {
418                         shmem_free_blocks(inode, 1);
419                         return ERR_PTR(-ENOMEM);
420                 }
421                 if (sgp != SGP_WRITE &&
422                     ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
423                         entry = ERR_PTR(-EINVAL);
424                         break;
425                 }
426                 if (info->next_index <= index)
427                         info->next_index = index + 1;
428         }
429         if (page) {
430                 /* another task gave its page, or truncated the file */
431                 shmem_free_blocks(inode, 1);
432                 shmem_dir_free(page);
433         }
434         if (info->next_index <= index && !IS_ERR(entry))
435                 info->next_index = index + 1;
436         return entry;
437 }
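/*
 * Note that info->lock is dropped around shmem_dir_alloc above, since
 * the allocation may sleep: the while loop then repeats the lookup
 * under the lock, and any surplus page (another task raced in, or the
 * file was truncated meanwhile) is freed once the loop is done.
 */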
438
439 /**
440  * shmem_free_swp - free some swap entries in a directory
441  * @dir:        pointer to the directory
442  * @edir:       pointer after last entry of the directory
443  * @punch_lock: pointer to spinlock when needed for the holepunch case
444  */
445 static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
446                                                 spinlock_t *punch_lock)
447 {
448         spinlock_t *punch_unlock = NULL;
449         swp_entry_t *ptr;
450         int freed = 0;
451
452         for (ptr = dir; ptr < edir; ptr++) {
453                 if (ptr->val) {
454                         if (unlikely(punch_lock)) {
455                                 punch_unlock = punch_lock;
456                                 punch_lock = NULL;
457                                 spin_lock(punch_unlock);
458                                 if (!ptr->val)
459                                         continue;
460                         }
461                         free_swap_and_cache(*ptr);
462                         *ptr = (swp_entry_t){0};
463                         freed++;
464                 }
465         }
466         if (punch_unlock)
467                 spin_unlock(punch_unlock);
468         return freed;
469 }
470
471 static int shmem_map_and_free_swp(struct page *subdir, int offset,
472                 int limit, struct page ***dir, spinlock_t *punch_lock)
473 {
474         swp_entry_t *ptr;
475         int freed = 0;
476
477         ptr = shmem_swp_map(subdir);
478         for (; offset < limit; offset += LATENCY_LIMIT) {
479                 int size = limit - offset;
480                 if (size > LATENCY_LIMIT)
481                         size = LATENCY_LIMIT;
482                 freed += shmem_free_swp(ptr+offset, ptr+offset+size,
483                                                         punch_lock);
484                 if (need_resched()) {
485                         shmem_swp_unmap(ptr);
486                         if (*dir) {
487                                 shmem_dir_unmap(*dir);
488                                 *dir = NULL;
489                         }
490                         cond_resched();
491                         ptr = shmem_swp_map(subdir);
492                 }
493         }
494         shmem_swp_unmap(ptr);
495         return freed;
496 }
497
498 static void shmem_free_pages(struct list_head *next)
499 {
500         struct page *page;
501         int freed = 0;
502
503         do {
504                 page = container_of(next, struct page, lru);
505                 next = next->next;
506                 shmem_dir_free(page);
507                 freed++;
508                 if (freed >= LATENCY_LIMIT) {
509                         cond_resched();
510                         freed = 0;
511                 }
512         } while (next);
513 }
514
515 static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
516 {
517         struct shmem_inode_info *info = SHMEM_I(inode);
518         unsigned long idx;
519         unsigned long size;
520         unsigned long limit;
521         unsigned long stage;
522         unsigned long diroff;
523         struct page **dir;
524         struct page *topdir;
525         struct page *middir;
526         struct page *subdir;
527         swp_entry_t *ptr;
528         LIST_HEAD(pages_to_free);
529         long nr_pages_to_free = 0;
530         long nr_swaps_freed = 0;
531         int offset;
532         int freed;
533         int punch_hole;
534         spinlock_t *needs_lock;
535         spinlock_t *punch_lock;
536         unsigned long upper_limit;
537
538         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
539         idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
540         if (idx >= info->next_index)
541                 return;
542
543         spin_lock(&info->lock);
544         info->flags |= SHMEM_TRUNCATE;
545         if (likely(end == (loff_t) -1)) {
546                 limit = info->next_index;
547                 upper_limit = SHMEM_MAX_INDEX;
548                 info->next_index = idx;
549                 needs_lock = NULL;
550                 punch_hole = 0;
551         } else {
552                 if (end + 1 >= inode->i_size) { /* we may free a little more */
553                         limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
554                                                         PAGE_CACHE_SHIFT;
555                         upper_limit = SHMEM_MAX_INDEX;
556                 } else {
557                         limit = (end + 1) >> PAGE_CACHE_SHIFT;
558                         upper_limit = limit;
559                 }
560                 needs_lock = &info->lock;
561                 punch_hole = 1;
562         }
563
564         topdir = info->i_indirect;
565         if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
566                 info->i_indirect = NULL;
567                 nr_pages_to_free++;
568                 list_add(&topdir->lru, &pages_to_free);
569         }
570         spin_unlock(&info->lock);
571
572         if (info->swapped && idx < SHMEM_NR_DIRECT) {
573                 ptr = info->i_direct;
574                 size = limit;
575                 if (size > SHMEM_NR_DIRECT)
576                         size = SHMEM_NR_DIRECT;
577                 nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
578         }
579
580         /*
581          * If there are no indirect blocks or we are punching a hole
582          * below indirect blocks, nothing to be done.
583          */
584         if (!topdir || limit <= SHMEM_NR_DIRECT)
585                 goto done2;
586
587         /*
588          * The truncation case has already dropped info->lock, and we're safe
589          * because i_size and next_index have already been lowered, preventing
590          * access beyond.  But in the punch_hole case, we still need to take
591          * the lock when updating the swap directory, because there might be
592          * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
593          * shmem_writepage.  However, whenever we find we can remove a whole
594          * directory page (not at the misaligned start or end of the range),
595          * we first NULLify its pointer in the level above, and then have no
596          * need to take the lock when updating its contents: needs_lock and
597          * punch_lock (either pointing to info->lock or NULL) manage this.
598          */
599
600         upper_limit -= SHMEM_NR_DIRECT;
601         limit -= SHMEM_NR_DIRECT;
602         idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
603         offset = idx % ENTRIES_PER_PAGE;
604         idx -= offset;
605
606         dir = shmem_dir_map(topdir);
607         stage = ENTRIES_PER_PAGEPAGE/2;
608         if (idx < ENTRIES_PER_PAGEPAGE/2) {
609                 middir = topdir;
610                 diroff = idx/ENTRIES_PER_PAGE;
611         } else {
612                 dir += ENTRIES_PER_PAGE/2;
613                 dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
614                 while (stage <= idx)
615                         stage += ENTRIES_PER_PAGEPAGE;
616                 middir = *dir;
617                 if (*dir) {
618                         diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
619                                 ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
620                         if (!diroff && !offset && upper_limit >= stage) {
621                                 if (needs_lock) {
622                                         spin_lock(needs_lock);
623                                         *dir = NULL;
624                                         spin_unlock(needs_lock);
625                                         needs_lock = NULL;
626                                 } else
627                                         *dir = NULL;
628                                 nr_pages_to_free++;
629                                 list_add(&middir->lru, &pages_to_free);
630                         }
631                         shmem_dir_unmap(dir);
632                         dir = shmem_dir_map(middir);
633                 } else {
634                         diroff = 0;
635                         offset = 0;
636                         idx = stage;
637                 }
638         }
639
640         for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
641                 if (unlikely(idx == stage)) {
642                         shmem_dir_unmap(dir);
643                         dir = shmem_dir_map(topdir) +
644                             ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
645                         while (!*dir) {
646                                 dir++;
647                                 idx += ENTRIES_PER_PAGEPAGE;
648                                 if (idx >= limit)
649                                         goto done1;
650                         }
651                         stage = idx + ENTRIES_PER_PAGEPAGE;
652                         middir = *dir;
653                         if (punch_hole)
654                                 needs_lock = &info->lock;
655                         if (upper_limit >= stage) {
656                                 if (needs_lock) {
657                                         spin_lock(needs_lock);
658                                         *dir = NULL;
659                                         spin_unlock(needs_lock);
660                                         needs_lock = NULL;
661                                 } else
662                                         *dir = NULL;
663                                 nr_pages_to_free++;
664                                 list_add(&middir->lru, &pages_to_free);
665                         }
666                         shmem_dir_unmap(dir);
667                         cond_resched();
668                         dir = shmem_dir_map(middir);
669                         diroff = 0;
670                 }
671                 punch_lock = needs_lock;
672                 subdir = dir[diroff];
673                 if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
674                         if (needs_lock) {
675                                 spin_lock(needs_lock);
676                                 dir[diroff] = NULL;
677                                 spin_unlock(needs_lock);
678                                 punch_lock = NULL;
679                         } else
680                                 dir[diroff] = NULL;
681                         nr_pages_to_free++;
682                         list_add(&subdir->lru, &pages_to_free);
683                 }
684                 if (subdir && page_private(subdir) /* has swap entries */) {
685                         size = limit - idx;
686                         if (size > ENTRIES_PER_PAGE)
687                                 size = ENTRIES_PER_PAGE;
688                         freed = shmem_map_and_free_swp(subdir,
689                                         offset, size, &dir, punch_lock);
690                         if (!dir)
691                                 dir = shmem_dir_map(middir);
692                         nr_swaps_freed += freed;
693                         if (offset || punch_lock) {
694                                 spin_lock(&info->lock);
695                                 set_page_private(subdir,
696                                         page_private(subdir) - freed);
697                                 spin_unlock(&info->lock);
698                         } else
699                                 BUG_ON(page_private(subdir) != freed);
700                 }
701                 offset = 0;
702         }
703 done1:
704         shmem_dir_unmap(dir);
705 done2:
706         if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
707                 /*
708                  * Call truncate_inode_pages again: racing shmem_unuse_inode
709                  * may have swizzled a page in from swap since vmtruncate or
710                  * generic_delete_inode did it, before we lowered next_index.
711                  * Also, though shmem_getpage checks i_size before adding to
712                  * cache, no recheck after: so fix the narrow window there too.
713                  *
714                  * Recalling truncate_inode_pages_range and unmap_mapping_range
715                  * every time for punch_hole (which never got a chance to clear
716                  * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
717                  * yet hardly ever necessary: try to optimize them out later.
718                  */
719                 truncate_inode_pages_range(inode->i_mapping, start, end);
720                 if (punch_hole)
721                         unmap_mapping_range(inode->i_mapping, start,
722                                                         end - start, 1);
723         }
724
725         spin_lock(&info->lock);
726         info->flags &= ~SHMEM_TRUNCATE;
727         info->swapped -= nr_swaps_freed;
728         if (nr_pages_to_free)
729                 shmem_free_blocks(inode, nr_pages_to_free);
730         shmem_recalc_inode(inode);
731         spin_unlock(&info->lock);
732
733         /*
734          * Empty swap vector directory pages to be freed?
735          */
736         if (!list_empty(&pages_to_free)) {
737                 pages_to_free.prev->next = NULL;
738                 shmem_free_pages(pages_to_free.next);
739         }
740 }
741
742 static void shmem_truncate(struct inode *inode)
743 {
744         shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
745 }
746
747 static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
748 {
749         struct inode *inode = dentry->d_inode;
750         struct page *page = NULL;
751         int error;
752
753         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
754                 if (attr->ia_size < inode->i_size) {
755                         /*
756                          * If truncating down to a partial page, then
757                          * if that page is already allocated, hold it
758                          * in memory until the truncation is over, so
759                          * truncate_partial_page cannot miss it if it
760                          * has been assigned to swap.
761                          */
762                         if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
763                                 (void) shmem_getpage(inode,
764                                         attr->ia_size>>PAGE_CACHE_SHIFT,
765                                                 &page, SGP_READ, NULL);
766                                 if (page)
767                                         unlock_page(page);
768                         }
769                         /*
770                          * Reset SHMEM_PAGEIN flag so that shmem_truncate can
771                          * detect if any pages might have been added to cache
772                          * after truncate_inode_pages.  But we needn't bother
773                          * if it's being fully truncated to zero-length: the
774                          * nrpages check is efficient enough in that case.
775                          */
776                         if (attr->ia_size) {
777                                 struct shmem_inode_info *info = SHMEM_I(inode);
778                                 spin_lock(&info->lock);
779                                 info->flags &= ~SHMEM_PAGEIN;
780                                 spin_unlock(&info->lock);
781                         }
782                 }
783         }
784
785         error = inode_change_ok(inode, attr);
786         if (!error)
787                 error = inode_setattr(inode, attr);
788 #ifdef CONFIG_TMPFS_POSIX_ACL
789         if (!error && (attr->ia_valid & ATTR_MODE))
790                 error = generic_acl_chmod(inode, &shmem_acl_ops);
791 #endif
792         if (page)
793                 page_cache_release(page);
794         return error;
795 }
796
797 static void shmem_delete_inode(struct inode *inode)
798 {
799         struct shmem_inode_info *info = SHMEM_I(inode);
800
801         if (inode->i_op->truncate == shmem_truncate) {
802                 truncate_inode_pages(inode->i_mapping, 0);
803                 shmem_unacct_size(info->flags, inode->i_size);
804                 inode->i_size = 0;
805                 shmem_truncate(inode);
806                 if (!list_empty(&info->swaplist)) {
807                         mutex_lock(&shmem_swaplist_mutex);
808                         list_del_init(&info->swaplist);
809                         mutex_unlock(&shmem_swaplist_mutex);
810                 }
811         }
812         BUG_ON(inode->i_blocks);
813         shmem_free_inode(inode->i_sb);
814         clear_inode(inode);
815 }
816
817 static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
818 {
819         swp_entry_t *ptr;
820
821         for (ptr = dir; ptr < edir; ptr++) {
822                 if (ptr->val == entry.val)
823                         return ptr - dir;
824         }
825         return -1;
826 }
827
828 static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
829 {
830         struct inode *inode;
831         unsigned long idx;
832         unsigned long size;
833         unsigned long limit;
834         unsigned long stage;
835         struct page **dir;
836         struct page *subdir;
837         swp_entry_t *ptr;
838         int offset;
839         int error;
840
841         idx = 0;
842         ptr = info->i_direct;
843         spin_lock(&info->lock);
844         if (!info->swapped) {
845                 list_del_init(&info->swaplist);
846                 goto lost2;
847         }
848         limit = info->next_index;
849         size = limit;
850         if (size > SHMEM_NR_DIRECT)
851                 size = SHMEM_NR_DIRECT;
852         offset = shmem_find_swp(entry, ptr, ptr+size);
853         if (offset >= 0)
854                 goto found;
855         if (!info->i_indirect)
856                 goto lost2;
857
858         dir = shmem_dir_map(info->i_indirect);
859         stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;
860
861         for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
862                 if (unlikely(idx == stage)) {
863                         shmem_dir_unmap(dir-1);
864                         if (cond_resched_lock(&info->lock)) {
865                                 /* check it has not been truncated */
866                                 if (limit > info->next_index) {
867                                         limit = info->next_index;
868                                         if (idx >= limit)
869                                                 goto lost2;
870                                 }
871                         }
872                         dir = shmem_dir_map(info->i_indirect) +
873                             ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
874                         while (!*dir) {
875                                 dir++;
876                                 idx += ENTRIES_PER_PAGEPAGE;
877                                 if (idx >= limit)
878                                         goto lost1;
879                         }
880                         stage = idx + ENTRIES_PER_PAGEPAGE;
881                         subdir = *dir;
882                         shmem_dir_unmap(dir);
883                         dir = shmem_dir_map(subdir);
884                 }
885                 subdir = *dir;
886                 if (subdir && page_private(subdir)) {
887                         ptr = shmem_swp_map(subdir);
888                         size = limit - idx;
889                         if (size > ENTRIES_PER_PAGE)
890                                 size = ENTRIES_PER_PAGE;
891                         offset = shmem_find_swp(entry, ptr, ptr+size);
892                         shmem_swp_unmap(ptr);
893                         if (offset >= 0) {
894                                 shmem_dir_unmap(dir);
895                                 goto found;
896                         }
897                 }
898         }
899 lost1:
900         shmem_dir_unmap(dir-1);
901 lost2:
902         spin_unlock(&info->lock);
903         return 0;
904 found:
905         idx += offset;
906         inode = igrab(&info->vfs_inode);
907         spin_unlock(&info->lock);
908
909         /*
910          * Move _head_ to start search for next from here.
911          * But be careful: shmem_delete_inode checks list_empty without taking
912          * mutex, and there's an instant in list_move_tail when info->swaplist
913          * would appear empty, if it were the only one on shmem_swaplist.  We
914          * could avoid doing it if inode NULL; or use this minor optimization.
915          */
916         if (shmem_swaplist.next != &info->swaplist)
917                 list_move_tail(&shmem_swaplist, &info->swaplist);
918         mutex_unlock(&shmem_swaplist_mutex);
919
920         error = 1;
921         if (!inode)
922                 goto out;
923         /* Precharge page using GFP_KERNEL while we can wait */
924         error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
925         if (error)
926                 goto out;
927         error = radix_tree_preload(GFP_KERNEL);
928         if (error) {
929                 mem_cgroup_uncharge_cache_page(page);
930                 goto out;
931         }
932         error = 1;
933
934         spin_lock(&info->lock);
935         ptr = shmem_swp_entry(info, idx, NULL);
936         if (ptr && ptr->val == entry.val) {
937                 error = add_to_page_cache_locked(page, inode->i_mapping,
938                                                 idx, GFP_NOWAIT);
939                 /* does mem_cgroup_uncharge_cache_page on error */
940         } else  /* we must compensate for our precharge above */
941                 mem_cgroup_uncharge_cache_page(page);
942
943         if (error == -EEXIST) {
944                 struct page *filepage = find_get_page(inode->i_mapping, idx);
945                 error = 1;
946                 if (filepage) {
947                         /*
948                          * There might be a more uptodate page coming down
949                          * from a stacked writepage: forget our swappage if so.
950                          */
951                         if (PageUptodate(filepage))
952                                 error = 0;
953                         page_cache_release(filepage);
954                 }
955         }
956         if (!error) {
957                 delete_from_swap_cache(page);
958                 set_page_dirty(page);
959                 info->flags |= SHMEM_PAGEIN;
960                 shmem_swp_set(info, ptr, 0);
961                 swap_free(entry);
962                 error = 1;      /* not an error, but entry was found */
963         }
964         if (ptr)
965                 shmem_swp_unmap(ptr);
966         spin_unlock(&info->lock);
967         radix_tree_preload_end();
968 out:
969         unlock_page(page);
970         page_cache_release(page);
971         iput(inode);            /* allows for NULL */
972         return error;
973 }
974
975 /*
976  * shmem_unuse() searches for a possibly swapped-out shmem page.
977  */
978 int shmem_unuse(swp_entry_t entry, struct page *page)
979 {
980         struct list_head *p, *next;
981         struct shmem_inode_info *info;
982         int found = 0;
983
984         mutex_lock(&shmem_swaplist_mutex);
985         list_for_each_safe(p, next, &shmem_swaplist) {
986                 info = list_entry(p, struct shmem_inode_info, swaplist);
987                 found = shmem_unuse_inode(info, entry, page);
988                 cond_resched();
989                 if (found)
990                         goto out;
991         }
992         mutex_unlock(&shmem_swaplist_mutex);
993 out:    return found;   /* 0 or 1 or -ENOMEM */
994 }
995
996 /*
997  * Move the page from the page cache to the swap cache.
998  */
999 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1000 {
1001         struct shmem_inode_info *info;
1002         swp_entry_t *entry, swap;
1003         struct address_space *mapping;
1004         unsigned long index;
1005         struct inode *inode;
1006
1007         BUG_ON(!PageLocked(page));
1008         mapping = page->mapping;
1009         index = page->index;
1010         inode = mapping->host;
1011         info = SHMEM_I(inode);
1012         if (info->flags & VM_LOCKED)
1013                 goto redirty;
1014         if (!total_swap_pages)
1015                 goto redirty;
1016
1017         /*
1018          * shmem_backing_dev_info's capabilities prevent regular writeback or
1019          * sync from ever calling shmem_writepage; but a stacking filesystem
1020          * may use the ->writepage of its underlying filesystem, in which case
1021          * tmpfs should write out to swap only in response to memory pressure,
1022          * and not for pdflush or sync.  However, in those cases, we do still
1023          * want to check if there's a redundant swappage to be discarded.
1024          */
1025         if (wbc->for_reclaim)
1026                 swap = get_swap_page();
1027         else
1028                 swap.val = 0;
1029
1030         spin_lock(&info->lock);
1031         if (index >= info->next_index) {
1032                 BUG_ON(!(info->flags & SHMEM_TRUNCATE));
1033                 goto unlock;
1034         }
1035         entry = shmem_swp_entry(info, index, NULL);
1036         if (entry->val) {
1037                 /*
1038                  * The more uptodate page coming down from a stacked
1039                  * writepage should replace our old swappage.
1040                  */
1041                 free_swap_and_cache(*entry);
1042                 shmem_swp_set(info, entry, 0);
1043         }
1044         shmem_recalc_inode(inode);
1045
1046         if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
1047                 remove_from_page_cache(page);
1048                 shmem_swp_set(info, entry, swap.val);
1049                 shmem_swp_unmap(entry);
1050                 if (list_empty(&info->swaplist))
1051                         inode = igrab(inode);
1052                 else
1053                         inode = NULL;
1054                 spin_unlock(&info->lock);
1055                 swap_duplicate(swap);
1056                 BUG_ON(page_mapped(page));
1057                 page_cache_release(page);       /* pagecache ref */
1058                 set_page_dirty(page);
1059                 unlock_page(page);
1060                 if (inode) {
1061                         mutex_lock(&shmem_swaplist_mutex);
1062                         /* move instead of add in case we're racing */
1063                         list_move_tail(&info->swaplist, &shmem_swaplist);
1064                         mutex_unlock(&shmem_swaplist_mutex);
1065                         iput(inode);
1066                 }
1067                 return 0;
1068         }
1069
1070         shmem_swp_unmap(entry);
1071 unlock:
1072         spin_unlock(&info->lock);
1073         swap_free(swap);
1074 redirty:
1075         set_page_dirty(page);
1076         if (wbc->for_reclaim)
1077                 return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
1078         unlock_page(page);
1079         return 0;
1080 }
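/*
 * On the success path above the page changes identity: it leaves the
 * page cache and enters the swap cache, with its swp_entry_t recorded
 * in the shmem swap vector so a later shmem_getpage can find it again.
 */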
1081
1082 #ifdef CONFIG_NUMA
1083 #ifdef CONFIG_TMPFS
1084 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1085 {
1086         char buffer[64];
1087
1088         if (!mpol || mpol->mode == MPOL_DEFAULT)
1089                 return;         /* show nothing */
1090
1091         mpol_to_str(buffer, sizeof(buffer), mpol, 1);
1092
1093         seq_printf(seq, ",mpol=%s", buffer);
1094 }
1095
1096 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1097 {
1098         struct mempolicy *mpol = NULL;
1099         if (sbinfo->mpol) {
1100                 spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
1101                 mpol = sbinfo->mpol;
1102                 mpol_get(mpol);
1103                 spin_unlock(&sbinfo->stat_lock);
1104         }
1105         return mpol;
1106 }
1107 #endif /* CONFIG_TMPFS */
1108
1109 static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
1110                         struct shmem_inode_info *info, unsigned long idx)
1111 {
1112         struct mempolicy mpol, *spol;
1113         struct vm_area_struct pvma;
1114         struct page *page;
1115
1116         spol = mpol_cond_copy(&mpol,
1117                                 mpol_shared_policy_lookup(&info->policy, idx));
1118
1119         /* Create a pseudo vma that just contains the policy */
1120         pvma.vm_start = 0;
1121         pvma.vm_pgoff = idx;
1122         pvma.vm_ops = NULL;
1123         pvma.vm_policy = spol;
1124         page = swapin_readahead(entry, gfp, &pvma, 0);
1125         return page;
1126 }
1127
1128 static struct page *shmem_alloc_page(gfp_t gfp,
1129                         struct shmem_inode_info *info, unsigned long idx)
1130 {
1131         struct vm_area_struct pvma;
1132
1133         /* Create a pseudo vma that just contains the policy */
1134         pvma.vm_start = 0;
1135         pvma.vm_pgoff = idx;
1136         pvma.vm_ops = NULL;
1137         pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
1138
1139         /*
1140          * alloc_page_vma() will drop the shared policy reference
1141          */
1142         return alloc_page_vma(gfp, &pvma, 0);
1143 }
1144 #else /* !CONFIG_NUMA */
1145 #ifdef CONFIG_TMPFS
1146 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
1147 {
1148 }
1149 #endif /* CONFIG_TMPFS */
1150
1151 static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
1152                         struct shmem_inode_info *info, unsigned long idx)
1153 {
1154         return swapin_readahead(entry, gfp, NULL, 0);
1155 }
1156
1157 static inline struct page *shmem_alloc_page(gfp_t gfp,
1158                         struct shmem_inode_info *info, unsigned long idx)
1159 {
1160         return alloc_page(gfp);
1161 }
1162 #endif /* CONFIG_NUMA */
1163
1164 #if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
1165 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1166 {
1167         return NULL;
1168 }
1169 #endif
1170
1171 /*
1172  * shmem_getpage - either get the page from swap or allocate a new one
1173  *
1174  * If we allocate a new one we do not mark it dirty. That's up to the
1175  * vm. If we swap it in we mark it dirty, since we also free the swap
1176  * entry: a page cannot live in both the swap and page cache.
1177  */
1178 static int shmem_getpage(struct inode *inode, unsigned long idx,
1179                         struct page **pagep, enum sgp_type sgp, int *type)
1180 {
1181         struct address_space *mapping = inode->i_mapping;
1182         struct shmem_inode_info *info = SHMEM_I(inode);
1183         struct shmem_sb_info *sbinfo;
1184         struct page *filepage = *pagep;
1185         struct page *swappage;
1186         swp_entry_t *entry;
1187         swp_entry_t swap;
1188         gfp_t gfp;
1189         int error;
1190
1191         if (idx >= SHMEM_MAX_INDEX)
1192                 return -EFBIG;
1193
1194         if (type)
1195                 *type = 0;
1196
1197         /*
1198          * Normally, filepage is NULL on entry, and either found
1199          * uptodate immediately, or allocated and zeroed, or read
1200          * in under swappage, which is then assigned to filepage.
1201          * But shmem_readpage (required for splice) passes in a locked
1202          * filepage, which may be found not uptodate by other callers
1203          * too, and may need to be copied from the swappage read in.
1204          */
1205 repeat:
1206         if (!filepage)
1207                 filepage = find_lock_page(mapping, idx);
1208         if (filepage && PageUptodate(filepage))
1209                 goto done;
1210         error = 0;
1211         gfp = mapping_gfp_mask(mapping);
1212         if (!filepage) {
1213                 /*
1214                  * Try to preload while we can wait, to not make a habit of
1215                  * draining atomic reserves; but don't latch on to this cpu.
1216                  */
1217                 error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
1218                 if (error)
1219                         goto failed;
1220                 radix_tree_preload_end();
1221         }
1222
1223         spin_lock(&info->lock);
1224         shmem_recalc_inode(inode);
1225         entry = shmem_swp_alloc(info, idx, sgp);
1226         if (IS_ERR(entry)) {
1227                 spin_unlock(&info->lock);
1228                 error = PTR_ERR(entry);
1229                 goto failed;
1230         }
1231         swap = *entry;
1232
1233         if (swap.val) {
1234                 /* Look it up and read it in.. */
1235                 swappage = lookup_swap_cache(swap);
1236                 if (!swappage) {
1237                         shmem_swp_unmap(entry);
1238                         /* here we actually do the io */
1239                         if (type && !(*type & VM_FAULT_MAJOR)) {
1240                                 __count_vm_event(PGMAJFAULT);
1241                                 *type |= VM_FAULT_MAJOR;
1242                         }
1243                         spin_unlock(&info->lock);
1244                         swappage = shmem_swapin(swap, gfp, info, idx);
1245                         if (!swappage) {
1246                                 spin_lock(&info->lock);
1247                                 entry = shmem_swp_alloc(info, idx, sgp);
1248                                 if (IS_ERR(entry))
1249                                         error = PTR_ERR(entry);
1250                                 else {
1251                                         if (entry->val == swap.val)
1252                                                 error = -ENOMEM;
1253                                         shmem_swp_unmap(entry);
1254                                 }
1255                                 spin_unlock(&info->lock);
1256                                 if (error)
1257                                         goto failed;
1258                                 goto repeat;
1259                         }
1260                         wait_on_page_locked(swappage);
1261                         page_cache_release(swappage);
1262                         goto repeat;
1263                 }
1264
1265                 /* We have to do this with page locked to prevent races */
1266                 if (!trylock_page(swappage)) {
1267                         shmem_swp_unmap(entry);
1268                         spin_unlock(&info->lock);
1269                         wait_on_page_locked(swappage);
1270                         page_cache_release(swappage);
1271                         goto repeat;
1272                 }
1273                 if (PageWriteback(swappage)) {
1274                         shmem_swp_unmap(entry);
1275                         spin_unlock(&info->lock);
1276                         wait_on_page_writeback(swappage);
1277                         unlock_page(swappage);
1278                         page_cache_release(swappage);
1279                         goto repeat;
1280                 }
1281                 if (!PageUptodate(swappage)) {
1282                         shmem_swp_unmap(entry);
1283                         spin_unlock(&info->lock);
1284                         unlock_page(swappage);
1285                         page_cache_release(swappage);
1286                         error = -EIO;
1287                         goto failed;
1288                 }
1289
1290                 if (filepage) {
1291                         shmem_swp_set(info, entry, 0);
1292                         shmem_swp_unmap(entry);
1293                         delete_from_swap_cache(swappage);
1294                         spin_unlock(&info->lock);
1295                         copy_highpage(filepage, swappage);
1296                         unlock_page(swappage);
1297                         page_cache_release(swappage);
1298                         flush_dcache_page(filepage);
1299                         SetPageUptodate(filepage);
1300                         set_page_dirty(filepage);
1301                         swap_free(swap);
1302                 } else if (!(error = add_to_page_cache_locked(swappage, mapping,
1303                                         idx, GFP_NOWAIT))) {
1304                         info->flags |= SHMEM_PAGEIN;
1305                         shmem_swp_set(info, entry, 0);
1306                         shmem_swp_unmap(entry);
1307                         delete_from_swap_cache(swappage);
1308                         spin_unlock(&info->lock);
1309                         filepage = swappage;
1310                         set_page_dirty(filepage);
1311                         swap_free(swap);
1312                 } else {
1313                         shmem_swp_unmap(entry);
1314                         spin_unlock(&info->lock);
1315                         unlock_page(swappage);
1316                         page_cache_release(swappage);
1317                         if (error == -ENOMEM) {
1318                                 /* allow reclaim from this memory cgroup */
1319                                 error = mem_cgroup_shrink_usage(current->mm,
1320                                                                 gfp);
1321                                 if (error)
1322                                         goto failed;
1323                         }
1324                         goto repeat;
1325                 }
1326         } else if (sgp == SGP_READ && !filepage) {
1327                 shmem_swp_unmap(entry);
1328                 filepage = find_get_page(mapping, idx);
1329                 if (filepage &&
1330                     (!PageUptodate(filepage) || !trylock_page(filepage))) {
1331                         spin_unlock(&info->lock);
1332                         wait_on_page_locked(filepage);
1333                         page_cache_release(filepage);
1334                         filepage = NULL;
1335                         goto repeat;
1336                 }
1337                 spin_unlock(&info->lock);
1338         } else {
1339                 shmem_swp_unmap(entry);
1340                 sbinfo = SHMEM_SB(inode->i_sb);
1341                 if (sbinfo->max_blocks) {
1342                         spin_lock(&sbinfo->stat_lock);
1343                         if (sbinfo->free_blocks == 0 ||
1344                             shmem_acct_block(info->flags)) {
1345                                 spin_unlock(&sbinfo->stat_lock);
1346                                 spin_unlock(&info->lock);
1347                                 error = -ENOSPC;
1348                                 goto failed;
1349                         }
1350                         sbinfo->free_blocks--;
1351                         inode->i_blocks += BLOCKS_PER_PAGE;
1352                         spin_unlock(&sbinfo->stat_lock);
1353                 } else if (shmem_acct_block(info->flags)) {
1354                         spin_unlock(&info->lock);
1355                         error = -ENOSPC;
1356                         goto failed;
1357                 }
1358
1359                 if (!filepage) {
1360                         int ret;
1361
1362                         spin_unlock(&info->lock);
1363                         filepage = shmem_alloc_page(gfp, info, idx);
1364                         if (!filepage) {
1365                                 shmem_unacct_blocks(info->flags, 1);
1366                                 shmem_free_blocks(inode, 1);
1367                                 error = -ENOMEM;
1368                                 goto failed;
1369                         }
1370                         SetPageSwapBacked(filepage);
1371
1372                         /* Precharge page while we can wait, compensate after */
1373                         error = mem_cgroup_cache_charge(filepage, current->mm,
1374                                                         gfp & ~__GFP_HIGHMEM);
1375                         if (error) {
1376                                 page_cache_release(filepage);
1377                                 shmem_unacct_blocks(info->flags, 1);
1378                                 shmem_free_blocks(inode, 1);
1379                                 filepage = NULL;
1380                                 goto failed;
1381                         }
1382
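                        /*
                         * Re-take the lock and recheck the swap entry: it may
                         * have been filled while we slept in the allocation
                         * and charge above, in which case we must back out.
                         */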
1383                         spin_lock(&info->lock);
1384                         entry = shmem_swp_alloc(info, idx, sgp);
1385                         if (IS_ERR(entry))
1386                                 error = PTR_ERR(entry);
1387                         else {
1388                                 swap = *entry;
1389                                 shmem_swp_unmap(entry);
1390                         }
1391                         ret = error || swap.val;
1392                         if (ret)
1393                                 mem_cgroup_uncharge_cache_page(filepage);
1394                         else
1395                                 ret = add_to_page_cache_lru(filepage, mapping,
1396                                                 idx, GFP_NOWAIT);
1397                         /*
1398                          * On add_to_page_cache_lru() failure, the memcg
1399                          * charge is dropped automatically.
1400                          */
1401                         if (ret) {
1402                                 spin_unlock(&info->lock);
1403                                 page_cache_release(filepage);
1404                                 shmem_unacct_blocks(info->flags, 1);
1405                                 shmem_free_blocks(inode, 1);
1406                                 filepage = NULL;
1407                                 if (error)
1408                                         goto failed;
1409                                 goto repeat;
1410                         }
1411                         info->flags |= SHMEM_PAGEIN;
1412                 }
1413
1414                 info->alloced++;
1415                 spin_unlock(&info->lock);
1416                 clear_highpage(filepage);
1417                 flush_dcache_page(filepage);
1418                 SetPageUptodate(filepage);
1419                 if (sgp == SGP_DIRTY)
1420                         set_page_dirty(filepage);
1421         }
1422 done:
1423         *pagep = filepage;
1424         return 0;
1425
1426 failed:
1427         if (*pagep != filepage) {
1428                 unlock_page(filepage);
1429                 page_cache_release(filepage);
1430         }
1431         return error;
1432 }
1433
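/*
 * Fault in a page for a mapped tmpfs file.  Note that, unlike
 * do_shmem_file_read(), the fault path deliberately does not
 * mark_page_accessed(): pages mapped into userspace gather their
 * referenced state from the pte young bits instead.
 */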
1434 static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1435 {
1436         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1437         int error;
1438         int ret;
1439
1440         if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
1441                 return VM_FAULT_SIGBUS;
1442
1443         error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1444         if (error)
1445                 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1446
1447         return ret | VM_FAULT_LOCKED;
1448 }
1449
1450 #ifdef CONFIG_NUMA
1451 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
1452 {
1453         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1454         return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
1455 }
1456
1457 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
1458                                           unsigned long addr)
1459 {
1460         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1461         unsigned long idx;
1462
1463         idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1464         return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
1465 }
1466 #endif
1467
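/*
 * Pin or unpin a tmpfs file's pages in memory (SysV SHM_LOCK): charge
 * the size against the user's RLIMIT_MEMLOCK and mark the mapping
 * unevictable; on unlock, undo both and rescue any pages already
 * stranded on the unevictable list back to the normal LRUs.
 */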
1468 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1469 {
1470         struct inode *inode = file->f_path.dentry->d_inode;
1471         struct shmem_inode_info *info = SHMEM_I(inode);
1472         int retval = -ENOMEM;
1473
1474         spin_lock(&info->lock);
1475         if (lock && !(info->flags & VM_LOCKED)) {
1476                 if (!user_shm_lock(inode->i_size, user))
1477                         goto out_nomem;
1478                 info->flags |= VM_LOCKED;
1479                 mapping_set_unevictable(file->f_mapping);
1480         }
1481         if (!lock && (info->flags & VM_LOCKED) && user) {
1482                 user_shm_unlock(inode->i_size, user);
1483                 info->flags &= ~VM_LOCKED;
1484                 mapping_clear_unevictable(file->f_mapping);
1485                 scan_mapping_unevictable_pages(file->f_mapping);
1486         }
1487         retval = 0;
1488
1489 out_nomem:
1490         spin_unlock(&info->lock);
1491         return retval;
1492 }
1493
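/* Hook up shmem's vm_ops; VM_CAN_NONLINEAR allows remap_file_pages(). */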
1494 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1495 {
1496         file_accessed(file);
1497         vma->vm_ops = &shmem_vm_ops;
1498         vma->vm_flags |= VM_CAN_NONLINEAR;
1499         return 0;
1500 }
1501
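/*
 * Allocate and set up a new inode: reserve it against the superblock's
 * inode limit, then initialize the ops according to the type in mode.
 * Returns NULL if the reservation or the allocation fails.
 */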
1502 static struct inode *
1503 shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
1504 {
1505         struct inode *inode;
1506         struct shmem_inode_info *info;
1507         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1508
1509         if (shmem_reserve_inode(sb))
1510                 return NULL;
1511
1512         inode = new_inode(sb);
1513         if (inode) {
1514                 inode->i_mode = mode;
1515                 inode->i_uid = current_fsuid();
1516                 inode->i_gid = current_fsgid();
1517                 inode->i_blocks = 0;
1518                 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1519                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1520                 inode->i_generation = get_seconds();
1521                 info = SHMEM_I(inode);
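                /* zero just the shmem fields preceding the embedded vfs_inode */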
1522                 memset(info, 0, (char *)inode - (char *)info);
1523                 spin_lock_init(&info->lock);
1524                 INIT_LIST_HEAD(&info->swaplist);
1525
1526                 switch (mode & S_IFMT) {
1527                 default:
1528                         inode->i_op = &shmem_special_inode_operations;
1529                         init_special_inode(inode, mode, dev);
1530                         break;
1531                 case S_IFREG:
1532                         inode->i_mapping->a_ops = &shmem_aops;
1533                         inode->i_op = &shmem_inode_operations;
1534                         inode->i_fop = &shmem_file_operations;
1535                         mpol_shared_policy_init(&info->policy,
1536                                                  shmem_get_sbmpol(sbinfo));
1537                         break;
1538                 case S_IFDIR:
1539                         inc_nlink(inode);
1540                         /* Some things misbehave if size == 0 on a directory */
1541                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1542                         inode->i_op = &shmem_dir_inode_operations;
1543                         inode->i_fop = &simple_dir_operations;
1544                         break;
1545                 case S_IFLNK:
1546                         /*
1547                          * Must not load anything into the rbtree:
1548                          * mpol_free_shared_policy will not be called.
1549                          */
1550                         mpol_shared_policy_init(&info->policy, NULL);
1551                         break;
1552                 }
1553         } else
1554                 shmem_free_inode(sb);
1555         return inode;
1556 }
1557
1558 #ifdef CONFIG_TMPFS
1559 static const struct inode_operations shmem_symlink_inode_operations;
1560 static const struct inode_operations shmem_symlink_inline_operations;
1561
1562 /*
1563  * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
1564  * but providing them allows a tmpfs file to be used for splice and sendfile,
1565  * and beneath the loop driver, in the generic fashion many filesystems support.
1566  */
1567 static int shmem_readpage(struct file *file, struct page *page)
1568 {
1569         struct inode *inode = page->mapping->host;
1570         int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
1571         unlock_page(page);
1572         return error;
1573 }
1574
1575 static int
1576 shmem_write_begin(struct file *file, struct address_space *mapping,
1577                         loff_t pos, unsigned len, unsigned flags,
1578                         struct page **pagep, void **fsdata)
1579 {
1580         struct inode *inode = mapping->host;
1581         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1582         *pagep = NULL;
1583         return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
1584 }
1585
1586 static int
1587 shmem_write_end(struct file *file, struct address_space *mapping,
1588                         loff_t pos, unsigned len, unsigned copied,
1589                         struct page *page, void *fsdata)
1590 {
1591         struct inode *inode = mapping->host;
1592
1593         if (pos + copied > inode->i_size)
1594                 i_size_write(inode, pos + copied);
1595
1596         unlock_page(page);
1597         set_page_dirty(page);
1598         page_cache_release(page);
1599
1600         return copied;
1601 }
1602
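/*
 * Copy file data out a page at a time.  SGP_READ lets holes read back
 * as zeroes without allocating pages, except for the kernel-space
 * (stacking filesystem) reads handled just below.
 */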
1603 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1604 {
1605         struct inode *inode = filp->f_path.dentry->d_inode;
1606         struct address_space *mapping = inode->i_mapping;
1607         unsigned long index, offset;
1608         enum sgp_type sgp = SGP_READ;
1609
1610         /*
1611          * Might this read be for a stacking filesystem?  Then when reading
1612          * holes of a sparse file, we actually need to allocate those pages,
1613          * and even mark them dirty, so it cannot exceed the max_blocks limit.
1614          */
1615         if (segment_eq(get_fs(), KERNEL_DS))
1616                 sgp = SGP_DIRTY;
1617
1618         index = *ppos >> PAGE_CACHE_SHIFT;
1619         offset = *ppos & ~PAGE_CACHE_MASK;
1620
1621         for (;;) {
1622                 struct page *page = NULL;
1623                 unsigned long end_index, nr, ret;
1624                 loff_t i_size = i_size_read(inode);
1625
1626                 end_index = i_size >> PAGE_CACHE_SHIFT;
1627                 if (index > end_index)
1628                         break;
1629                 if (index == end_index) {
1630                         nr = i_size & ~PAGE_CACHE_MASK;
1631                         if (nr <= offset)
1632                                 break;
1633                 }
1634
1635                 desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
1636                 if (desc->error) {
1637                         if (desc->error == -EINVAL)
1638                                 desc->error = 0;
1639                         break;
1640                 }
1641                 if (page)
1642                         unlock_page(page);
1643
1644                 /*
1645                  * We must re-evaluate i_size after the getpage, since reads
1646                  * (unlike writes) are called without i_mutex protection against truncate
1647                  */
1648                 nr = PAGE_CACHE_SIZE;
1649                 i_size = i_size_read(inode);
1650                 end_index = i_size >> PAGE_CACHE_SHIFT;
1651                 if (index == end_index) {
1652                         nr = i_size & ~PAGE_CACHE_MASK;
1653                         if (nr <= offset) {
1654                                 if (page)
1655                                         page_cache_release(page);
1656                                 break;
1657                         }
1658                 }
1659                 nr -= offset;
1660
1661                 if (page) {
1662                         /*
1663                          * If users can be writing to this page using arbitrary
1664                          * virtual addresses, take care about potential aliasing
1665                          * before reading the page on the kernel side.
1666                          */
1667                         if (mapping_writably_mapped(mapping))
1668                                 flush_dcache_page(page);
1669                         /*
1670                          * Mark the page accessed if we read the beginning.
1671                          */
1672                         if (!offset)
1673                                 mark_page_accessed(page);
1674                 } else {
1675                         page = ZERO_PAGE(0);
1676                         page_cache_get(page);
1677                 }
1678
1679                 /*
1680                  * Ok, we have the page, and it's up-to-date, so
1681                  * now we can copy it to user space...
1682                  *
1683                  * The actor routine returns how many bytes were actually used.
1684                  * NOTE! This may not be the same as how much of a user buffer
1685                  * we filled up (we may be padding etc), so we can only update
1686                  * "pos" here (the actor routine has to update the user buffer
1687                  * pointers and the remaining count).
1688                  */
1689                 ret = actor(desc, page, offset, nr);
1690                 offset += ret;
1691                 index += offset >> PAGE_CACHE_SHIFT;
1692                 offset &= ~PAGE_CACHE_MASK;
1693
1694                 page_cache_release(page);
1695                 if (ret != nr || !desc->count)
1696                         break;
1697
1698                 cond_resched();
1699         }
1700
1701         *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1702         file_accessed(filp);
1703 }
1704
1705 static ssize_t shmem_file_aio_read(struct kiocb *iocb,
1706                 const struct iovec *iov, unsigned long nr_segs, loff_t pos)
1707 {
1708         struct file *filp = iocb->ki_filp;
1709         ssize_t retval;
1710         unsigned long seg;
1711         size_t count;
1712         loff_t *ppos = &iocb->ki_pos;
1713
1714         retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1715         if (retval)
1716                 return retval;
1717
1718         for (seg = 0; seg < nr_segs; seg++) {
1719                 read_descriptor_t desc;
1720
1721                 desc.written = 0;
1722                 desc.arg.buf = iov[seg].iov_base;
1723                 desc.count = iov[seg].iov_len;
1724                 if (desc.count == 0)
1725                         continue;
1726                 desc.error = 0;
1727                 do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1728                 retval += desc.written;
1729                 if (desc.error) {
1730                         retval = retval ?: desc.error;
1731                         break;
1732                 }
1733                 if (desc.count > 0)
1734                         break;
1735         }
1736         return retval;
1737 }
1738
1739 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1740 {
1741         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1742
1743         buf->f_type = TMPFS_MAGIC;
1744         buf->f_bsize = PAGE_CACHE_SIZE;
1745         buf->f_namelen = NAME_MAX;
1746         spin_lock(&sbinfo->stat_lock);
1747         if (sbinfo->max_blocks) {
1748                 buf->f_blocks = sbinfo->max_blocks;
1749                 buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
1750         }
1751         if (sbinfo->max_inodes) {
1752                 buf->f_files = sbinfo->max_inodes;
1753                 buf->f_ffree = sbinfo->free_inodes;
1754         }
1755         /* else leave those fields 0 like simple_statfs */
1756         spin_unlock(&sbinfo->stat_lock);
1757         return 0;
1758 }
1759
1760 /*
1761  * File creation. Allocate an inode, and we're done.
1762  */
1763 static int
1764 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1765 {
1766         struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
1767         int error = -ENOSPC;
1768
1769         if (inode) {
1770                 error = security_inode_init_security(inode, dir, NULL, NULL,
1771                                                      NULL);
1772                 if (error) {
1773                         if (error != -EOPNOTSUPP) {
1774                                 iput(inode);
1775                                 return error;
1776                         }
1777                 }
1778                 error = shmem_acl_init(inode, dir);
1779                 if (error) {
1780                         iput(inode);
1781                         return error;
1782                 }
1783                 if (dir->i_mode & S_ISGID) {
1784                         inode->i_gid = dir->i_gid;
1785                         if (S_ISDIR(mode))
1786                                 inode->i_mode |= S_ISGID;
1787                 }
1788                 dir->i_size += BOGO_DIRENT_SIZE;
1789                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1790                 d_instantiate(dentry, inode);
1791                 dget(dentry); /* Extra count - pin the dentry in core */
1792         }
1793         return error;
1794 }
1795
1796 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1797 {
1798         int error;
1799
1800         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1801                 return error;
1802         inc_nlink(dir);
1803         return 0;
1804 }
1805
1806 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1807                 struct nameidata *nd)
1808 {
1809         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1810 }
1811
1812 /*
1813  * Link a file.
1814  */
1815 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1816 {
1817         struct inode *inode = old_dentry->d_inode;
1818         int ret;
1819
1820         /*
1821          * No ordinary (disk-based) filesystem counts links as inodes;
1822          * but each new link needs a new dentry, pinning lowmem, and
1823          * tmpfs dentries cannot be pruned until they are unlinked.
1824          */
1825         ret = shmem_reserve_inode(inode->i_sb);
1826         if (ret)
1827                 goto out;
1828
1829         dir->i_size += BOGO_DIRENT_SIZE;
1830         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1831         inc_nlink(inode);
1832         atomic_inc(&inode->i_count);    /* New dentry reference */
1833         dget(dentry);           /* Extra pinning count for the created dentry */
1834         d_instantiate(dentry, inode);
1835 out:
1836         return ret;
1837 }
1838
1839 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1840 {
1841         struct inode *inode = dentry->d_inode;
1842
1843         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
1844                 shmem_free_inode(inode->i_sb);
1845
1846         dir->i_size -= BOGO_DIRENT_SIZE;
1847         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1848         drop_nlink(inode);
1849         dput(dentry);   /* Undo the count from "create" - this does all the work */
1850         return 0;
1851 }
1852
1853 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1854 {
1855         if (!simple_empty(dentry))
1856                 return -ENOTEMPTY;
1857
1858         drop_nlink(dentry->d_inode);
1859         drop_nlink(dir);
1860         return shmem_unlink(dir, dentry);
1861 }
1862
1863 /*
1864  * The VFS layer already does all the dentry stuff for rename;
1865  * we just have to decrement the usage count for the target if
1866  * it exists, so that the VFS layer correctly frees it when it
1867  * gets overwritten.
1868  */
1869 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1870 {
1871         struct inode *inode = old_dentry->d_inode;
1872         int they_are_dirs = S_ISDIR(inode->i_mode);
1873
1874         if (!simple_empty(new_dentry))
1875                 return -ENOTEMPTY;
1876
1877         if (new_dentry->d_inode) {
1878                 (void) shmem_unlink(new_dir, new_dentry);
1879                 if (they_are_dirs)
1880                         drop_nlink(old_dir);
1881         } else if (they_are_dirs) {
1882                 drop_nlink(old_dir);
1883                 inc_nlink(new_dir);
1884         }
1885
1886         old_dir->i_size -= BOGO_DIRENT_SIZE;
1887         new_dir->i_size += BOGO_DIRENT_SIZE;
1888         old_dir->i_ctime = old_dir->i_mtime =
1889         new_dir->i_ctime = new_dir->i_mtime =
1890         inode->i_ctime = CURRENT_TIME;
1891         return 0;
1892 }
1893
1894 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1895 {
1896         int error;
1897         int len;
1898         struct inode *inode;
1899         struct page *page = NULL;
1900         char *kaddr;
1901         struct shmem_inode_info *info;
1902
1903         len = strlen(symname) + 1;
1904         if (len > PAGE_CACHE_SIZE)
1905                 return -ENAMETOOLONG;
1906
1907         inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
1908         if (!inode)
1909                 return -ENOSPC;
1910
1911         error = security_inode_init_security(inode, dir, NULL, NULL,
1912                                              NULL);
1913         if (error) {
1914                 if (error != -EOPNOTSUPP) {
1915                         iput(inode);
1916                         return error;
1917                 }
1918                 error = 0;
1919         }
1920
1921         info = SHMEM_I(inode);
1922         inode->i_size = len-1;
1923         if (len <= (char *)inode - (char *)info) {
1924                 /* do it inline: the target fits in the shmem_inode_info area */
1925                 memcpy(info, symname, len);
1926                 inode->i_op = &shmem_symlink_inline_operations;
1927         } else {
1928                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1929                 if (error) {
1930                         iput(inode);
1931                         return error;
1932                 }
1933                 unlock_page(page);
1934                 inode->i_mapping->a_ops = &shmem_aops;
1935                 inode->i_op = &shmem_symlink_inode_operations;
1936                 kaddr = kmap_atomic(page, KM_USER0);
1937                 memcpy(kaddr, symname, len);
1938                 kunmap_atomic(kaddr, KM_USER0);
1939                 set_page_dirty(page);
1940                 page_cache_release(page);
1941         }
1942         if (dir->i_mode & S_ISGID)
1943                 inode->i_gid = dir->i_gid;
1944         dir->i_size += BOGO_DIRENT_SIZE;
1945         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1946         d_instantiate(dentry, inode);
1947         dget(dentry);
1948         return 0;
1949 }
1950
1951 static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
1952 {
1953         nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
1954         return NULL;
1955 }
1956
1957 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1958 {
1959         struct page *page = NULL;
1960         int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1961         nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
1962         if (page)
1963                 unlock_page(page);
1964         return page;
1965 }
1966
1967 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1968 {
1969         if (!IS_ERR(nd_get_link(nd))) {
1970                 struct page *page = cookie;
1971                 kunmap(page);
1972                 mark_page_accessed(page);
1973                 page_cache_release(page);
1974         }
1975 }
1976
1977 static const struct inode_operations shmem_symlink_inline_operations = {
1978         .readlink       = generic_readlink,
1979         .follow_link    = shmem_follow_link_inline,
1980 };
1981
1982 static const struct inode_operations shmem_symlink_inode_operations = {
1983         .truncate       = shmem_truncate,
1984         .readlink       = generic_readlink,
1985         .follow_link    = shmem_follow_link,
1986         .put_link       = shmem_put_link,
1987 };
1988
1989 #ifdef CONFIG_TMPFS_POSIX_ACL
1990 /*
1991  * Superblocks without xattr inode operations will get security.* xattr
1992  * support from the VFS "for free". As soon as we have any other xattrs
1993  * like ACLs, we also need to implement the security.* handlers at
1994  * filesystem level, though.
1995  */
1996
1997 static size_t shmem_xattr_security_list(struct inode *inode, char *list,
1998                                         size_t list_len, const char *name,
1999                                         size_t name_len)
2000 {
2001         return security_inode_listsecurity(inode, list, list_len);
2002 }
2003
2004 static int shmem_xattr_security_get(struct inode *inode, const char *name,
2005                                     void *buffer, size_t size)
2006 {
2007         if (strcmp(name, "") == 0)
2008                 return -EINVAL;
2009         return xattr_getsecurity(inode, name, buffer, size);
2010 }
2011
2012 static int shmem_xattr_security_set(struct inode *inode, const char *name,
2013                                     const void *value, size_t size, int flags)
2014 {
2015         if (strcmp(name, "") == 0)
2016                 return -EINVAL;
2017         return security_inode_setsecurity(inode, name, value, size, flags);
2018 }
2019
2020 static struct xattr_handler shmem_xattr_security_handler = {
2021         .prefix = XATTR_SECURITY_PREFIX,
2022         .list   = shmem_xattr_security_list,
2023         .get    = shmem_xattr_security_get,
2024         .set    = shmem_xattr_security_set,
2025 };
2026
2027 static struct xattr_handler *shmem_xattr_handlers[] = {
2028         &shmem_xattr_acl_access_handler,
2029         &shmem_xattr_acl_default_handler,
2030         &shmem_xattr_security_handler,
2031         NULL
2032 };
2033 #endif
2034
2035 static struct dentry *shmem_get_parent(struct dentry *child)
2036 {
2037         return ERR_PTR(-ESTALE);
2038 }
2039
2040 static int shmem_match(struct inode *ino, void *vfh)
2041 {
2042         __u32 *fh = vfh;
2043         __u64 inum = fh[2];
2044         inum = (inum << 32) | fh[1];
2045         return ino->i_ino == inum && fh[0] == ino->i_generation;
2046 }
2047
2048 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
2049                 struct fid *fid, int fh_len, int fh_type)
2050 {
2051         struct inode *inode;
2052         struct dentry *dentry = NULL;
2053         u64 inum;
2054
2055         if (fh_len < 3)
2056                 return NULL;
2057         inum = ((u64)fid->raw[2] << 32) | fid->raw[1];
2058
2059         inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
2060                         shmem_match, fid->raw);
2061         if (inode) {
2062                 dentry = d_find_alias(inode);
2063                 iput(inode);
2064         }
2065
2066         return dentry;
2067 }
2068
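/*
 * Pack a three-word file handle: i_generation plus the 64-bit inode
 * number split across two words; shmem_match() above checks the same
 * layout when the handle is looked up again.
 */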
2069 static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
2070                                 int connectable)
2071 {
2072         struct inode *inode = dentry->d_inode;
2073
2074         if (*len < 3)
2075                 return 255;
2076
2077         if (hlist_unhashed(&inode->i_hash)) {
2078                 /* Unfortunately insert_inode_hash is not idempotent,
2079                  * so as we hash inodes here rather than at creation
2080                  * time, we need a lock to ensure we only try
2081                  * to do it once.
2082                  */
2083                 static DEFINE_SPINLOCK(lock);
2084                 spin_lock(&lock);
2085                 if (hlist_unhashed(&inode->i_hash))
2086                         __insert_inode_hash(inode,
2087                                             inode->i_ino + inode->i_generation);
2088                 spin_unlock(&lock);
2089         }
2090
2091         fh[0] = inode->i_generation;
2092         fh[1] = inode->i_ino;
2093         fh[2] = ((__u64)inode->i_ino) >> 32;
2094
2095         *len = 3;
2096         return 1;
2097 }
2098
2099 static const struct export_operations shmem_export_ops = {
2100         .get_parent     = shmem_get_parent,
2101         .encode_fh      = shmem_encode_fh,
2102         .fh_to_dentry   = shmem_fh_to_dentry,
2103 };
2104
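/*
 * Parse mount options such as "size=50%,nr_inodes=1000000,mode=1777"
 * (an illustrative string, not taken from this file).  Returns 0 on
 * success, 1 on any unknown option or bad value.
 */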
2105 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2106                                bool remount)
2107 {
2108         char *this_char, *value, *rest;
2109
2110         while (options != NULL) {
2111                 this_char = options;
2112                 for (;;) {
2113                         /*
2114                          * NUL-terminate this option: unfortunately,
2115                          * mount options form a comma-separated list,
2116                          * but mpol's nodelist may also contain commas.
2117                          */
2118                         options = strchr(options, ',');
2119                         if (options == NULL)
2120                                 break;
2121                         options++;
2122                         if (!isdigit(*options)) {
2123                                 options[-1] = '\0';
2124                                 break;
2125                         }
2126                 }
2127                 if (!*this_char)
2128                         continue;
2129                 if ((value = strchr(this_char,'=')) != NULL) {
2130                         *value++ = 0;
2131                 } else {
2132                         printk(KERN_ERR
2133                             "tmpfs: No value for mount option '%s'\n",
2134                             this_char);
2135                         return 1;
2136                 }
2137
2138                 if (!strcmp(this_char,"size")) {
2139                         unsigned long long size;
2140                         size = memparse(value,&rest);
2141                         if (*rest == '%') {
2142                                 size <<= PAGE_SHIFT;
2143                                 size *= totalram_pages;
2144                                 do_div(size, 100);
2145                                 rest++;
2146                         }
2147                         if (*rest)
2148                                 goto bad_val;
2149                         sbinfo->max_blocks =
2150                                 DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
2151                 } else if (!strcmp(this_char,"nr_blocks")) {
2152                         sbinfo->max_blocks = memparse(value, &rest);
2153                         if (*rest)
2154                                 goto bad_val;
2155                 } else if (!strcmp(this_char,"nr_inodes")) {
2156                         sbinfo->max_inodes = memparse(value, &rest);
2157                         if (*rest)
2158                                 goto bad_val;
2159                 } else if (!strcmp(this_char,"mode")) {
2160                         if (remount)
2161                                 continue;
2162                         sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
2163                         if (*rest)
2164                                 goto bad_val;
2165                 } else if (!strcmp(this_char,"uid")) {
2166                         if (remount)
2167                                 continue;
2168                         sbinfo->uid = simple_strtoul(value, &rest, 0);
2169                         if (*rest)
2170                                 goto bad_val;
2171                 } else if (!strcmp(this_char,"gid")) {
2172                         if (remount)
2173                                 continue;
2174                         sbinfo->gid = simple_strtoul(value, &rest, 0);
2175                         if (*rest)
2176                                 goto bad_val;
2177                 } else if (!strcmp(this_char,"mpol")) {
2178                         if (mpol_parse_str(value, &sbinfo->mpol, 1))
2179                                 goto bad_val;
2180                 } else {
2181                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2182                                this_char);
2183                         return 1;
2184                 }
2185         }
2186         return 0;
2187
2188 bad_val:
2189         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2190                value, this_char);
2191         return 1;
2193 }
2194
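/*
 * Remount: parse the new options into a scratch copy of the sb_info,
 * and commit them only when the new limits do not fall below current
 * usage; see the limited/unlimited rules in the comment below.
 */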
2195 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2196 {
2197         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2198         struct shmem_sb_info config = *sbinfo;
2199         unsigned long blocks;
2200         unsigned long inodes;
2201         int error = -EINVAL;
2202
2203         if (shmem_parse_options(data, &config, true))
2204                 return error;
2205
2206         spin_lock(&sbinfo->stat_lock);
2207         blocks = sbinfo->max_blocks - sbinfo->free_blocks;
2208         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2209         if (config.max_blocks < blocks)
2210                 goto out;
2211         if (config.max_inodes < inodes)
2212                 goto out;
2213         /*
2214          * Those tests also disallow limited->unlimited while any are in
2215          * use, so i_blocks will always be zero when max_blocks is zero;
2216          * but we must separately disallow unlimited->limited, because
2217          * in that case we have no record of how much is already in use.
2218          */
2219         if (config.max_blocks && !sbinfo->max_blocks)
2220                 goto out;
2221         if (config.max_inodes && !sbinfo->max_inodes)
2222                 goto out;
2223
2224         error = 0;
2225         sbinfo->max_blocks  = config.max_blocks;
2226         sbinfo->free_blocks = config.max_blocks - blocks;
2227         sbinfo->max_inodes  = config.max_inodes;
2228         sbinfo->free_inodes = config.max_inodes - inodes;
2229
2230         mpol_put(sbinfo->mpol);
2231         sbinfo->mpol        = config.mpol;      /* transfers initial ref */
2232 out:
2233         spin_unlock(&sbinfo->stat_lock);
2234         return error;
2235 }
2236
2237 static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
2238 {
2239         struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
2240
2241         if (sbinfo->max_blocks != shmem_default_max_blocks())
2242                 seq_printf(seq, ",size=%luk",
2243                         sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2244         if (sbinfo->max_inodes != shmem_default_max_inodes())
2245                 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2246         if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2247                 seq_printf(seq, ",mode=%03o", sbinfo->mode);
2248         if (sbinfo->uid != 0)
2249                 seq_printf(seq, ",uid=%u", sbinfo->uid);
2250         if (sbinfo->gid != 0)
2251                 seq_printf(seq, ",gid=%u", sbinfo->gid);
2252         shmem_show_mpol(seq, sbinfo->mpol);
2253         return 0;
2254 }
2255 #endif /* CONFIG_TMPFS */
2256
2257 static void shmem_put_super(struct super_block *sb)
2258 {
2259         kfree(sb->s_fs_info);
2260         sb->s_fs_info = NULL;
2261 }
2262
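/* Build a tmpfs superblock: set defaults, parse options, create root. */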
2263 static int shmem_fill_super(struct super_block *sb,
2264                             void *data, int silent)
2265 {
2266         struct inode *inode;
2267         struct dentry *root;
2268         struct shmem_sb_info *sbinfo;
2269         int err = -ENOMEM;
2270
2271         /* Round up to L1_CACHE_BYTES to resist false sharing */
2272         sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
2273                                 L1_CACHE_BYTES), GFP_KERNEL);
2274         if (!sbinfo)
2275                 return -ENOMEM;
2276
2277         sbinfo->max_blocks = 0;
2278         sbinfo->max_inodes = 0;
2279         sbinfo->mode = S_IRWXUGO | S_ISVTX;
2280         sbinfo->uid = current_fsuid();
2281         sbinfo->gid = current_fsgid();
2282         sbinfo->mpol = NULL;
2283         sb->s_fs_info = sbinfo;
2284
2285 #ifdef CONFIG_TMPFS
2286         /*
2287          * By default we only allow half of the physical ram per
2288          * tmpfs instance, limiting inodes to one per page of lowmem;
2289          * but the internal instance is left unlimited.
2290          */
2291         if (!(sb->s_flags & MS_NOUSER)) {
2292                 sbinfo->max_blocks = shmem_default_max_blocks();
2293                 sbinfo->max_inodes = shmem_default_max_inodes();
2294                 if (shmem_parse_options(data, sbinfo, false)) {
2295                         err = -EINVAL;
2296                         goto failed;
2297                 }
2298         }
2299         sb->s_export_op = &shmem_export_ops;
2300 #else
2301         sb->s_flags |= MS_NOUSER;
2302 #endif
2303
2304         spin_lock_init(&sbinfo->stat_lock);
2305         sbinfo->free_blocks = sbinfo->max_blocks;
2306         sbinfo->free_inodes = sbinfo->max_inodes;
2307
2308         sb->s_maxbytes = SHMEM_MAX_BYTES;
2309         sb->s_blocksize = PAGE_CACHE_SIZE;
2310         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2311         sb->s_magic = TMPFS_MAGIC;
2312         sb->s_op = &shmem_ops;
2313         sb->s_time_gran = 1;
2314 #ifdef CONFIG_TMPFS_POSIX_ACL
2315         sb->s_xattr = shmem_xattr_handlers;
2316         sb->s_flags |= MS_POSIXACL;
2317 #endif
2318
2319         inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0);
2320         if (!inode)
2321                 goto failed;
2322         inode->i_uid = sbinfo->uid;
2323         inode->i_gid = sbinfo->gid;
2324         root = d_alloc_root(inode);
2325         if (!root)
2326                 goto failed_iput;
2327         sb->s_root = root;
2328         return 0;
2329
2330 failed_iput:
2331         iput(inode);
2332 failed:
2333         shmem_put_super(sb);
2334         return err;
2335 }
2336
2337 static struct kmem_cache *shmem_inode_cachep;
2338
2339 static struct inode *shmem_alloc_inode(struct super_block *sb)
2340 {
2341         struct shmem_inode_info *p;
2342         p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2343         if (!p)
2344                 return NULL;
2345         return &p->vfs_inode;
2346 }
2347
2348 static void shmem_destroy_inode(struct inode *inode)
2349 {
2350         if ((inode->i_mode & S_IFMT) == S_IFREG) {
2351                 /* only struct inode is valid if it's an inline symlink */
2352                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2353         }
2354         shmem_acl_destroy_inode(inode);
2355         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2356 }
2357
2358 static void init_once(void *foo)
2359 {
2360         struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
2361
2362         inode_init_once(&p->vfs_inode);
2363 #ifdef CONFIG_TMPFS_POSIX_ACL
2364         p->i_acl = NULL;
2365         p->i_default_acl = NULL;
2366 #endif
2367 }
2368
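/* SLAB_PANIC: failure to create the cache panics, so this cannot fail. */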
2369 static int init_inodecache(void)
2370 {
2371         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2372                                 sizeof(struct shmem_inode_info),
2373                                 0, SLAB_PANIC, init_once);
2374         return 0;
2375 }
2376
2377 static void destroy_inodecache(void)
2378 {
2379         kmem_cache_destroy(shmem_inode_cachep);
2380 }
2381
2382 static const struct address_space_operations shmem_aops = {
2383         .writepage      = shmem_writepage,
2384         .set_page_dirty = __set_page_dirty_no_writeback,
2385 #ifdef CONFIG_TMPFS
2386         .readpage       = shmem_readpage,
2387         .write_begin    = shmem_write_begin,
2388         .write_end      = shmem_write_end,
2389 #endif
2390         .migratepage    = migrate_page,
2391 };
2392
2393 static const struct file_operations shmem_file_operations = {
2394         .mmap           = shmem_mmap,
2395 #ifdef CONFIG_TMPFS
2396         .llseek         = generic_file_llseek,
2397         .read           = do_sync_read,
2398         .write          = do_sync_write,
2399         .aio_read       = shmem_file_aio_read,
2400         .aio_write      = generic_file_aio_write,
2401         .fsync          = simple_sync_file,
2402         .splice_read    = generic_file_splice_read,
2403         .splice_write   = generic_file_splice_write,
2404 #endif
2405 };
2406
2407 static const struct inode_operations shmem_inode_operations = {
2408         .truncate       = shmem_truncate,
2409         .setattr        = shmem_notify_change,
2410         .truncate_range = shmem_truncate_range,
2411 #ifdef CONFIG_TMPFS_POSIX_ACL
2412         .setxattr       = generic_setxattr,
2413         .getxattr       = generic_getxattr,
2414         .listxattr      = generic_listxattr,
2415         .removexattr    = generic_removexattr,
2416         .permission     = shmem_permission,
2417 #endif
2419 };
2420
2421 static const struct inode_operations shmem_dir_inode_operations = {
2422 #ifdef CONFIG_TMPFS
2423         .create         = shmem_create,
2424         .lookup         = simple_lookup,
2425         .link           = shmem_link,
2426         .unlink         = shmem_unlink,
2427         .symlink        = shmem_symlink,
2428         .mkdir          = shmem_mkdir,
2429         .rmdir          = shmem_rmdir,
2430         .mknod          = shmem_mknod,
2431         .rename         = shmem_rename,
2432 #endif
2433 #ifdef CONFIG_TMPFS_POSIX_ACL
2434         .setattr        = shmem_notify_change,
2435         .setxattr       = generic_setxattr,
2436         .getxattr       = generic_getxattr,
2437         .listxattr      = generic_listxattr,
2438         .removexattr    = generic_removexattr,
2439         .permission     = shmem_permission,
2440 #endif
2441 };
2442
2443 static const struct inode_operations shmem_special_inode_operations = {
2444 #ifdef CONFIG_TMPFS_POSIX_ACL
2445         .setattr        = shmem_notify_change,
2446         .setxattr       = generic_setxattr,
2447         .getxattr       = generic_getxattr,
2448         .listxattr      = generic_listxattr,
2449         .removexattr    = generic_removexattr,
2450         .permission     = shmem_permission,
2451 #endif
2452 };
2453
2454 static const struct super_operations shmem_ops = {
2455         .alloc_inode    = shmem_alloc_inode,
2456         .destroy_inode  = shmem_destroy_inode,
2457 #ifdef CONFIG_TMPFS
2458         .statfs         = shmem_statfs,
2459         .remount_fs     = shmem_remount_fs,
2460         .show_options   = shmem_show_options,
2461 #endif
2462         .delete_inode   = shmem_delete_inode,
2463         .drop_inode     = generic_delete_inode,
2464         .put_super      = shmem_put_super,
2465 };
2466
2467 static struct vm_operations_struct shmem_vm_ops = {
2468         .fault          = shmem_fault,
2469 #ifdef CONFIG_NUMA
2470         .set_policy     = shmem_set_policy,
2471         .get_policy     = shmem_get_policy,
2472 #endif
2473 };
2474
2476 static int shmem_get_sb(struct file_system_type *fs_type,
2477         int flags, const char *dev_name, void *data, struct vfsmount *mnt)
2478 {
2479         return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
2480 }
2481
2482 static struct file_system_type tmpfs_fs_type = {
2483         .owner          = THIS_MODULE,
2484         .name           = "tmpfs",
2485         .get_sb         = shmem_get_sb,
2486         .kill_sb        = kill_litter_super,
2487 };
2488 static struct vfsmount *shm_mnt;
2489
2490 static int __init init_tmpfs(void)
2491 {
2492         int error;
2493
2494         error = bdi_init(&shmem_backing_dev_info);
2495         if (error)
2496                 goto out4;
2497
2498         error = init_inodecache();
2499         if (error)
2500                 goto out3;
2501
2502         error = register_filesystem(&tmpfs_fs_type);
2503         if (error) {
2504                 printk(KERN_ERR "Could not register tmpfs\n");
2505                 goto out2;
2506         }
2507
2508         shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
2509                                 tmpfs_fs_type.name, NULL);
2510         if (IS_ERR(shm_mnt)) {
2511                 error = PTR_ERR(shm_mnt);
2512                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2513                 goto out1;
2514         }
2515         return 0;
2516
2517 out1:
2518         unregister_filesystem(&tmpfs_fs_type);
2519 out2:
2520         destroy_inodecache();
2521 out3:
2522         bdi_destroy(&shmem_backing_dev_info);
2523 out4:
2524         shm_mnt = ERR_PTR(error);
2525         return error;
2526 }
2527 module_init(init_tmpfs)
2528
2529 /**
2530  * shmem_file_setup - get an unlinked file living in tmpfs
2531  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2532  * @size: size to be set for the file
2533  * @flags: vm_flags
2534  */
2535 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
2536 {
2537         int error;
2538         struct file *file;
2539         struct inode *inode;
2540         struct dentry *dentry, *root;
2541         struct qstr this;
2542
2543         if (IS_ERR(shm_mnt))
2544                 return (void *)shm_mnt;
2545
2546         if (size < 0 || size > SHMEM_MAX_BYTES)
2547                 return ERR_PTR(-EINVAL);
2548
2549         if (shmem_acct_size(flags, size))
2550                 return ERR_PTR(-ENOMEM);
2551
2552         error = -ENOMEM;
2553         this.name = name;
2554         this.len = strlen(name);
2555         this.hash = 0; /* will go */
2556         root = shm_mnt->mnt_root;
2557         dentry = d_alloc(root, &this);
2558         if (!dentry)
2559                 goto put_memory;
2560
2561         error = -ENFILE;
2562         file = get_empty_filp();
2563         if (!file)
2564                 goto put_dentry;
2565
2566         error = -ENOSPC;
2567         inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
2568         if (!inode)
2569                 goto close_file;
2570
2571         SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
2572         d_instantiate(dentry, inode);
2573         inode->i_size = size;
2574         inode->i_nlink = 0;     /* It is unlinked */
2575         init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
2576                         &shmem_file_operations);
2577         return file;
2578
2579 close_file:
2580         put_filp(file);
2581 put_dentry:
2582         dput(dentry);
2583 put_memory:
2584         shmem_unacct_size(flags, size);
2585         return ERR_PTR(error);
2586 }
2587 EXPORT_SYMBOL_GPL(shmem_file_setup);
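
/*
 * Illustrative use of shmem_file_setup() (a sketch only: the name, size
 * and flags below are invented, not taken from any caller):
 *
 *      struct file *filp = shmem_file_setup("example", 65536, VM_ACCOUNT);
 *      if (IS_ERR(filp))
 *              return PTR_ERR(filp);
 *      ...
 *      fput(filp);
 */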
2588
2589 /**
2590  * shmem_zero_setup - setup a shared anonymous mapping
2591  * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
2592  */
2593 int shmem_zero_setup(struct vm_area_struct *vma)
2594 {
2595         struct file *file;
2596         loff_t size = vma->vm_end - vma->vm_start;
2597
2598         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2599         if (IS_ERR(file))
2600                 return PTR_ERR(file);
2601
2602         if (vma->vm_file)
2603                 fput(vma->vm_file);
2604         vma->vm_file = file;
2605         vma->vm_ops = &shmem_vm_ops;
2606         return 0;
2607 }