swapin_readahead: excise NUMA bogosity
[safe/jmp/linux-2.6] mm/shmem.c
1 /*
2  * Resizable virtual memory filesystem for Linux.
3  *
4  * Copyright (C) 2000 Linus Torvalds.
5  *               2000 Transmeta Corp.
6  *               2000-2001 Christoph Rohland
7  *               2000-2001 SAP AG
8  *               2002 Red Hat Inc.
9  * Copyright (C) 2002-2005 Hugh Dickins.
10  * Copyright (C) 2002-2005 VERITAS Software Corporation.
11  * Copyright (C) 2004 Andi Kleen, SuSE Labs
12  *
13  * Extended attribute support for tmpfs:
14  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
15  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
16  *
17  * This file is released under the GPL.
18  */
19
20 /*
21  * This virtual memory filesystem is heavily based on the ramfs. It
22  * extends ramfs by the ability to use swap and honor resource limits
23  * which makes it a completely usable filesystem.
24  */
25
26 #include <linux/module.h>
27 #include <linux/init.h>
28 #include <linux/fs.h>
29 #include <linux/xattr.h>
30 #include <linux/exportfs.h>
31 #include <linux/generic_acl.h>
32 #include <linux/mm.h>
33 #include <linux/mman.h>
34 #include <linux/file.h>
35 #include <linux/swap.h>
36 #include <linux/pagemap.h>
37 #include <linux/string.h>
38 #include <linux/slab.h>
39 #include <linux/backing-dev.h>
40 #include <linux/shmem_fs.h>
41 #include <linux/mount.h>
42 #include <linux/writeback.h>
43 #include <linux/vfs.h>
44 #include <linux/blkdev.h>
45 #include <linux/security.h>
46 #include <linux/swapops.h>
47 #include <linux/mempolicy.h>
48 #include <linux/namei.h>
49 #include <linux/ctype.h>
50 #include <linux/migrate.h>
51 #include <linux/highmem.h>
52
53 #include <asm/uaccess.h>
54 #include <asm/div64.h>
55 #include <asm/pgtable.h>
56
57 /* This magic number is used in glibc for posix shared memory */
58 #define TMPFS_MAGIC     0x01021994
59
60 #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
61 #define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
62 #define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
63
64 #define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
65 #define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
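/*
 * Worked numbers (illustrative, assuming PAGE_CACHE_SIZE == 4096 and an
 * 8-byte unsigned long, i.e. a 64-bit build, with SHMEM_NR_DIRECT == 16):
 *   ENTRIES_PER_PAGE     = 4096/8           = 512
 *   ENTRIES_PER_PAGEPAGE = 512*512          = 262144
 *   SHMEM_MAX_INDEX      = 16 + 131072*513  = 67239952 pages
 * giving a SHMEM_MAX_BYTES of just over 256GB per file.
 */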
66
67 #define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
68
69 /* info->flags needs VM_flags to handle pagein/truncate races efficiently */
70 #define SHMEM_PAGEIN     VM_READ
71 #define SHMEM_TRUNCATE   VM_WRITE
72
73 /* Definition to limit shmem_truncate's steps between cond_rescheds */
74 #define LATENCY_LIMIT    64
75
76 /* Pretend that each entry is of this size in directory's i_size */
77 #define BOGO_DIRENT_SIZE 20
78
79 /* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
80 enum sgp_type {
81         SGP_QUICK,      /* don't try more than file page cache lookup */
82         SGP_READ,       /* don't exceed i_size, don't allocate page */
83         SGP_CACHE,      /* don't exceed i_size, may allocate page */
84         SGP_WRITE,      /* may exceed i_size, may allocate page */
85         SGP_FAULT,      /* same as SGP_CACHE, return with page locked */
86 };
87
88 static int shmem_getpage(struct inode *inode, unsigned long idx,
89                          struct page **pagep, enum sgp_type sgp, int *type);
90
91 static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
92 {
93         /*
94          * The above definition of ENTRIES_PER_PAGE, and the use of
95          * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
96          * might be reconsidered if it ever diverges from PAGE_SIZE.
97          *
98          * Mobility flags are masked out as swap vectors cannot move
99          */
100         return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
101                                 PAGE_CACHE_SHIFT-PAGE_SHIFT);
102 }
103
104 static inline void shmem_dir_free(struct page *page)
105 {
106         __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
107 }
108
109 static struct page **shmem_dir_map(struct page *page)
110 {
111         return (struct page **)kmap_atomic(page, KM_USER0);
112 }
113
114 static inline void shmem_dir_unmap(struct page **dir)
115 {
116         kunmap_atomic(dir, KM_USER0);
117 }
118
119 static swp_entry_t *shmem_swp_map(struct page *page)
120 {
121         return (swp_entry_t *)kmap_atomic(page, KM_USER1);
122 }
123
124 static inline void shmem_swp_balance_unmap(void)
125 {
126         /*
127          * When passing a pointer to an i_direct entry, to code which
128          * also handles indirect entries and so will shmem_swp_unmap,
129          * we must arrange for the preempt count to remain in balance.
130          * What kmap_atomic of a lowmem page does depends on config
131          * and architecture, so pretend to kmap_atomic some lowmem page.
132          */
133         (void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
134 }
135
136 static inline void shmem_swp_unmap(swp_entry_t *entry)
137 {
138         kunmap_atomic(entry, KM_USER1);
139 }
140
141 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
142 {
143         return sb->s_fs_info;
144 }
145
146 /*
147  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
148  * for shared memory and for shared anonymous (/dev/zero) mappings
149  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
150  * consistent with the pre-accounting of private mappings ...
151  */
152 static inline int shmem_acct_size(unsigned long flags, loff_t size)
153 {
154         return (flags & VM_ACCOUNT)?
155                 security_vm_enough_memory(VM_ACCT(size)): 0;
156 }
157
158 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
159 {
160         if (flags & VM_ACCOUNT)
161                 vm_unacct_memory(VM_ACCT(size));
162 }
163
164 /*
165  * ... whereas tmpfs objects are accounted incrementally as
166  * pages are allocated, in order to allow huge sparse files.
167  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
168  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
169  */
170 static inline int shmem_acct_block(unsigned long flags)
171 {
172         return (flags & VM_ACCOUNT)?
173                 0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
174 }
175
176 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
177 {
178         if (!(flags & VM_ACCOUNT))
179                 vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
180 }
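
/*
 * Illustrative contrast of the two accounting schemes (4K pages assumed):
 * a 1MB shared anonymous mapping is charged all 256 pages up front by
 * shmem_acct_size at setup; a sparse 1MB tmpfs file is charged nothing
 * until pages are actually allocated, one shmem_acct_block call per page.
 */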
181
182 static const struct super_operations shmem_ops;
183 static const struct address_space_operations shmem_aops;
184 static const struct file_operations shmem_file_operations;
185 static const struct inode_operations shmem_inode_operations;
186 static const struct inode_operations shmem_dir_inode_operations;
187 static const struct inode_operations shmem_special_inode_operations;
188 static struct vm_operations_struct shmem_vm_ops;
189
190 static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
191         .ra_pages       = 0,    /* No readahead */
192         .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
193         .unplug_io_fn   = default_unplug_io_fn,
194 };
195
196 static LIST_HEAD(shmem_swaplist);
197 static DEFINE_SPINLOCK(shmem_swaplist_lock);
198
199 static void shmem_free_blocks(struct inode *inode, long pages)
200 {
201         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
202         if (sbinfo->max_blocks) {
203                 spin_lock(&sbinfo->stat_lock);
204                 sbinfo->free_blocks += pages;
205                 inode->i_blocks -= pages*BLOCKS_PER_PAGE;
206                 spin_unlock(&sbinfo->stat_lock);
207         }
208 }
209
210 /*
211  * shmem_recalc_inode - recalculate the size of an inode
212  *
213  * @inode: inode to recalc
214  *
215  * We have to calculate the free blocks since the mm can drop
216  * undirtied hole pages behind our back.
217  *
218  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
219  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
220  *
221  * It has to be called with the spinlock held.
222  */
223 static void shmem_recalc_inode(struct inode *inode)
224 {
225         struct shmem_inode_info *info = SHMEM_I(inode);
226         long freed;
227
228         freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
229         if (freed > 0) {
230                 info->alloced -= freed;
231                 shmem_unacct_blocks(info->flags, freed);
232                 shmem_free_blocks(inode, freed);
233         }
234 }
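
/*
 * Illustrative numbers: with info->alloced 100, info->swapped 20 and
 * nrpages 70, freed = 100 - 20 - 70 = 10: the VM dropped ten undirtied
 * hole pages behind our back, so ten blocks are handed back above.
 */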
235
236 /*
237  * shmem_swp_entry - find the swap vector position in the info structure
238  *
239  * @info:  info structure for the inode
240  * @index: index of the page to find
241  * @page:  optional page to add to the structure. Has to be preset to
242  *         all zeros
243  *
244  * If there is no space allocated yet it will return NULL when
245  * page is NULL, else it will use the page for the needed block,
246  * setting it to NULL on return to indicate that it has been used.
247  *
248  * The swap vector is organized the following way:
249  *
250  * There are SHMEM_NR_DIRECT entries directly stored in the
251  * shmem_inode_info structure. So small files do not need an additional
252  * allocation.
253  *
254  * For pages with index > SHMEM_NR_DIRECT there is the pointer
255  * i_indirect which points to a page which holds in the first half
256  * doubly indirect blocks, in the second half triple indirect blocks:
257  *
258  * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
259  * following layout (for SHMEM_NR_DIRECT == 16):
260  *
261  * i_indirect -> dir --> 16-19
262  *            |      +-> 20-23
263  *            |
264  *            +-->dir2 --> 24-27
265  *            |        +-> 28-31
266  *            |        +-> 32-35
267  *            |        +-> 36-39
268  *            |
269  *            +-->dir3 --> 40-43
270  *                     +-> 44-47
271  *                     +-> 48-51
272  *                     +-> 52-55
273  */
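/*
 * Tracing the lookup below through the artificial layout above
 * (ENTRIES_PER_PAGE == 4, SHMEM_NR_DIRECT == 16), for index 30:
 *   index -= 16       -> 14
 *   offset = 14 % 4   -> 2
 *   index  = 14 / 4   -> 3   (>= ENTRIES_PER_PAGE/2, so triple indirect)
 *   index -= 2        -> 1
 *   dir   += 2 + 1/4  -> topdir slot 2, i.e. dir2
 *   index %= 4        -> 1   -> dir2 slot 1, the 28-31 page
 * so the entry returned is at offset 2 of that page: page index 30.
 */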
274 static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
275 {
276         unsigned long offset;
277         struct page **dir;
278         struct page *subdir;
279
280         if (index < SHMEM_NR_DIRECT) {
281                 shmem_swp_balance_unmap();
282                 return info->i_direct+index;
283         }
284         if (!info->i_indirect) {
285                 if (page) {
286                         info->i_indirect = *page;
287                         *page = NULL;
288                 }
289                 return NULL;                    /* need another page */
290         }
291
292         index -= SHMEM_NR_DIRECT;
293         offset = index % ENTRIES_PER_PAGE;
294         index /= ENTRIES_PER_PAGE;
295         dir = shmem_dir_map(info->i_indirect);
296
297         if (index >= ENTRIES_PER_PAGE/2) {
298                 index -= ENTRIES_PER_PAGE/2;
299                 dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
300                 index %= ENTRIES_PER_PAGE;
301                 subdir = *dir;
302                 if (!subdir) {
303                         if (page) {
304                                 *dir = *page;
305                                 *page = NULL;
306                         }
307                         shmem_dir_unmap(dir);
308                         return NULL;            /* need another page */
309                 }
310                 shmem_dir_unmap(dir);
311                 dir = shmem_dir_map(subdir);
312         }
313
314         dir += index;
315         subdir = *dir;
316         if (!subdir) {
317                 if (!page || !(subdir = *page)) {
318                         shmem_dir_unmap(dir);
319                         return NULL;            /* need a page */
320                 }
321                 *dir = subdir;
322                 *page = NULL;
323         }
324         shmem_dir_unmap(dir);
325         return shmem_swp_map(subdir) + offset;
326 }
327
328 static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
329 {
330         long incdec = value? 1: -1;
331
332         entry->val = value;
333         info->swapped += incdec;
334         if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
335                 struct page *page = kmap_atomic_to_page(entry);
336                 set_page_private(page, page_private(page) + incdec);
337         }
338 }
339
340 /*
341  * shmem_swp_alloc - get the position of the swap entry for the page.
342  *                   If it does not exist allocate the entry.
343  *
344  * @info:       info structure for the inode
345  * @index:      index of the page to find
346  * @sgp:        check and recheck i_size? skip allocation?
347  */
348 static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
349 {
350         struct inode *inode = &info->vfs_inode;
351         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
352         struct page *page = NULL;
353         swp_entry_t *entry;
354
355         if (sgp != SGP_WRITE &&
356             ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
357                 return ERR_PTR(-EINVAL);
358
359         while (!(entry = shmem_swp_entry(info, index, &page))) {
360                 if (sgp == SGP_READ)
361                         return shmem_swp_map(ZERO_PAGE(0));
362                 /*
363                  * Test free_blocks against 1 not 0, since we have 1 data
364                  * page (and perhaps indirect index pages) yet to allocate:
365                  * a waste to allocate index if we cannot allocate data.
366                  */
367                 if (sbinfo->max_blocks) {
368                         spin_lock(&sbinfo->stat_lock);
369                         if (sbinfo->free_blocks <= 1) {
370                                 spin_unlock(&sbinfo->stat_lock);
371                                 return ERR_PTR(-ENOSPC);
372                         }
373                         sbinfo->free_blocks--;
374                         inode->i_blocks += BLOCKS_PER_PAGE;
375                         spin_unlock(&sbinfo->stat_lock);
376                 }
377
378                 spin_unlock(&info->lock);
379                 page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
380                 if (page)
381                         set_page_private(page, 0);
382                 spin_lock(&info->lock);
383
384                 if (!page) {
385                         shmem_free_blocks(inode, 1);
386                         return ERR_PTR(-ENOMEM);
387                 }
388                 if (sgp != SGP_WRITE &&
389                     ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
390                         entry = ERR_PTR(-EINVAL);
391                         break;
392                 }
393                 if (info->next_index <= index)
394                         info->next_index = index + 1;
395         }
396         if (page) {
397                 /* another task gave its page, or truncated the file */
398                 shmem_free_blocks(inode, 1);
399                 shmem_dir_free(page);
400         }
401         if (info->next_index <= index && !IS_ERR(entry))
402                 info->next_index = index + 1;
403         return entry;
404 }
405
406 /*
407  * shmem_free_swp - free some swap entries in a directory
408  *
409  * @dir:        pointer to the directory
410  * @edir:       pointer after last entry of the directory
411  * @punch_lock: pointer to spinlock when needed for the holepunch case
412  */
413 static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
414                                                 spinlock_t *punch_lock)
415 {
416         spinlock_t *punch_unlock = NULL;
417         swp_entry_t *ptr;
418         int freed = 0;
419
420         for (ptr = dir; ptr < edir; ptr++) {
421                 if (ptr->val) {
422                         if (unlikely(punch_lock)) {
423                                 punch_unlock = punch_lock;
424                                 punch_lock = NULL;
425                                 spin_lock(punch_unlock);
426                                 if (!ptr->val)
427                                         continue;
428                         }
429                         free_swap_and_cache(*ptr);
430                         *ptr = (swp_entry_t){0};
431                         freed++;
432                 }
433         }
434         if (punch_unlock)
435                 spin_unlock(punch_unlock);
436         return freed;
437 }
438
439 static int shmem_map_and_free_swp(struct page *subdir, int offset,
440                 int limit, struct page ***dir, spinlock_t *punch_lock)
441 {
442         swp_entry_t *ptr;
443         int freed = 0;
444
445         ptr = shmem_swp_map(subdir);
446         for (; offset < limit; offset += LATENCY_LIMIT) {
447                 int size = limit - offset;
448                 if (size > LATENCY_LIMIT)
449                         size = LATENCY_LIMIT;
450                 freed += shmem_free_swp(ptr+offset, ptr+offset+size,
451                                                         punch_lock);
452                 if (need_resched()) {
453                         shmem_swp_unmap(ptr);
454                         if (*dir) {
455                                 shmem_dir_unmap(*dir);
456                                 *dir = NULL;
457                         }
458                         cond_resched();
459                         ptr = shmem_swp_map(subdir);
460                 }
461         }
462         shmem_swp_unmap(ptr);
463         return freed;
464 }
465
466 static void shmem_free_pages(struct list_head *next)
467 {
468         struct page *page;
469         int freed = 0;
470
471         do {
472                 page = container_of(next, struct page, lru);
473                 next = next->next;
474                 shmem_dir_free(page);
475                 freed++;
476                 if (freed >= LATENCY_LIMIT) {
477                         cond_resched();
478                         freed = 0;
479                 }
480         } while (next);
481 }
482
483 static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
484 {
485         struct shmem_inode_info *info = SHMEM_I(inode);
486         unsigned long idx;
487         unsigned long size;
488         unsigned long limit;
489         unsigned long stage;
490         unsigned long diroff;
491         struct page **dir;
492         struct page *topdir;
493         struct page *middir;
494         struct page *subdir;
495         swp_entry_t *ptr;
496         LIST_HEAD(pages_to_free);
497         long nr_pages_to_free = 0;
498         long nr_swaps_freed = 0;
499         int offset;
500         int freed;
501         int punch_hole;
502         spinlock_t *needs_lock;
503         spinlock_t *punch_lock;
504         unsigned long upper_limit;
505
506         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
507         idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
508         if (idx >= info->next_index)
509                 return;
510
511         spin_lock(&info->lock);
512         info->flags |= SHMEM_TRUNCATE;
513         if (likely(end == (loff_t) -1)) {
514                 limit = info->next_index;
515                 upper_limit = SHMEM_MAX_INDEX;
516                 info->next_index = idx;
517                 needs_lock = NULL;
518                 punch_hole = 0;
519         } else {
520                 if (end + 1 >= inode->i_size) { /* we may free a little more */
521                         limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
522                                                         PAGE_CACHE_SHIFT;
523                         upper_limit = SHMEM_MAX_INDEX;
524                 } else {
525                         limit = (end + 1) >> PAGE_CACHE_SHIFT;
526                         upper_limit = limit;
527                 }
528                 needs_lock = &info->lock;
529                 punch_hole = 1;
530         }
531
532         topdir = info->i_indirect;
533         if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
534                 info->i_indirect = NULL;
535                 nr_pages_to_free++;
536                 list_add(&topdir->lru, &pages_to_free);
537         }
538         spin_unlock(&info->lock);
539
540         if (info->swapped && idx < SHMEM_NR_DIRECT) {
541                 ptr = info->i_direct;
542                 size = limit;
543                 if (size > SHMEM_NR_DIRECT)
544                         size = SHMEM_NR_DIRECT;
545                 nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
546         }
547
548         /*
549          * If there are no indirect blocks or we are punching a hole
550          * below indirect blocks, nothing to be done.
551          */
552         if (!topdir || limit <= SHMEM_NR_DIRECT)
553                 goto done2;
554
555         /*
556          * The truncation case has already dropped info->lock, and we're safe
557          * because i_size and next_index have already been lowered, preventing
558          * access beyond.  But in the punch_hole case, we still need to take
559          * the lock when updating the swap directory, because there might be
560          * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
561          * shmem_writepage.  However, whenever we find we can remove a whole
562          * directory page (not at the misaligned start or end of the range),
563          * we first NULLify its pointer in the level above, and then have no
564          * need to take the lock when updating its contents: needs_lock and
565          * punch_lock (either pointing to info->lock or NULL) manage this.
566          */
567
568         upper_limit -= SHMEM_NR_DIRECT;
569         limit -= SHMEM_NR_DIRECT;
570         idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
571         offset = idx % ENTRIES_PER_PAGE;
572         idx -= offset;
573
574         dir = shmem_dir_map(topdir);
575         stage = ENTRIES_PER_PAGEPAGE/2;
576         if (idx < ENTRIES_PER_PAGEPAGE/2) {
577                 middir = topdir;
578                 diroff = idx/ENTRIES_PER_PAGE;
579         } else {
580                 dir += ENTRIES_PER_PAGE/2;
581                 dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
582                 while (stage <= idx)
583                         stage += ENTRIES_PER_PAGEPAGE;
584                 middir = *dir;
585                 if (*dir) {
586                         diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
587                                 ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
588                         if (!diroff && !offset && upper_limit >= stage) {
589                                 if (needs_lock) {
590                                         spin_lock(needs_lock);
591                                         *dir = NULL;
592                                         spin_unlock(needs_lock);
593                                         needs_lock = NULL;
594                                 } else
595                                         *dir = NULL;
596                                 nr_pages_to_free++;
597                                 list_add(&middir->lru, &pages_to_free);
598                         }
599                         shmem_dir_unmap(dir);
600                         dir = shmem_dir_map(middir);
601                 } else {
602                         diroff = 0;
603                         offset = 0;
604                         idx = stage;
605                 }
606         }
607
608         for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
609                 if (unlikely(idx == stage)) {
610                         shmem_dir_unmap(dir);
611                         dir = shmem_dir_map(topdir) +
612                             ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
613                         while (!*dir) {
614                                 dir++;
615                                 idx += ENTRIES_PER_PAGEPAGE;
616                                 if (idx >= limit)
617                                         goto done1;
618                         }
619                         stage = idx + ENTRIES_PER_PAGEPAGE;
620                         middir = *dir;
621                         if (punch_hole)
622                                 needs_lock = &info->lock;
623                         if (upper_limit >= stage) {
624                                 if (needs_lock) {
625                                         spin_lock(needs_lock);
626                                         *dir = NULL;
627                                         spin_unlock(needs_lock);
628                                         needs_lock = NULL;
629                                 } else
630                                         *dir = NULL;
631                                 nr_pages_to_free++;
632                                 list_add(&middir->lru, &pages_to_free);
633                         }
634                         shmem_dir_unmap(dir);
635                         cond_resched();
636                         dir = shmem_dir_map(middir);
637                         diroff = 0;
638                 }
639                 punch_lock = needs_lock;
640                 subdir = dir[diroff];
641                 if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
642                         if (needs_lock) {
643                                 spin_lock(needs_lock);
644                                 dir[diroff] = NULL;
645                                 spin_unlock(needs_lock);
646                                 punch_lock = NULL;
647                         } else
648                                 dir[diroff] = NULL;
649                         nr_pages_to_free++;
650                         list_add(&subdir->lru, &pages_to_free);
651                 }
652                 if (subdir && page_private(subdir) /* has swap entries */) {
653                         size = limit - idx;
654                         if (size > ENTRIES_PER_PAGE)
655                                 size = ENTRIES_PER_PAGE;
656                         freed = shmem_map_and_free_swp(subdir,
657                                         offset, size, &dir, punch_lock);
658                         if (!dir)
659                                 dir = shmem_dir_map(middir);
660                         nr_swaps_freed += freed;
661                         if (offset || punch_lock) {
662                                 spin_lock(&info->lock);
663                                 set_page_private(subdir,
664                                         page_private(subdir) - freed);
665                                 spin_unlock(&info->lock);
666                         } else
667                                 BUG_ON(page_private(subdir) != freed);
668                 }
669                 offset = 0;
670         }
671 done1:
672         shmem_dir_unmap(dir);
673 done2:
674         if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
675                 /*
676                  * Call truncate_inode_pages again: racing shmem_unuse_inode
677                  * may have swizzled a page in from swap since vmtruncate or
678                  * generic_delete_inode did it, before we lowered next_index.
679                  * Also, though shmem_getpage checks i_size before adding to
680                  * cache, no recheck after: so fix the narrow window there too.
681                  *
682                  * Recalling truncate_inode_pages_range and unmap_mapping_range
683                  * every time for punch_hole (which never got a chance to clear
684                  * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
685                  * yet hardly ever necessary: try to optimize them out later.
686                  */
687                 truncate_inode_pages_range(inode->i_mapping, start, end);
688                 if (punch_hole)
689                         unmap_mapping_range(inode->i_mapping, start,
690                                                         end - start, 1);
691         }
692
693         spin_lock(&info->lock);
694         info->flags &= ~SHMEM_TRUNCATE;
695         info->swapped -= nr_swaps_freed;
696         if (nr_pages_to_free)
697                 shmem_free_blocks(inode, nr_pages_to_free);
698         shmem_recalc_inode(inode);
699         spin_unlock(&info->lock);
700
701         /*
702          * Empty swap vector directory pages to be freed?
703          */
704         if (!list_empty(&pages_to_free)) {
705                 pages_to_free.prev->next = NULL;
706                 shmem_free_pages(pages_to_free.next);
707         }
708 }
709
710 static void shmem_truncate(struct inode *inode)
711 {
712         shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
713 }
714
715 static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
716 {
717         struct inode *inode = dentry->d_inode;
718         struct page *page = NULL;
719         int error;
720
721         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
722                 if (attr->ia_size < inode->i_size) {
723                         /*
724                          * If truncating down to a partial page, then
725                          * if that page is already allocated, hold it
726                          * in memory until the truncation is over, so
727                          * truncate_partial_page cannot miss it were
728                          * it assigned to swap.
729                          */
730                         if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
731                                 (void) shmem_getpage(inode,
732                                         attr->ia_size>>PAGE_CACHE_SHIFT,
733                                                 &page, SGP_READ, NULL);
734                         }
735                         /*
736                          * Reset SHMEM_PAGEIN flag so that shmem_truncate can
737                          * detect if any pages might have been added to cache
738                          * after truncate_inode_pages.  But we needn't bother
739                          * if it's being fully truncated to zero-length: the
740                          * nrpages check is efficient enough in that case.
741                          */
742                         if (attr->ia_size) {
743                                 struct shmem_inode_info *info = SHMEM_I(inode);
744                                 spin_lock(&info->lock);
745                                 info->flags &= ~SHMEM_PAGEIN;
746                                 spin_unlock(&info->lock);
747                         }
748                 }
749         }
750
751         error = inode_change_ok(inode, attr);
752         if (!error)
753                 error = inode_setattr(inode, attr);
754 #ifdef CONFIG_TMPFS_POSIX_ACL
755         if (!error && (attr->ia_valid & ATTR_MODE))
756                 error = generic_acl_chmod(inode, &shmem_acl_ops);
757 #endif
758         if (page)
759                 page_cache_release(page);
760         return error;
761 }
762
763 static void shmem_delete_inode(struct inode *inode)
764 {
765         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
766         struct shmem_inode_info *info = SHMEM_I(inode);
767
768         if (inode->i_op->truncate == shmem_truncate) {
769                 truncate_inode_pages(inode->i_mapping, 0);
770                 shmem_unacct_size(info->flags, inode->i_size);
771                 inode->i_size = 0;
772                 shmem_truncate(inode);
773                 if (!list_empty(&info->swaplist)) {
774                         spin_lock(&shmem_swaplist_lock);
775                         list_del_init(&info->swaplist);
776                         spin_unlock(&shmem_swaplist_lock);
777                 }
778         }
779         BUG_ON(inode->i_blocks);
780         if (sbinfo->max_inodes) {
781                 spin_lock(&sbinfo->stat_lock);
782                 sbinfo->free_inodes++;
783                 spin_unlock(&sbinfo->stat_lock);
784         }
785         clear_inode(inode);
786 }
787
788 static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
789 {
790         swp_entry_t *ptr;
791
792         for (ptr = dir; ptr < edir; ptr++) {
793                 if (ptr->val == entry.val)
794                         return ptr - dir;
795         }
796         return -1;
797 }
798
799 static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
800 {
801         struct inode *inode;
802         unsigned long idx;
803         unsigned long size;
804         unsigned long limit;
805         unsigned long stage;
806         struct page **dir;
807         struct page *subdir;
808         swp_entry_t *ptr;
809         int offset;
810
811         idx = 0;
812         ptr = info->i_direct;
813         spin_lock(&info->lock);
814         limit = info->next_index;
815         size = limit;
816         if (size > SHMEM_NR_DIRECT)
817                 size = SHMEM_NR_DIRECT;
818         offset = shmem_find_swp(entry, ptr, ptr+size);
819         if (offset >= 0) {
820                 shmem_swp_balance_unmap();
821                 goto found;
822         }
823         if (!info->i_indirect)
824                 goto lost2;
825
826         dir = shmem_dir_map(info->i_indirect);
827         stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;
828
829         for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
830                 if (unlikely(idx == stage)) {
831                         shmem_dir_unmap(dir-1);
832                         dir = shmem_dir_map(info->i_indirect) +
833                             ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
834                         while (!*dir) {
835                                 dir++;
836                                 idx += ENTRIES_PER_PAGEPAGE;
837                                 if (idx >= limit)
838                                         goto lost1;
839                         }
840                         stage = idx + ENTRIES_PER_PAGEPAGE;
841                         subdir = *dir;
842                         shmem_dir_unmap(dir);
843                         dir = shmem_dir_map(subdir);
844                 }
845                 subdir = *dir;
846                 if (subdir && page_private(subdir)) {
847                         ptr = shmem_swp_map(subdir);
848                         size = limit - idx;
849                         if (size > ENTRIES_PER_PAGE)
850                                 size = ENTRIES_PER_PAGE;
851                         offset = shmem_find_swp(entry, ptr, ptr+size);
852                         if (offset >= 0) {
853                                 shmem_dir_unmap(dir);
854                                 goto found;
855                         }
856                         shmem_swp_unmap(ptr);
857                 }
858         }
859 lost1:
860         shmem_dir_unmap(dir-1);
861 lost2:
862         spin_unlock(&info->lock);
863         return 0;
864 found:
865         idx += offset;
866         inode = &info->vfs_inode;
867         if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
868                 info->flags |= SHMEM_PAGEIN;
869                 shmem_swp_set(info, ptr + offset, 0);
870         }
871         shmem_swp_unmap(ptr);
872         spin_unlock(&info->lock);
873         /*
874          * Decrement swap count even when the entry is left behind:
875          * try_to_unuse will skip over mms, then reincrement count.
876          */
877         swap_free(entry);
878         return 1;
879 }
880
881 /*
882  * shmem_unuse() searches for a possibly swapped-out shmem page.
883  */
884 int shmem_unuse(swp_entry_t entry, struct page *page)
885 {
886         struct list_head *p, *next;
887         struct shmem_inode_info *info;
888         int found = 0;
889
890         spin_lock(&shmem_swaplist_lock);
891         list_for_each_safe(p, next, &shmem_swaplist) {
892                 info = list_entry(p, struct shmem_inode_info, swaplist);
893                 if (!info->swapped)
894                         list_del_init(&info->swaplist);
895                 else if (shmem_unuse_inode(info, entry, page)) {
896                         /* move head to start search for next from here */
897                         list_move_tail(&shmem_swaplist, &info->swaplist);
898                         found = 1;
899                         break;
900                 }
901         }
902         spin_unlock(&shmem_swaplist_lock);
903         return found;
904 }
905
906 /*
907  * Move the page from the page cache to the swap cache.
908  */
909 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
910 {
911         struct shmem_inode_info *info;
912         swp_entry_t *entry, swap;
913         struct address_space *mapping;
914         unsigned long index;
915         struct inode *inode;
916
917         BUG_ON(!PageLocked(page));
918         /*
919          * shmem_backing_dev_info's capabilities prevent regular writeback or
920          * sync from ever calling shmem_writepage; but a stacking filesystem
921          * may use the ->writepage of its underlying filesystem, in which case
922          * we want to do nothing when that underlying filesystem is tmpfs
923          * (writing out to swap is useful as a response to memory pressure, but
924          * of no use to stabilize the data) - just redirty the page, unlock it
925          * and claim success in this case.  AOP_WRITEPAGE_ACTIVATE, and the
926          * page_mapped check below, must be avoided unless we're in reclaim.
927          */
928         if (!wbc->for_reclaim) {
929                 set_page_dirty(page);
930                 unlock_page(page);
931                 return 0;
932         }
933         BUG_ON(page_mapped(page));
934
935         mapping = page->mapping;
936         index = page->index;
937         inode = mapping->host;
938         info = SHMEM_I(inode);
939         if (info->flags & VM_LOCKED)
940                 goto redirty;
941         swap = get_swap_page();
942         if (!swap.val)
943                 goto redirty;
944
945         spin_lock(&info->lock);
946         shmem_recalc_inode(inode);
947         if (index >= info->next_index) {
948                 BUG_ON(!(info->flags & SHMEM_TRUNCATE));
949                 goto unlock;
950         }
951         entry = shmem_swp_entry(info, index, NULL);
952         BUG_ON(!entry);
953         BUG_ON(entry->val);
954
955         if (move_to_swap_cache(page, swap) == 0) {
956                 shmem_swp_set(info, entry, swap.val);
957                 shmem_swp_unmap(entry);
958                 spin_unlock(&info->lock);
959                 if (list_empty(&info->swaplist)) {
960                         spin_lock(&shmem_swaplist_lock);
961                         /* move instead of add in case we're racing */
962                         list_move_tail(&info->swaplist, &shmem_swaplist);
963                         spin_unlock(&shmem_swaplist_lock);
964                 }
965                 unlock_page(page);
966                 return 0;
967         }
968
969         shmem_swp_unmap(entry);
970 unlock:
971         spin_unlock(&info->lock);
972         swap_free(swap);
973 redirty:
974         set_page_dirty(page);
975         return AOP_WRITEPAGE_ACTIVATE;  /* Return with the page locked */
976 }
977
978 #ifdef CONFIG_NUMA
979 static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
980 {
981         char *nodelist = strchr(value, ':');
982         int err = 1;
983
984         if (nodelist) {
985                 /* NUL-terminate policy string */
986                 *nodelist++ = '\0';
987                 if (nodelist_parse(nodelist, *policy_nodes))
988                         goto out;
989                 if (!nodes_subset(*policy_nodes, node_states[N_HIGH_MEMORY]))
990                         goto out;
991         }
992         if (!strcmp(value, "default")) {
993                 *policy = MPOL_DEFAULT;
994                 /* Don't allow a nodelist */
995                 if (!nodelist)
996                         err = 0;
997         } else if (!strcmp(value, "prefer")) {
998                 *policy = MPOL_PREFERRED;
999                 /* Insist on a nodelist of one node only */
1000                 if (nodelist) {
1001                         char *rest = nodelist;
1002                         while (isdigit(*rest))
1003                                 rest++;
1004                         if (!*rest)
1005                                 err = 0;
1006                 }
1007         } else if (!strcmp(value, "bind")) {
1008                 *policy = MPOL_BIND;
1009                 /* Insist on a nodelist */
1010                 if (nodelist)
1011                         err = 0;
1012         } else if (!strcmp(value, "interleave")) {
1013                 *policy = MPOL_INTERLEAVE;
1014                 /*
1015                  * Default to online nodes with memory if no nodelist
1016                  */
1017                 if (!nodelist)
1018                         *policy_nodes = node_states[N_HIGH_MEMORY];
1019                 err = 0;
1020         }
1021 out:
1022         /* Restore string for error message */
1023         if (nodelist)
1024                 *--nodelist = ':';
1025         return err;
1026 }
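
/*
 * Illustrative usage: a tmpfs mount with "-o mpol=bind:0-2" reaches here
 * with value "bind" (the ':' is NUL'd out) and nodelist "0-2"; a plain
 * "-o mpol=interleave" arrives with nodelist == NULL and defaults to all
 * nodes with memory.
 */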
1027
1028 static struct page *shmem_swapin(struct shmem_inode_info *info,
1029                                        swp_entry_t entry, unsigned long idx)
1030 {
1031         struct vm_area_struct pvma;
1032         struct page *page;
1033
1034         /* Create a pseudo vma that just contains the policy */
1035         pvma.vm_start = 0;
1036         pvma.vm_pgoff = idx;
1037         pvma.vm_ops = NULL;
1038         pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
1039         swapin_readahead(entry, 0, &pvma);
1040         page = read_swap_cache_async(entry, &pvma, 0);
1041         mpol_free(pvma.vm_policy);
1042         return page;
1043 }
1044
1045 static struct page *shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
1046                                         unsigned long idx)
1047 {
1048         struct vm_area_struct pvma;
1049         struct page *page;
1050
1051         /* Create a pseudo vma that just contains the policy */
1052         pvma.vm_start = 0;
1053         pvma.vm_pgoff = idx;
1054         pvma.vm_ops = NULL;
1055         pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
1056         page = alloc_page_vma(gfp, &pvma, 0);
1057         mpol_free(pvma.vm_policy);
1058         return page;
1059 }
1060 #else
1061 static inline int shmem_parse_mpol(char *value, int *policy,
1062                                                 nodemask_t *policy_nodes)
1063 {
1064         return 1;
1065 }
1066
1067 static inline struct page *
1068 shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry, unsigned long idx)
1069 {
1070         swapin_readahead(entry, 0, NULL);
1071         return read_swap_cache_async(entry, NULL, 0);
1072 }
1073
1074 static inline struct page *
1075 shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, unsigned long idx)
1076 {
1077         return alloc_page(gfp);
1078 }
1079 #endif
1080
1081 /*
1082  * shmem_getpage - either get the page from swap or allocate a new one
1083  *
1084  * If we allocate a new one we do not mark it dirty. That's up to the
1085  * vm. If we swap it in we mark it dirty since we also free the swap
1086  * entry since a page cannot live in both the swap and page cache
1087  */
1088 static int shmem_getpage(struct inode *inode, unsigned long idx,
1089                         struct page **pagep, enum sgp_type sgp, int *type)
1090 {
1091         struct address_space *mapping = inode->i_mapping;
1092         struct shmem_inode_info *info = SHMEM_I(inode);
1093         struct shmem_sb_info *sbinfo;
1094         struct page *filepage = *pagep;
1095         struct page *swappage;
1096         swp_entry_t *entry;
1097         swp_entry_t swap;
1098         int error;
1099
1100         if (idx >= SHMEM_MAX_INDEX)
1101                 return -EFBIG;
1102
1103         if (type)
1104                 *type = 0;
1105
1106         /*
1107          * Normally, filepage is NULL on entry, and either found
1108          * uptodate immediately, or allocated and zeroed, or read
1109          * in under swappage, which is then assigned to filepage.
1110          * But shmem_readpage and shmem_write_begin pass in a locked
1111          * filepage, which may be found not uptodate by other callers
1112          * too, and may need to be copied from the swappage read in.
1113          */
1114 repeat:
1115         if (!filepage)
1116                 filepage = find_lock_page(mapping, idx);
1117         if (filepage && PageUptodate(filepage))
1118                 goto done;
1119         error = 0;
1120         if (sgp == SGP_QUICK)
1121                 goto failed;
1122
1123         spin_lock(&info->lock);
1124         shmem_recalc_inode(inode);
1125         entry = shmem_swp_alloc(info, idx, sgp);
1126         if (IS_ERR(entry)) {
1127                 spin_unlock(&info->lock);
1128                 error = PTR_ERR(entry);
1129                 goto failed;
1130         }
1131         swap = *entry;
1132
1133         if (swap.val) {
1134                 /* Look it up and read it in.. */
1135                 swappage = lookup_swap_cache(swap);
1136                 if (!swappage) {
1137                         shmem_swp_unmap(entry);
1138                         /* here we actually do the io */
1139                         if (type && !(*type & VM_FAULT_MAJOR)) {
1140                                 __count_vm_event(PGMAJFAULT);
1141                                 *type |= VM_FAULT_MAJOR;
1142                         }
1143                         spin_unlock(&info->lock);
1144                         swappage = shmem_swapin(info, swap, idx);
1145                         if (!swappage) {
1146                                 spin_lock(&info->lock);
1147                                 entry = shmem_swp_alloc(info, idx, sgp);
1148                                 if (IS_ERR(entry))
1149                                         error = PTR_ERR(entry);
1150                                 else {
1151                                         if (entry->val == swap.val)
1152                                                 error = -ENOMEM;
1153                                         shmem_swp_unmap(entry);
1154                                 }
1155                                 spin_unlock(&info->lock);
1156                                 if (error)
1157                                         goto failed;
1158                                 goto repeat;
1159                         }
1160                         wait_on_page_locked(swappage);
1161                         page_cache_release(swappage);
1162                         goto repeat;
1163                 }
1164
1165                 /* We have to do this with page locked to prevent races */
1166                 if (TestSetPageLocked(swappage)) {
1167                         shmem_swp_unmap(entry);
1168                         spin_unlock(&info->lock);
1169                         wait_on_page_locked(swappage);
1170                         page_cache_release(swappage);
1171                         goto repeat;
1172                 }
1173                 if (PageWriteback(swappage)) {
1174                         shmem_swp_unmap(entry);
1175                         spin_unlock(&info->lock);
1176                         wait_on_page_writeback(swappage);
1177                         unlock_page(swappage);
1178                         page_cache_release(swappage);
1179                         goto repeat;
1180                 }
1181                 if (!PageUptodate(swappage)) {
1182                         shmem_swp_unmap(entry);
1183                         spin_unlock(&info->lock);
1184                         unlock_page(swappage);
1185                         page_cache_release(swappage);
1186                         error = -EIO;
1187                         goto failed;
1188                 }
1189
1190                 if (filepage) {
1191                         shmem_swp_set(info, entry, 0);
1192                         shmem_swp_unmap(entry);
1193                         delete_from_swap_cache(swappage);
1194                         spin_unlock(&info->lock);
1195                         copy_highpage(filepage, swappage);
1196                         unlock_page(swappage);
1197                         page_cache_release(swappage);
1198                         flush_dcache_page(filepage);
1199                         SetPageUptodate(filepage);
1200                         set_page_dirty(filepage);
1201                         swap_free(swap);
1202                 } else if (!(error = move_from_swap_cache(
1203                                 swappage, idx, mapping))) {
1204                         info->flags |= SHMEM_PAGEIN;
1205                         shmem_swp_set(info, entry, 0);
1206                         shmem_swp_unmap(entry);
1207                         spin_unlock(&info->lock);
1208                         filepage = swappage;
1209                         swap_free(swap);
1210                 } else {
1211                         shmem_swp_unmap(entry);
1212                         spin_unlock(&info->lock);
1213                         unlock_page(swappage);
1214                         page_cache_release(swappage);
1215                         if (error == -ENOMEM) {
1216                                 /* let kswapd refresh zone for GFP_ATOMICs */
1217                                 congestion_wait(WRITE, HZ/50);
1218                         }
1219                         goto repeat;
1220                 }
1221         } else if (sgp == SGP_READ && !filepage) {
1222                 shmem_swp_unmap(entry);
1223                 filepage = find_get_page(mapping, idx);
1224                 if (filepage &&
1225                     (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
1226                         spin_unlock(&info->lock);
1227                         wait_on_page_locked(filepage);
1228                         page_cache_release(filepage);
1229                         filepage = NULL;
1230                         goto repeat;
1231                 }
1232                 spin_unlock(&info->lock);
1233         } else {
1234                 shmem_swp_unmap(entry);
1235                 sbinfo = SHMEM_SB(inode->i_sb);
1236                 if (sbinfo->max_blocks) {
1237                         spin_lock(&sbinfo->stat_lock);
1238                         if (sbinfo->free_blocks == 0 ||
1239                             shmem_acct_block(info->flags)) {
1240                                 spin_unlock(&sbinfo->stat_lock);
1241                                 spin_unlock(&info->lock);
1242                                 error = -ENOSPC;
1243                                 goto failed;
1244                         }
1245                         sbinfo->free_blocks--;
1246                         inode->i_blocks += BLOCKS_PER_PAGE;
1247                         spin_unlock(&sbinfo->stat_lock);
1248                 } else if (shmem_acct_block(info->flags)) {
1249                         spin_unlock(&info->lock);
1250                         error = -ENOSPC;
1251                         goto failed;
1252                 }
1253
1254                 if (!filepage) {
1255                         spin_unlock(&info->lock);
1256                         filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
1257                                                     info,
1258                                                     idx);
1259                         if (!filepage) {
1260                                 shmem_unacct_blocks(info->flags, 1);
1261                                 shmem_free_blocks(inode, 1);
1262                                 error = -ENOMEM;
1263                                 goto failed;
1264                         }
1265
1266                         spin_lock(&info->lock);
1267                         entry = shmem_swp_alloc(info, idx, sgp);
1268                         if (IS_ERR(entry))
1269                                 error = PTR_ERR(entry);
1270                         else {
1271                                 swap = *entry;
1272                                 shmem_swp_unmap(entry);
1273                         }
1274                         if (error || swap.val || 0 != add_to_page_cache_lru(
1275                                         filepage, mapping, idx, GFP_ATOMIC)) {
1276                                 spin_unlock(&info->lock);
1277                                 page_cache_release(filepage);
1278                                 shmem_unacct_blocks(info->flags, 1);
1279                                 shmem_free_blocks(inode, 1);
1280                                 filepage = NULL;
1281                                 if (error)
1282                                         goto failed;
1283                                 goto repeat;
1284                         }
1285                         info->flags |= SHMEM_PAGEIN;
1286                 }
1287
1288                 info->alloced++;
1289                 spin_unlock(&info->lock);
1290                 clear_highpage(filepage);
1291                 flush_dcache_page(filepage);
1292                 SetPageUptodate(filepage);
1293         }
1294 done:
1295         if (*pagep != filepage) {
1296                 *pagep = filepage;
1297                 if (sgp != SGP_FAULT)
1298                         unlock_page(filepage);
1299
1300         }
1301         return 0;
1302
1303 failed:
1304         if (*pagep != filepage) {
1305                 unlock_page(filepage);
1306                 page_cache_release(filepage);
1307         }
1308         return error;
1309 }
1310
1311 static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1312 {
1313         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1314         int error;
1315         int ret;
1316
1317         if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
1318                 return VM_FAULT_SIGBUS;
1319
1320         error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_FAULT, &ret);
1321         if (error)
1322                 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1323
1324         mark_page_accessed(vmf->page);
1325         return ret | VM_FAULT_LOCKED;
1326 }
1327
1328 #ifdef CONFIG_NUMA
1329 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
1330 {
1331         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1332         return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
1333 }
1334
1335 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
1336                                           unsigned long addr)
1337 {
1338         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1339         unsigned long idx;
1340
1341         idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1342         return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
1343 }
1344 #endif
1345
1346 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1347 {
1348         struct inode *inode = file->f_path.dentry->d_inode;
1349         struct shmem_inode_info *info = SHMEM_I(inode);
1350         int retval = -ENOMEM;
1351
1352         spin_lock(&info->lock);
1353         if (lock && !(info->flags & VM_LOCKED)) {
1354                 if (!user_shm_lock(inode->i_size, user))
1355                         goto out_nomem;
1356                 info->flags |= VM_LOCKED;
1357         }
1358         if (!lock && (info->flags & VM_LOCKED) && user) {
1359                 user_shm_unlock(inode->i_size, user);
1360                 info->flags &= ~VM_LOCKED;
1361         }
1362         retval = 0;
1363 out_nomem:
1364         spin_unlock(&info->lock);
1365         return retval;
1366 }
1367
1368 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1369 {
1370         file_accessed(file);
1371         vma->vm_ops = &shmem_vm_ops;
1372         vma->vm_flags |= VM_CAN_NONLINEAR;
1373         return 0;
1374 }
1375
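/*
 * Allocate and initialize a tmpfs inode of the given mode, first
 * charging it against the superblock's inode limit (if one is set);
 * if new_inode then fails, that reservation is given back.
 */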
1376 static struct inode *
1377 shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
1378 {
1379         struct inode *inode;
1380         struct shmem_inode_info *info;
1381         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1382
1383         if (sbinfo->max_inodes) {
1384                 spin_lock(&sbinfo->stat_lock);
1385                 if (!sbinfo->free_inodes) {
1386                         spin_unlock(&sbinfo->stat_lock);
1387                         return NULL;
1388                 }
1389                 sbinfo->free_inodes--;
1390                 spin_unlock(&sbinfo->stat_lock);
1391         }
1392
1393         inode = new_inode(sb);
1394         if (inode) {
1395                 inode->i_mode = mode;
1396                 inode->i_uid = current->fsuid;
1397                 inode->i_gid = current->fsgid;
1398                 inode->i_blocks = 0;
1399                 inode->i_mapping->a_ops = &shmem_aops;
1400                 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1401                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1402                 inode->i_generation = get_seconds();
1403                 info = SHMEM_I(inode);
1404                 memset(info, 0, (char *)inode - (char *)info);
1405                 spin_lock_init(&info->lock);
1406                 INIT_LIST_HEAD(&info->swaplist);
1407
1408                 switch (mode & S_IFMT) {
1409                 default:
1410                         inode->i_op = &shmem_special_inode_operations;
1411                         init_special_inode(inode, mode, dev);
1412                         break;
1413                 case S_IFREG:
1414                         inode->i_op = &shmem_inode_operations;
1415                         inode->i_fop = &shmem_file_operations;
1416                         mpol_shared_policy_init(&info->policy, sbinfo->policy,
1417                                                         &sbinfo->policy_nodes);
1418                         break;
1419                 case S_IFDIR:
1420                         inc_nlink(inode);
1421                         /* Some things misbehave if size == 0 on a directory */
1422                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1423                         inode->i_op = &shmem_dir_inode_operations;
1424                         inode->i_fop = &simple_dir_operations;
1425                         break;
1426                 case S_IFLNK:
1427                         /*
1428                          * Must not load anything into the rbtree:
1429                          * mpol_free_shared_policy will not be called.
1430                          */
1431                         mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
1432                                                 NULL);
1433                         break;
1434                 }
1435         } else if (sbinfo->max_inodes) {
1436                 spin_lock(&sbinfo->stat_lock);
1437                 sbinfo->free_inodes++;
1438                 spin_unlock(&sbinfo->stat_lock);
1439         }
1440         return inode;
1441 }
1442
1443 #ifdef CONFIG_TMPFS
1444 static const struct inode_operations shmem_symlink_inode_operations;
1445 static const struct inode_operations shmem_symlink_inline_operations;
1446
1447 /*
1448  * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
1449  * but providing them allows a tmpfs file to be used with splice and sendfile,
1450  * and beneath the loop driver, in the generic fashion many filesystems support.
1451  */
1452 static int shmem_readpage(struct file *file, struct page *page)
1453 {
1454         struct inode *inode = page->mapping->host;
1455         int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
1456         unlock_page(page);
1457         return error;
1458 }
1459
1460 static int
1461 shmem_write_begin(struct file *file, struct address_space *mapping,
1462                         loff_t pos, unsigned len, unsigned flags,
1463                         struct page **pagep, void **fsdata)
1464 {
1465         struct inode *inode = mapping->host;
1466         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1467         *pagep = NULL;
1468         return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
1469 }
1470
1471 static int
1472 shmem_write_end(struct file *file, struct address_space *mapping,
1473                         loff_t pos, unsigned len, unsigned copied,
1474                         struct page *page, void *fsdata)
1475 {
1476         struct inode *inode = mapping->host;
1477
1478         set_page_dirty(page);
1479         page_cache_release(page);
1480
1481         if (pos+copied > inode->i_size)
1482                 i_size_write(inode, pos+copied);
1483
1484         return copied;
1485 }
1486
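/*
 * The write loop below handles one page-sized chunk at a time: get (or
 * allocate) the backing page with SGP_WRITE, copy from userspace, then
 * dirty and release it.  For a highmem page we first prefault the user
 * buffer with __get_user and try an atomic kmap copy; if that copies
 * short, we retry via the sleeping kmap path before returning -EFAULT.
 */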
1487 static ssize_t
1488 shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
1489 {
1490         struct inode    *inode = file->f_path.dentry->d_inode;
1491         loff_t          pos;
1492         unsigned long   written;
1493         ssize_t         err;
1494
1495         if ((ssize_t) count < 0)
1496                 return -EINVAL;
1497
1498         if (!access_ok(VERIFY_READ, buf, count))
1499                 return -EFAULT;
1500
1501         mutex_lock(&inode->i_mutex);
1502
1503         pos = *ppos;
1504         written = 0;
1505
1506         err = generic_write_checks(file, &pos, &count, 0);
1507         if (err || !count)
1508                 goto out;
1509
1510         err = remove_suid(file->f_path.dentry);
1511         if (err)
1512                 goto out;
1513
1514         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1515
1516         do {
1517                 struct page *page = NULL;
1518                 unsigned long bytes, index, offset;
1519                 char *kaddr;
1520                 int left;
1521
1522                 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
1523                 index = pos >> PAGE_CACHE_SHIFT;
1524                 bytes = PAGE_CACHE_SIZE - offset;
1525                 if (bytes > count)
1526                         bytes = count;
1527
1528                 /*
1529                  * We don't hold page lock across copy from user -
1530                  * what would it guard against? - so no deadlock here.
1531                  * But it still may be a good idea to prefault below.
1532                  */
1533
1534                 err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
1535                 if (err)
1536                         break;
1537
1538                 left = bytes;
1539                 if (PageHighMem(page)) {
1540                         volatile unsigned char dummy;
1541                         __get_user(dummy, buf);
1542                         __get_user(dummy, buf + bytes - 1);
1543
1544                         kaddr = kmap_atomic(page, KM_USER0);
1545                         left = __copy_from_user_inatomic(kaddr + offset,
1546                                                         buf, bytes);
1547                         kunmap_atomic(kaddr, KM_USER0);
1548                 }
1549                 if (left) {
1550                         kaddr = kmap(page);
1551                         left = __copy_from_user(kaddr + offset, buf, bytes);
1552                         kunmap(page);
1553                 }
1554
1555                 written += bytes;
1556                 count -= bytes;
1557                 pos += bytes;
1558                 buf += bytes;
1559                 if (pos > inode->i_size)
1560                         i_size_write(inode, pos);
1561
1562                 flush_dcache_page(page);
1563                 set_page_dirty(page);
1564                 mark_page_accessed(page);
1565                 page_cache_release(page);
1566
1567                 if (left) {
1568                         pos -= left;
1569                         written -= left;
1570                         err = -EFAULT;
1571                         break;
1572                 }
1573
1574                 /*
1575                  * Our dirty pages are not counted in nr_dirty,
1576                  * and we do not attempt to balance dirty pages.
1577                  */
1578
1579                 cond_resched();
1580         } while (count);
1581
1582         *ppos = pos;
1583         if (written)
1584                 err = written;
1585 out:
1586         mutex_unlock(&inode->i_mutex);
1587         return err;
1588 }
1589
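/*
 * The read loop: unlike writes, reads are called without i_mutex, so
 * i_size must be re-checked after each page lookup in case of a racing
 * truncate.  SGP_READ returns no page for a hole in the file, in which
 * case the data is copied from ZERO_PAGE instead.
 */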
1590 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1591 {
1592         struct inode *inode = filp->f_path.dentry->d_inode;
1593         struct address_space *mapping = inode->i_mapping;
1594         unsigned long index, offset;
1595
1596         index = *ppos >> PAGE_CACHE_SHIFT;
1597         offset = *ppos & ~PAGE_CACHE_MASK;
1598
1599         for (;;) {
1600                 struct page *page = NULL;
1601                 unsigned long end_index, nr, ret;
1602                 loff_t i_size = i_size_read(inode);
1603
1604                 end_index = i_size >> PAGE_CACHE_SHIFT;
1605                 if (index > end_index)
1606                         break;
1607                 if (index == end_index) {
1608                         nr = i_size & ~PAGE_CACHE_MASK;
1609                         if (nr <= offset)
1610                                 break;
1611                 }
1612
1613                 desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
1614                 if (desc->error) {
1615                         if (desc->error == -EINVAL)
1616                                 desc->error = 0;
1617                         break;
1618                 }
1619
1620                 /*
1621                  * We must evaluate after, since reads (unlike writes)
1622                  * are called without i_mutex protection against truncate
1623                  */
1624                 nr = PAGE_CACHE_SIZE;
1625                 i_size = i_size_read(inode);
1626                 end_index = i_size >> PAGE_CACHE_SHIFT;
1627                 if (index == end_index) {
1628                         nr = i_size & ~PAGE_CACHE_MASK;
1629                         if (nr <= offset) {
1630                                 if (page)
1631                                         page_cache_release(page);
1632                                 break;
1633                         }
1634                 }
1635                 nr -= offset;
1636
1637                 if (page) {
1638                         /*
1639                          * If users can be writing to this page using arbitrary
1640                          * virtual addresses, take care about potential aliasing
1641                          * before reading the page on the kernel side.
1642                          */
1643                         if (mapping_writably_mapped(mapping))
1644                                 flush_dcache_page(page);
1645                         /*
1646                          * Mark the page accessed if we read the beginning.
1647                          */
1648                         if (!offset)
1649                                 mark_page_accessed(page);
1650                 } else {
1651                         page = ZERO_PAGE(0);
1652                         page_cache_get(page);
1653                 }
1654
1655                 /*
1656                  * Ok, we have the page, and it's up-to-date, so
1657                  * now we can copy it to user space...
1658                  *
1659  * The actor routine returns how many bytes were actually used.
1660                  * NOTE! This may not be the same as how much of a user buffer
1661                  * we filled up (we may be padding etc), so we can only update
1662                  * "pos" here (the actor routine has to update the user buffer
1663                  * pointers and the remaining count).
1664                  */
1665                 ret = actor(desc, page, offset, nr);
1666                 offset += ret;
1667                 index += offset >> PAGE_CACHE_SHIFT;
1668                 offset &= ~PAGE_CACHE_MASK;
1669
1670                 page_cache_release(page);
1671                 if (ret != nr || !desc->count)
1672                         break;
1673
1674                 cond_resched();
1675         }
1676
1677         *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1678         file_accessed(filp);
1679 }
1680
1681 static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
1682 {
1683         read_descriptor_t desc;
1684
1685         if ((ssize_t) count < 0)
1686                 return -EINVAL;
1687         if (!access_ok(VERIFY_WRITE, buf, count))
1688                 return -EFAULT;
1689         if (!count)
1690                 return 0;
1691
1692         desc.written = 0;
1693         desc.count = count;
1694         desc.arg.buf = buf;
1695         desc.error = 0;
1696
1697         do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1698         if (desc.written)
1699                 return desc.written;
1700         return desc.error;
1701 }
1702
1703 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1704 {
1705         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1706
1707         buf->f_type = TMPFS_MAGIC;
1708         buf->f_bsize = PAGE_CACHE_SIZE;
1709         buf->f_namelen = NAME_MAX;
1710         spin_lock(&sbinfo->stat_lock);
1711         if (sbinfo->max_blocks) {
1712                 buf->f_blocks = sbinfo->max_blocks;
1713                 buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
1714         }
1715         if (sbinfo->max_inodes) {
1716                 buf->f_files = sbinfo->max_inodes;
1717                 buf->f_ffree = sbinfo->free_inodes;
1718         }
1719         /* else leave those fields 0 like simple_statfs */
1720         spin_unlock(&sbinfo->stat_lock);
1721         return 0;
1722 }
1723
1724 /*
1725  * File creation. Allocate an inode, and we're done.
1726  */
1727 static int
1728 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1729 {
1730         struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
1731         int error = -ENOSPC;
1732
1733         if (inode) {
1734                 error = security_inode_init_security(inode, dir, NULL, NULL,
1735                                                      NULL);
1736                 if (error) {
1737                         if (error != -EOPNOTSUPP) {
1738                                 iput(inode);
1739                                 return error;
1740                         }
1741                 }
1742                 error = shmem_acl_init(inode, dir);
1743                 if (error) {
1744                         iput(inode);
1745                         return error;
1746                 }
1747                 if (dir->i_mode & S_ISGID) {
1748                         inode->i_gid = dir->i_gid;
1749                         if (S_ISDIR(mode))
1750                                 inode->i_mode |= S_ISGID;
1751                 }
1752                 dir->i_size += BOGO_DIRENT_SIZE;
1753                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1754                 d_instantiate(dentry, inode);
1755                 dget(dentry); /* Extra count - pin the dentry in core */
1756         }
1757         return error;
1758 }
1759
1760 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1761 {
1762         int error;
1763
1764         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1765                 return error;
1766         inc_nlink(dir);
1767         return 0;
1768 }
1769
1770 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1771                 struct nameidata *nd)
1772 {
1773         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1774 }
1775
1776 /*
1777  * Link a file.
1778  */
1779 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1780 {
1781         struct inode *inode = old_dentry->d_inode;
1782         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1783
1784         /*
1785          * No ordinary (disk based) filesystem counts links as inodes;
1786          * but each new link needs a new dentry, pinning lowmem, and
1787          * tmpfs dentries cannot be pruned until they are unlinked.
1788          */
1789         if (sbinfo->max_inodes) {
1790                 spin_lock(&sbinfo->stat_lock);
1791                 if (!sbinfo->free_inodes) {
1792                         spin_unlock(&sbinfo->stat_lock);
1793                         return -ENOSPC;
1794                 }
1795                 sbinfo->free_inodes--;
1796                 spin_unlock(&sbinfo->stat_lock);
1797         }
1798
1799         dir->i_size += BOGO_DIRENT_SIZE;
1800         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1801         inc_nlink(inode);
1802         atomic_inc(&inode->i_count);    /* New dentry reference */
1803         dget(dentry);           /* Extra pinning count for the created dentry */
1804         d_instantiate(dentry, inode);
1805         return 0;
1806 }
1807
1808 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1809 {
1810         struct inode *inode = dentry->d_inode;
1811
1812         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
1813                 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1814                 if (sbinfo->max_inodes) {
1815                         spin_lock(&sbinfo->stat_lock);
1816                         sbinfo->free_inodes++;
1817                         spin_unlock(&sbinfo->stat_lock);
1818                 }
1819         }
1820
1821         dir->i_size -= BOGO_DIRENT_SIZE;
1822         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1823         drop_nlink(inode);
1824         dput(dentry);   /* Undo the count from "create" - this does all the work */
1825         return 0;
1826 }
1827
1828 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1829 {
1830         if (!simple_empty(dentry))
1831                 return -ENOTEMPTY;
1832
1833         drop_nlink(dentry->d_inode);
1834         drop_nlink(dir);
1835         return shmem_unlink(dir, dentry);
1836 }
1837
1838 /*
1839  * The VFS layer already does all the dentry stuff for rename;
1840  * we just have to decrement the usage count for the target if
1841  * it exists, so that the VFS layer correctly frees it when it
1842  * gets overwritten.
1843  */
1844 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1845 {
1846         struct inode *inode = old_dentry->d_inode;
1847         int they_are_dirs = S_ISDIR(inode->i_mode);
1848
1849         if (!simple_empty(new_dentry))
1850                 return -ENOTEMPTY;
1851
1852         if (new_dentry->d_inode) {
1853                 (void) shmem_unlink(new_dir, new_dentry);
1854                 if (they_are_dirs)
1855                         drop_nlink(old_dir);
1856         } else if (they_are_dirs) {
1857                 drop_nlink(old_dir);
1858                 inc_nlink(new_dir);
1859         }
1860
1861         old_dir->i_size -= BOGO_DIRENT_SIZE;
1862         new_dir->i_size += BOGO_DIRENT_SIZE;
1863         old_dir->i_ctime = old_dir->i_mtime =
1864         new_dir->i_ctime = new_dir->i_mtime =
1865         inode->i_ctime = CURRENT_TIME;
1866         return 0;
1867 }
1868
1869 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1870 {
1871         int error;
1872         int len;
1873         struct inode *inode;
1874         struct page *page = NULL;
1875         char *kaddr;
1876         struct shmem_inode_info *info;
1877
1878         len = strlen(symname) + 1;
1879         if (len > PAGE_CACHE_SIZE)
1880                 return -ENAMETOOLONG;
1881
1882         inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
1883         if (!inode)
1884                 return -ENOSPC;
1885
1886         error = security_inode_init_security(inode, dir, NULL, NULL,
1887                                              NULL);
1888         if (error) {
1889                 if (error != -EOPNOTSUPP) {
1890                         iput(inode);
1891                         return error;
1892                 }
1893                 error = 0;
1894         }
1895
1896         info = SHMEM_I(inode);
1897         inode->i_size = len-1;
1898         if (len <= (char *)inode - (char *)info) {
1899                 /* do it inline */
1900                 memcpy(info, symname, len);
1901                 inode->i_op = &shmem_symlink_inline_operations;
1902         } else {
1903                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1904                 if (error) {
1905                         iput(inode);
1906                         return error;
1907                 }
1908                 inode->i_op = &shmem_symlink_inode_operations;
1909                 kaddr = kmap_atomic(page, KM_USER0);
1910                 memcpy(kaddr, symname, len);
1911                 kunmap_atomic(kaddr, KM_USER0);
1912                 set_page_dirty(page);
1913                 page_cache_release(page);
1914         }
1915         if (dir->i_mode & S_ISGID)
1916                 inode->i_gid = dir->i_gid;
1917         dir->i_size += BOGO_DIRENT_SIZE;
1918         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1919         d_instantiate(dentry, inode);
1920         dget(dentry);
1921         return 0;
1922 }
1923
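/*
 * Short symlink targets are stored inline, overlaying the shmem inode
 * info that precedes the struct inode itself; longer targets occupy
 * page 0 of the file, kmapped by follow_link and released by put_link.
 */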
1924 static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
1925 {
1926         nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
1927         return NULL;
1928 }
1929
1930 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1931 {
1932         struct page *page = NULL;
1933         int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1934         nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
1935         return page;
1936 }
1937
1938 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1939 {
1940         if (!IS_ERR(nd_get_link(nd))) {
1941                 struct page *page = cookie;
1942                 kunmap(page);
1943                 mark_page_accessed(page);
1944                 page_cache_release(page);
1945         }
1946 }
1947
1948 static const struct inode_operations shmem_symlink_inline_operations = {
1949         .readlink       = generic_readlink,
1950         .follow_link    = shmem_follow_link_inline,
1951 };
1952
1953 static const struct inode_operations shmem_symlink_inode_operations = {
1954         .truncate       = shmem_truncate,
1955         .readlink       = generic_readlink,
1956         .follow_link    = shmem_follow_link,
1957         .put_link       = shmem_put_link,
1958 };
1959
1960 #ifdef CONFIG_TMPFS_POSIX_ACL
1961 /*
1962  * Superblocks without xattr inode operations will get security.* xattr
1963  * support from the VFS "for free". As soon as we have any other xattrs
1964  * like ACLs, we also need to implement the security.* handlers at
1965  * filesystem level, though.
1966  */
1967
1968 static size_t shmem_xattr_security_list(struct inode *inode, char *list,
1969                                         size_t list_len, const char *name,
1970                                         size_t name_len)
1971 {
1972         return security_inode_listsecurity(inode, list, list_len);
1973 }
1974
1975 static int shmem_xattr_security_get(struct inode *inode, const char *name,
1976                                     void *buffer, size_t size)
1977 {
1978         if (strcmp(name, "") == 0)
1979                 return -EINVAL;
1980         return security_inode_getsecurity(inode, name, buffer, size,
1981                                           -EOPNOTSUPP);
1982 }
1983
1984 static int shmem_xattr_security_set(struct inode *inode, const char *name,
1985                                     const void *value, size_t size, int flags)
1986 {
1987         if (strcmp(name, "") == 0)
1988                 return -EINVAL;
1989         return security_inode_setsecurity(inode, name, value, size, flags);
1990 }
1991
1992 static struct xattr_handler shmem_xattr_security_handler = {
1993         .prefix = XATTR_SECURITY_PREFIX,
1994         .list   = shmem_xattr_security_list,
1995         .get    = shmem_xattr_security_get,
1996         .set    = shmem_xattr_security_set,
1997 };
1998
1999 static struct xattr_handler *shmem_xattr_handlers[] = {
2000         &shmem_xattr_acl_access_handler,
2001         &shmem_xattr_acl_default_handler,
2002         &shmem_xattr_security_handler,
2003         NULL
2004 };
2005 #endif
2006
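/*
 * NFS export support.  tmpfs has no way to reconnect a disconnected
 * dentry to its parent, so get_parent simply reports the handle stale.
 */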
2007 static struct dentry *shmem_get_parent(struct dentry *child)
2008 {
2009         return ERR_PTR(-ESTALE);
2010 }
2011
2012 static int shmem_match(struct inode *ino, void *vfh)
2013 {
2014         __u32 *fh = vfh;
2015         __u64 inum = fh[2];
2016         inum = (inum << 32) | fh[1];
2017         return ino->i_ino == inum && fh[0] == ino->i_generation;
2018 }
2019
2020 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
2021                 struct fid *fid, int fh_len, int fh_type)
2022 {
2023         struct inode *inode;
2024         struct dentry *dentry = NULL;
2025         u64 inum;
2026
2027         if (fh_len < 3)         /* need generation + 64-bit inum */
2028                 return NULL;
2029         inum = (((u64)fid->raw[2]) << 32) | fid->raw[1];
2030
2031         inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
2032                         shmem_match, fid->raw);
2033         if (inode) {
2034                 dentry = d_find_alias(inode);
2035                 iput(inode);
2036         }
2037
2038         return dentry;
2039 }
2040
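/*
 * Encode a three-word file handle: fh[0] is i_generation, fh[1] the low
 * 32 bits of i_ino, fh[2] the high bits.  The inode is hashed on first
 * export, so that shmem_fh_to_dentry's ilookup5 can find it again.
 */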
2041 static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
2042                                 int connectable)
2043 {
2044         struct inode *inode = dentry->d_inode;
2045
2046         if (*len < 3)
2047                 return 255;
2048
2049         if (hlist_unhashed(&inode->i_hash)) {
2050                 /* Unfortunately insert_inode_hash is not idempotent,
2051                  * so as we hash inodes here rather than at creation
2052                  * time, we need a lock to ensure we only try
2053                  * to do it once.
2054                  */
2055                 static DEFINE_SPINLOCK(lock);
2056                 spin_lock(&lock);
2057                 if (hlist_unhashed(&inode->i_hash))
2058                         __insert_inode_hash(inode,
2059                                             inode->i_ino + inode->i_generation);
2060                 spin_unlock(&lock);
2061         }
2062
2063         fh[0] = inode->i_generation;
2064         fh[1] = inode->i_ino;
2065         fh[2] = ((__u64)inode->i_ino) >> 32;
2066
2067         *len = 3;
2068         return 1;
2069 }
2070
2071 static const struct export_operations shmem_export_ops = {
2072         .get_parent     = shmem_get_parent,
2073         .encode_fh      = shmem_encode_fh,
2074         .fh_to_dentry   = shmem_fh_to_dentry,
2075 };
2076
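/*
 * Parse the mount option string; an illustrative combination:
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=10k,mode=1777 tmpfs /mnt
 *
 * "size" takes the usual memparse suffixes or a percentage of total
 * RAM; "mpol" takes a NUMA policy whose nodelist may itself contain
 * commas, hence the careful NUL-termination done below.
 */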
2077 static int shmem_parse_options(char *options, int *mode, uid_t *uid,
2078         gid_t *gid, unsigned long *blocks, unsigned long *inodes,
2079         int *policy, nodemask_t *policy_nodes)
2080 {
2081         char *this_char, *value, *rest;
2082
2083         while (options != NULL) {
2084                 this_char = options;
2085                 for (;;) {
2086                         /*
2087                          * NUL-terminate this option: unfortunately,
2088                          * mount options form a comma-separated list,
2089                          * but mpol's nodelist may also contain commas.
2090                          */
2091                         options = strchr(options, ',');
2092                         if (options == NULL)
2093                                 break;
2094                         options++;
2095                         if (!isdigit(*options)) {
2096                                 options[-1] = '\0';
2097                                 break;
2098                         }
2099                 }
2100                 if (!*this_char)
2101                         continue;
2102                 if ((value = strchr(this_char,'=')) != NULL) {
2103                         *value++ = 0;
2104                 } else {
2105                         printk(KERN_ERR
2106                             "tmpfs: No value for mount option '%s'\n",
2107                             this_char);
2108                         return 1;
2109                 }
2110
2111                 if (!strcmp(this_char,"size")) {
2112                         unsigned long long size;
2113                         size = memparse(value,&rest);
2114                         if (*rest == '%') {
2115                                 size <<= PAGE_SHIFT;
2116                                 size *= totalram_pages;
2117                                 do_div(size, 100);
2118                                 rest++;
2119                         }
2120                         if (*rest)
2121                                 goto bad_val;
2122                         *blocks = size >> PAGE_CACHE_SHIFT;
2123                 } else if (!strcmp(this_char,"nr_blocks")) {
2124                         *blocks = memparse(value,&rest);
2125                         if (*rest)
2126                                 goto bad_val;
2127                 } else if (!strcmp(this_char,"nr_inodes")) {
2128                         *inodes = memparse(value,&rest);
2129                         if (*rest)
2130                                 goto bad_val;
2131                 } else if (!strcmp(this_char,"mode")) {
2132                         if (!mode)
2133                                 continue;
2134                         *mode = simple_strtoul(value,&rest,8);
2135                         if (*rest)
2136                                 goto bad_val;
2137                 } else if (!strcmp(this_char,"uid")) {
2138                         if (!uid)
2139                                 continue;
2140                         *uid = simple_strtoul(value,&rest,0);
2141                         if (*rest)
2142                                 goto bad_val;
2143                 } else if (!strcmp(this_char,"gid")) {
2144                         if (!gid)
2145                                 continue;
2146                         *gid = simple_strtoul(value,&rest,0);
2147                         if (*rest)
2148                                 goto bad_val;
2149                 } else if (!strcmp(this_char,"mpol")) {
2150                         if (shmem_parse_mpol(value,policy,policy_nodes))
2151                                 goto bad_val;
2152                 } else {
2153                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2154                                this_char);
2155                         return 1;
2156                 }
2157         }
2158         return 0;
2159
2160 bad_val:
2161         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2162                value, this_char);
2163         return 1;
2165 }
2166
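/*
 * Remount: reparse the options, then refuse any new limit that is
 * already exceeded by current usage (and any unlimited->limited
 * transition) before committing the new limits under stat_lock.
 */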
2167 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2168 {
2169         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2170         unsigned long max_blocks = sbinfo->max_blocks;
2171         unsigned long max_inodes = sbinfo->max_inodes;
2172         int policy = sbinfo->policy;
2173         nodemask_t policy_nodes = sbinfo->policy_nodes;
2174         unsigned long blocks;
2175         unsigned long inodes;
2176         int error = -EINVAL;
2177
2178         if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
2179                                 &max_inodes, &policy, &policy_nodes))
2180                 return error;
2181
2182         spin_lock(&sbinfo->stat_lock);
2183         blocks = sbinfo->max_blocks - sbinfo->free_blocks;
2184         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2185         if (max_blocks < blocks)
2186                 goto out;
2187         if (max_inodes < inodes)
2188                 goto out;
2189         /*
2190          * Those tests also disallow limited->unlimited while any are in
2191          * use, so i_blocks will always be zero when max_blocks is zero;
2192          * but we must separately disallow unlimited->limited, because
2193          * in that case we have no record of how much is already in use.
2194          */
2195         if (max_blocks && !sbinfo->max_blocks)
2196                 goto out;
2197         if (max_inodes && !sbinfo->max_inodes)
2198                 goto out;
2199
2200         error = 0;
2201         sbinfo->max_blocks  = max_blocks;
2202         sbinfo->free_blocks = max_blocks - blocks;
2203         sbinfo->max_inodes  = max_inodes;
2204         sbinfo->free_inodes = max_inodes - inodes;
2205         sbinfo->policy = policy;
2206         sbinfo->policy_nodes = policy_nodes;
2207 out:
2208         spin_unlock(&sbinfo->stat_lock);
2209         return error;
2210 }
2211 #endif
2212
2213 static void shmem_put_super(struct super_block *sb)
2214 {
2215         kfree(sb->s_fs_info);
2216         sb->s_fs_info = NULL;
2217 }
2218
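/*
 * Fill in a tmpfs superblock: parse the limits from the mount options
 * (the kernel-internal MS_NOUSER instance stays unlimited), allocate
 * the sbinfo, and create the root directory inode.
 */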
2219 static int shmem_fill_super(struct super_block *sb,
2220                             void *data, int silent)
2221 {
2222         struct inode *inode;
2223         struct dentry *root;
2224         int mode   = S_IRWXUGO | S_ISVTX;
2225         uid_t uid = current->fsuid;
2226         gid_t gid = current->fsgid;
2227         int err = -ENOMEM;
2228         struct shmem_sb_info *sbinfo;
2229         unsigned long blocks = 0;
2230         unsigned long inodes = 0;
2231         int policy = MPOL_DEFAULT;
2232         nodemask_t policy_nodes = node_states[N_HIGH_MEMORY];
2233
2234 #ifdef CONFIG_TMPFS
2235         /*
2236          * By default we only allow half of the physical RAM per
2237          * tmpfs instance, limiting inodes to one per page of lowmem;
2238          * but the internal instance is left unlimited.
2239          */
2240         if (!(sb->s_flags & MS_NOUSER)) {
2241                 blocks = totalram_pages / 2;
2242                 inodes = totalram_pages - totalhigh_pages;
2243                 if (inodes > blocks)
2244                         inodes = blocks;
2245                 if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
2246                                         &inodes, &policy, &policy_nodes))
2247                         return -EINVAL;
2248         }
2249         sb->s_export_op = &shmem_export_ops;
2250 #else
2251         sb->s_flags |= MS_NOUSER;
2252 #endif
2253
2254         /* Round up to L1_CACHE_BYTES to resist false sharing */
2255         sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
2256                                 L1_CACHE_BYTES), GFP_KERNEL);
2257         if (!sbinfo)
2258                 return -ENOMEM;
2259
2260         spin_lock_init(&sbinfo->stat_lock);
2261         sbinfo->max_blocks = blocks;
2262         sbinfo->free_blocks = blocks;
2263         sbinfo->max_inodes = inodes;
2264         sbinfo->free_inodes = inodes;
2265         sbinfo->policy = policy;
2266         sbinfo->policy_nodes = policy_nodes;
2267
2268         sb->s_fs_info = sbinfo;
2269         sb->s_maxbytes = SHMEM_MAX_BYTES;
2270         sb->s_blocksize = PAGE_CACHE_SIZE;
2271         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2272         sb->s_magic = TMPFS_MAGIC;
2273         sb->s_op = &shmem_ops;
2274         sb->s_time_gran = 1;
2275 #ifdef CONFIG_TMPFS_POSIX_ACL
2276         sb->s_xattr = shmem_xattr_handlers;
2277         sb->s_flags |= MS_POSIXACL;
2278 #endif
2279
2280         inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
2281         if (!inode)
2282                 goto failed;
2283         inode->i_uid = uid;
2284         inode->i_gid = gid;
2285         root = d_alloc_root(inode);
2286         if (!root)
2287                 goto failed_iput;
2288         sb->s_root = root;
2289         return 0;
2290
2291 failed_iput:
2292         iput(inode);
2293 failed:
2294         shmem_put_super(sb);
2295         return err;
2296 }
2297
2298 static struct kmem_cache *shmem_inode_cachep;
2299
2300 static struct inode *shmem_alloc_inode(struct super_block *sb)
2301 {
2302         struct shmem_inode_info *p;
2303         p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2304         if (!p)
2305                 return NULL;
2306         return &p->vfs_inode;
2307 }
2308
2309 static void shmem_destroy_inode(struct inode *inode)
2310 {
2311         if ((inode->i_mode & S_IFMT) == S_IFREG) {
2312                 /* only struct inode is valid if it's an inline symlink */
2313                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2314         }
2315         shmem_acl_destroy_inode(inode);
2316         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2317 }
2318
2319 static void init_once(struct kmem_cache *cachep, void *foo)
2320 {
2321         struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
2322
2323         inode_init_once(&p->vfs_inode);
2324 #ifdef CONFIG_TMPFS_POSIX_ACL
2325         p->i_acl = NULL;
2326         p->i_default_acl = NULL;
2327 #endif
2328 }
2329
2330 static int init_inodecache(void)
2331 {
2332         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2333                                 sizeof(struct shmem_inode_info),
2334                                 0, SLAB_PANIC, init_once);
2335         return 0;
2336 }
2337
2338 static void destroy_inodecache(void)
2339 {
2340         kmem_cache_destroy(shmem_inode_cachep);
2341 }
2342
2343 static const struct address_space_operations shmem_aops = {
2344         .writepage      = shmem_writepage,
2345         .set_page_dirty = __set_page_dirty_no_writeback,
2346 #ifdef CONFIG_TMPFS
2347         .readpage       = shmem_readpage,
2348         .write_begin    = shmem_write_begin,
2349         .write_end      = shmem_write_end,
2350 #endif
2351         .migratepage    = migrate_page,
2352 };
2353
2354 static const struct file_operations shmem_file_operations = {
2355         .mmap           = shmem_mmap,
2356 #ifdef CONFIG_TMPFS
2357         .llseek         = generic_file_llseek,
2358         .read           = shmem_file_read,
2359         .write          = shmem_file_write,
2360         .fsync          = simple_sync_file,
2361         .splice_read    = generic_file_splice_read,
2362         .splice_write   = generic_file_splice_write,
2363 #endif
2364 };
2365
2366 static const struct inode_operations shmem_inode_operations = {
2367         .truncate       = shmem_truncate,
2368         .setattr        = shmem_notify_change,
2369         .truncate_range = shmem_truncate_range,
2370 #ifdef CONFIG_TMPFS_POSIX_ACL
2371         .setxattr       = generic_setxattr,
2372         .getxattr       = generic_getxattr,
2373         .listxattr      = generic_listxattr,
2374         .removexattr    = generic_removexattr,
2375         .permission     = shmem_permission,
2376 #endif
2378 };
2379
2380 static const struct inode_operations shmem_dir_inode_operations = {
2381 #ifdef CONFIG_TMPFS
2382         .create         = shmem_create,
2383         .lookup         = simple_lookup,
2384         .link           = shmem_link,
2385         .unlink         = shmem_unlink,
2386         .symlink        = shmem_symlink,
2387         .mkdir          = shmem_mkdir,
2388         .rmdir          = shmem_rmdir,
2389         .mknod          = shmem_mknod,
2390         .rename         = shmem_rename,
2391 #endif
2392 #ifdef CONFIG_TMPFS_POSIX_ACL
2393         .setattr        = shmem_notify_change,
2394         .setxattr       = generic_setxattr,
2395         .getxattr       = generic_getxattr,
2396         .listxattr      = generic_listxattr,
2397         .removexattr    = generic_removexattr,
2398         .permission     = shmem_permission,
2399 #endif
2400 };
2401
2402 static const struct inode_operations shmem_special_inode_operations = {
2403 #ifdef CONFIG_TMPFS_POSIX_ACL
2404         .setattr        = shmem_notify_change,
2405         .setxattr       = generic_setxattr,
2406         .getxattr       = generic_getxattr,
2407         .listxattr      = generic_listxattr,
2408         .removexattr    = generic_removexattr,
2409         .permission     = shmem_permission,
2410 #endif
2411 };
2412
2413 static const struct super_operations shmem_ops = {
2414         .alloc_inode    = shmem_alloc_inode,
2415         .destroy_inode  = shmem_destroy_inode,
2416 #ifdef CONFIG_TMPFS
2417         .statfs         = shmem_statfs,
2418         .remount_fs     = shmem_remount_fs,
2419 #endif
2420         .delete_inode   = shmem_delete_inode,
2421         .drop_inode     = generic_delete_inode,
2422         .put_super      = shmem_put_super,
2423 };
2424
2425 static struct vm_operations_struct shmem_vm_ops = {
2426         .fault          = shmem_fault,
2427 #ifdef CONFIG_NUMA
2428         .set_policy     = shmem_set_policy,
2429         .get_policy     = shmem_get_policy,
2430 #endif
2431 };
2432
2434 static int shmem_get_sb(struct file_system_type *fs_type,
2435         int flags, const char *dev_name, void *data, struct vfsmount *mnt)
2436 {
2437         return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
2438 }
2439
2440 static struct file_system_type tmpfs_fs_type = {
2441         .owner          = THIS_MODULE,
2442         .name           = "tmpfs",
2443         .get_sb         = shmem_get_sb,
2444         .kill_sb        = kill_litter_super,
2445 };
2446 static struct vfsmount *shm_mnt;
2447
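/*
 * Module init: set up the backing_dev_info and the inode cache,
 * register the filesystem, then create the internal shm_mnt mount used
 * by shmem_file_setup; each step is unwound if a later one fails.
 */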
2448 static int __init init_tmpfs(void)
2449 {
2450         int error;
2451
2452         error = bdi_init(&shmem_backing_dev_info);
2453         if (error)
2454                 goto out4;
2455
2456         error = init_inodecache();
2457         if (error)
2458                 goto out3;
2459
2460         error = register_filesystem(&tmpfs_fs_type);
2461         if (error) {
2462                 printk(KERN_ERR "Could not register tmpfs\n");
2463                 goto out2;
2464         }
2465
2466         shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
2467                                 tmpfs_fs_type.name, NULL);
2468         if (IS_ERR(shm_mnt)) {
2469                 error = PTR_ERR(shm_mnt);
2470                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2471                 goto out1;
2472         }
2473         return 0;
2474
2475 out1:
2476         unregister_filesystem(&tmpfs_fs_type);
2477 out2:
2478         destroy_inodecache();
2479 out3:
2480         bdi_destroy(&shmem_backing_dev_info);
2481 out4:
2482         shm_mnt = ERR_PTR(error);
2483         return error;
2484 }
2485 module_init(init_tmpfs)
2486
2487 /*
2488  * shmem_file_setup - get an unlinked file living in tmpfs
2489  *
2490  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2491  * @size: size to be set for the file
2492  * @flags: vm_flags, of which VM_ACCOUNT requests up-front size accounting
2493  */
2494 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
2495 {
2496         int error;
2497         struct file *file;
2498         struct inode *inode;
2499         struct dentry *dentry, *root;
2500         struct qstr this;
2501
2502         if (IS_ERR(shm_mnt))
2503                 return (void *)shm_mnt;
2504
2505         if (size < 0 || size > SHMEM_MAX_BYTES)
2506                 return ERR_PTR(-EINVAL);
2507
2508         if (shmem_acct_size(flags, size))
2509                 return ERR_PTR(-ENOMEM);
2510
2511         error = -ENOMEM;
2512         this.name = name;
2513         this.len = strlen(name);
2514         this.hash = 0; /* will go */
2515         root = shm_mnt->mnt_root;
2516         dentry = d_alloc(root, &this);
2517         if (!dentry)
2518                 goto put_memory;
2519
2520         error = -ENFILE;
2521         file = get_empty_filp();
2522         if (!file)
2523                 goto put_dentry;
2524
2525         error = -ENOSPC;
2526         inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
2527         if (!inode)
2528                 goto close_file;
2529
2530         SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
2531         d_instantiate(dentry, inode);
2532         inode->i_size = size;
2533         inode->i_nlink = 0;     /* It is unlinked */
2534         init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
2535                         &shmem_file_operations);
2536         return file;
2537
2538 close_file:
2539         put_filp(file);
2540 put_dentry:
2541         dput(dentry);
2542 put_memory:
2543         shmem_unacct_size(flags, size);
2544         return ERR_PTR(error);
2545 }
2546
2547 /*
2548  * shmem_zero_setup - setup a shared anonymous mapping
2549  *
2550  * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
2551  */
2552 int shmem_zero_setup(struct vm_area_struct *vma)
2553 {
2554         struct file *file;
2555         loff_t size = vma->vm_end - vma->vm_start;
2556
2557         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2558         if (IS_ERR(file))
2559                 return PTR_ERR(file);
2560
2561         if (vma->vm_file)
2562                 fput(vma->vm_file);
2563         vma->vm_file = file;
2564         vma->vm_ops = &shmem_vm_ops;
2565         return 0;
2566 }