tmpfs: open a window in shmem_unuse_inode
mm/shmem.c
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits,
 * which makes it a completely usable filesystem.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/generic_acl.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC     0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
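
/*
 * A worked example, assuming 4K pages and SHMEM_NR_DIRECT == 16 (both are
 * configuration-dependent, so this is illustration rather than a fixed
 * limit): with 32-bit unsigned long, ENTRIES_PER_PAGE is 1024, giving
 * SHMEM_MAX_INDEX = 16 + (1024*1024/2) * 1025 = 537,395,216 pages, so
 * SHMEM_MAX_BYTES is roughly 2TB.  With 64-bit longs ENTRIES_PER_PAGE
 * halves to 512, and the limit drops to a little over 256GB.
 */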

#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN     VM_READ
#define SHMEM_TRUNCATE   VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT    64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
        SGP_READ,       /* don't exceed i_size, don't allocate page */
        SGP_CACHE,      /* don't exceed i_size, may allocate page */
        SGP_DIRTY,      /* like SGP_CACHE, but set new page dirty */
        SGP_WRITE,      /* may exceed i_size, may allocate page */
};
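
/*
 * Note how sgp_type steers shmem_swp_alloc below: SGP_WRITE skips the
 * i_size check (a write may extend the file), and SGP_READ never allocates
 * an index page, falling back to a mapped ZERO_PAGE entry instead.
 */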

static int shmem_getpage(struct inode *inode, unsigned long idx,
                         struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
        /*
         * The above definition of ENTRIES_PER_PAGE, and the use of
         * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
         * might be reconsidered if it ever diverges from PAGE_SIZE.
         *
         * Mobility flags are masked out as swap vectors cannot move
         */
        return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
                                PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
        __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
        return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
        kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
        return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
        /*
         * When passing a pointer to an i_direct entry, to code which
         * also handles indirect entries and so will shmem_swp_unmap,
         * we must arrange for the preempt count to remain in balance.
         * What kmap_atomic of a lowmem page does depends on config
         * and architecture, so pretend to kmap_atomic some lowmem page.
         */
        (void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
        kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
        return (flags & VM_ACCOUNT)?
                security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
        if (flags & VM_ACCOUNT)
                vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
        return (flags & VM_ACCOUNT)?
                0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
        if (!(flags & VM_ACCOUNT))
                vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
        .ra_pages       = 0,    /* No readahead */
        .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
        .unplug_io_fn   = default_unplug_io_fn,
};
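
/*
 * With dirty accounting and writeback disabled above, regular writeback
 * never calls shmem_writepage: pages reach swap only under memory
 * pressure (see the fuller explanation in shmem_writepage itself).
 */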

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static void shmem_free_blocks(struct inode *inode, long pages)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        if (sbinfo->max_blocks) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_blocks += pages;
                inode->i_blocks -= pages*BLOCKS_PER_PAGE;
                spin_unlock(&sbinfo->stat_lock);
        }
}

static int shmem_reserve_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                if (!sbinfo->free_inodes) {
                        spin_unlock(&sbinfo->stat_lock);
                        return -ENOSPC;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }
        return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}

/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        long freed;

        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
                info->alloced -= freed;
                shmem_unacct_blocks(info->flags, freed);
                shmem_free_blocks(inode, freed);
        }
}

/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *            |      +-> 20-23
 *            |
 *            +-->dir2 --> 24-27
 *            |        +-> 28-31
 *            |        +-> 32-35
 *            |        +-> 36-39
 *            |
 *            +-->dir3 --> 40-43
 *                     +-> 44-47
 *                     +-> 48-51
 *                     +-> 52-55
 */
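/*
 * To walk that example layout for index 30: 30 - 16 = 14, so offset is
 * 14 % 4 = 2 and the directory index is 14 / 4 = 3.  Since 3 >= 4/2 we
 * are in the triple-indirect half: 3 - 2 = 1, dir advances to slot
 * 2 + 1/4 = 2 (dir2), and 1 % 4 = 1 selects dir2's second block, the
 * 28-31 page; entry 2 of that block is page 30, as the diagram shows.
 */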
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
        unsigned long offset;
        struct page **dir;
        struct page *subdir;

        if (index < SHMEM_NR_DIRECT) {
                shmem_swp_balance_unmap();
                return info->i_direct+index;
        }
        if (!info->i_indirect) {
                if (page) {
                        info->i_indirect = *page;
                        *page = NULL;
                }
                return NULL;                    /* need another page */
        }

        index -= SHMEM_NR_DIRECT;
        offset = index % ENTRIES_PER_PAGE;
        index /= ENTRIES_PER_PAGE;
        dir = shmem_dir_map(info->i_indirect);

        if (index >= ENTRIES_PER_PAGE/2) {
                index -= ENTRIES_PER_PAGE/2;
                dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
                index %= ENTRIES_PER_PAGE;
                subdir = *dir;
                if (!subdir) {
                        if (page) {
                                *dir = *page;
                                *page = NULL;
                        }
                        shmem_dir_unmap(dir);
                        return NULL;            /* need another page */
                }
                shmem_dir_unmap(dir);
                dir = shmem_dir_map(subdir);
        }

        dir += index;
        subdir = *dir;
        if (!subdir) {
                if (!page || !(subdir = *page)) {
                        shmem_dir_unmap(dir);
                        return NULL;            /* need a page */
                }
                *dir = subdir;
                *page = NULL;
        }
        shmem_dir_unmap(dir);
        return shmem_swp_map(subdir) + offset;
}
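
/*
 * Note that a non-NULL return from shmem_swp_entry is a kmap_atomic'd
 * pointer: every caller must pair it with shmem_swp_unmap (hence the
 * balancing trick for i_direct entries above).
 */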

static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
        long incdec = value? 1: -1;

        entry->val = value;
        info->swapped += incdec;
        if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
                struct page *page = kmap_atomic_to_page(entry);
                set_page_private(page, page_private(page) + incdec);
        }
}
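
/*
 * page_private of an index page counts the swap entries it holds; the
 * truncation and swapoff paths test it to skip pages with no entries.
 */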

/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist allocate the entry.
 *
 * @info:       info structure for the inode
 * @index:      index of the page to find
 * @sgp:        check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
        struct inode *inode = &info->vfs_inode;
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        struct page *page = NULL;
        swp_entry_t *entry;

        if (sgp != SGP_WRITE &&
            ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
                return ERR_PTR(-EINVAL);

        while (!(entry = shmem_swp_entry(info, index, &page))) {
                if (sgp == SGP_READ)
                        return shmem_swp_map(ZERO_PAGE(0));
                /*
                 * Test free_blocks against 1 not 0, since we have 1 data
                 * page (and perhaps indirect index pages) yet to allocate:
                 * a waste to allocate index if we cannot allocate data.
                 */
                if (sbinfo->max_blocks) {
                        spin_lock(&sbinfo->stat_lock);
                        if (sbinfo->free_blocks <= 1) {
                                spin_unlock(&sbinfo->stat_lock);
                                return ERR_PTR(-ENOSPC);
                        }
                        sbinfo->free_blocks--;
                        inode->i_blocks += BLOCKS_PER_PAGE;
                        spin_unlock(&sbinfo->stat_lock);
                }

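                /*
                 * info->lock is dropped around the allocation: the loop
                 * re-derives entry afterwards, so a racing task which
                 * donated its own index page, or a truncation meanwhile,
                 * is handled by the rechecks below.
                 */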
                spin_unlock(&info->lock);
                page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
                if (page)
                        set_page_private(page, 0);
                spin_lock(&info->lock);

                if (!page) {
                        shmem_free_blocks(inode, 1);
                        return ERR_PTR(-ENOMEM);
                }
                if (sgp != SGP_WRITE &&
                    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
                        entry = ERR_PTR(-EINVAL);
                        break;
                }
                if (info->next_index <= index)
                        info->next_index = index + 1;
        }
        if (page) {
                /* another task gave its page, or truncated the file */
                shmem_free_blocks(inode, 1);
                shmem_dir_free(page);
        }
        if (info->next_index <= index && !IS_ERR(entry))
                info->next_index = index + 1;
        return entry;
}

/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
                                                spinlock_t *punch_lock)
{
        spinlock_t *punch_unlock = NULL;
        swp_entry_t *ptr;
        int freed = 0;

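        /*
         * For the holepunch case the spinlock is taken lazily, on the
         * first swap entry seen, and only once: entries are then freed
         * under that lock, and ptr->val is rechecked in case a racer
         * already freed the entry while we waited for the lock.
         */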
        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val) {
                        if (unlikely(punch_lock)) {
                                punch_unlock = punch_lock;
                                punch_lock = NULL;
                                spin_lock(punch_unlock);
                                if (!ptr->val)
                                        continue;
                        }
                        free_swap_and_cache(*ptr);
                        *ptr = (swp_entry_t){0};
                        freed++;
                }
        }
        if (punch_unlock)
                spin_unlock(punch_unlock);
        return freed;
}

static int shmem_map_and_free_swp(struct page *subdir, int offset,
                int limit, struct page ***dir, spinlock_t *punch_lock)
{
        swp_entry_t *ptr;
        int freed = 0;

        ptr = shmem_swp_map(subdir);
        for (; offset < limit; offset += LATENCY_LIMIT) {
                int size = limit - offset;
                if (size > LATENCY_LIMIT)
                        size = LATENCY_LIMIT;
                freed += shmem_free_swp(ptr+offset, ptr+offset+size,
                                                        punch_lock);
                if (need_resched()) {
                        shmem_swp_unmap(ptr);
                        if (*dir) {
                                shmem_dir_unmap(*dir);
                                *dir = NULL;
                        }
                        cond_resched();
                        ptr = shmem_swp_map(subdir);
                }
        }
        shmem_swp_unmap(ptr);
        return freed;
}

static void shmem_free_pages(struct list_head *next)
{
        struct page *page;
        int freed = 0;

        do {
                page = container_of(next, struct page, lru);
                next = next->next;
                shmem_dir_free(page);
                freed++;
                if (freed >= LATENCY_LIMIT) {
                        cond_resched();
                        freed = 0;
                }
        } while (next);
}

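/*
 * shmem_truncate_range proceeds in stages: free swap entries held in
 * the direct vector, then walk the indirect tree freeing whole index
 * pages where the range allows, and finally recheck the page cache for
 * pages swizzled in during the walk (see the comment before done2).
 */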
static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
        unsigned long stage;
        unsigned long diroff;
        struct page **dir;
        struct page *topdir;
        struct page *middir;
        struct page *subdir;
        swp_entry_t *ptr;
        LIST_HEAD(pages_to_free);
        long nr_pages_to_free = 0;
        long nr_swaps_freed = 0;
        int offset;
        int freed;
        int punch_hole;
        spinlock_t *needs_lock;
        spinlock_t *punch_lock;
        unsigned long upper_limit;

        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (idx >= info->next_index)
                return;

        spin_lock(&info->lock);
        info->flags |= SHMEM_TRUNCATE;
        if (likely(end == (loff_t) -1)) {
                limit = info->next_index;
                upper_limit = SHMEM_MAX_INDEX;
                info->next_index = idx;
                needs_lock = NULL;
                punch_hole = 0;
        } else {
                if (end + 1 >= inode->i_size) { /* we may free a little more */
                        limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
                                                        PAGE_CACHE_SHIFT;
                        upper_limit = SHMEM_MAX_INDEX;
                } else {
                        limit = (end + 1) >> PAGE_CACHE_SHIFT;
                        upper_limit = limit;
                }
                needs_lock = &info->lock;
                punch_hole = 1;
        }

        topdir = info->i_indirect;
        if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
                info->i_indirect = NULL;
                nr_pages_to_free++;
                list_add(&topdir->lru, &pages_to_free);
        }
        spin_unlock(&info->lock);

        if (info->swapped && idx < SHMEM_NR_DIRECT) {
                ptr = info->i_direct;
                size = limit;
                if (size > SHMEM_NR_DIRECT)
                        size = SHMEM_NR_DIRECT;
                nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
        }

        /*
         * If there are no indirect blocks or we are punching a hole
         * below indirect blocks, nothing to be done.
         */
        if (!topdir || limit <= SHMEM_NR_DIRECT)
                goto done2;

        /*
         * The truncation case has already dropped info->lock, and we're safe
         * because i_size and next_index have already been lowered, preventing
         * access beyond.  But in the punch_hole case, we still need to take
         * the lock when updating the swap directory, because there might be
         * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
         * shmem_writepage.  However, whenever we find we can remove a whole
         * directory page (not at the misaligned start or end of the range),
         * we first NULLify its pointer in the level above, and then have no
         * need to take the lock when updating its contents: needs_lock and
         * punch_lock (either pointing to info->lock or NULL) manage this.
         */

        upper_limit -= SHMEM_NR_DIRECT;
        limit -= SHMEM_NR_DIRECT;
        idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
        offset = idx % ENTRIES_PER_PAGE;
        idx -= offset;

        dir = shmem_dir_map(topdir);
        stage = ENTRIES_PER_PAGEPAGE/2;
        if (idx < ENTRIES_PER_PAGEPAGE/2) {
                middir = topdir;
                diroff = idx/ENTRIES_PER_PAGE;
        } else {
                dir += ENTRIES_PER_PAGE/2;
                dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
                while (stage <= idx)
                        stage += ENTRIES_PER_PAGEPAGE;
                middir = *dir;
                if (*dir) {
                        diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
                                ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
                        if (!diroff && !offset && upper_limit >= stage) {
                                if (needs_lock) {
                                        spin_lock(needs_lock);
                                        *dir = NULL;
                                        spin_unlock(needs_lock);
                                        needs_lock = NULL;
                                } else
                                        *dir = NULL;
                                nr_pages_to_free++;
                                list_add(&middir->lru, &pages_to_free);
                        }
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(middir);
                } else {
                        diroff = 0;
                        offset = 0;
                        idx = stage;
                }
        }

        for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
                if (unlikely(idx == stage)) {
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(topdir) +
                            ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
                        while (!*dir) {
                                dir++;
                                idx += ENTRIES_PER_PAGEPAGE;
                                if (idx >= limit)
                                        goto done1;
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        middir = *dir;
                        if (punch_hole)
                                needs_lock = &info->lock;
                        if (upper_limit >= stage) {
                                if (needs_lock) {
                                        spin_lock(needs_lock);
                                        *dir = NULL;
                                        spin_unlock(needs_lock);
                                        needs_lock = NULL;
                                } else
                                        *dir = NULL;
                                nr_pages_to_free++;
                                list_add(&middir->lru, &pages_to_free);
                        }
                        shmem_dir_unmap(dir);
                        cond_resched();
                        dir = shmem_dir_map(middir);
                        diroff = 0;
                }
                punch_lock = needs_lock;
                subdir = dir[diroff];
                if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
                        if (needs_lock) {
                                spin_lock(needs_lock);
                                dir[diroff] = NULL;
                                spin_unlock(needs_lock);
                                punch_lock = NULL;
                        } else
                                dir[diroff] = NULL;
                        nr_pages_to_free++;
                        list_add(&subdir->lru, &pages_to_free);
                }
                if (subdir && page_private(subdir) /* has swap entries */) {
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        freed = shmem_map_and_free_swp(subdir,
                                        offset, size, &dir, punch_lock);
                        if (!dir)
                                dir = shmem_dir_map(middir);
                        nr_swaps_freed += freed;
                        if (offset || punch_lock) {
                                spin_lock(&info->lock);
                                set_page_private(subdir,
                                        page_private(subdir) - freed);
                                spin_unlock(&info->lock);
                        } else
                                BUG_ON(page_private(subdir) != freed);
                }
                offset = 0;
        }
done1:
        shmem_dir_unmap(dir);
done2:
        if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
                /*
                 * Call truncate_inode_pages again: racing shmem_unuse_inode
                 * may have swizzled a page in from swap since vmtruncate or
                 * generic_delete_inode did it, before we lowered next_index.
                 * Also, though shmem_getpage checks i_size before adding to
                 * cache, no recheck after: so fix the narrow window there too.
                 *
                 * Recalling truncate_inode_pages_range and unmap_mapping_range
                 * every time for punch_hole (which never got a chance to clear
                 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
                 * yet hardly ever necessary: try to optimize them out later.
                 */
                truncate_inode_pages_range(inode->i_mapping, start, end);
                if (punch_hole)
                        unmap_mapping_range(inode->i_mapping, start,
                                                        end - start, 1);
        }

        spin_lock(&info->lock);
        info->flags &= ~SHMEM_TRUNCATE;
        info->swapped -= nr_swaps_freed;
        if (nr_pages_to_free)
                shmem_free_blocks(inode, nr_pages_to_free);
        shmem_recalc_inode(inode);
        spin_unlock(&info->lock);

        /*
         * Empty swap vector directory pages to be freed?
         */
        if (!list_empty(&pages_to_free)) {
                pages_to_free.prev->next = NULL;
                shmem_free_pages(pages_to_free.next);
        }
}

static void shmem_truncate(struct inode *inode)
{
        shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        struct page *page = NULL;
        int error;

        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
                if (attr->ia_size < inode->i_size) {
                        /*
                         * If truncating down to a partial page, then
                         * if that page is already allocated, hold it
                         * in memory until the truncation is over, so
                         * truncate_partial_page cannot miss it were
                         * it assigned to swap.
                         */
                        if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
                                (void) shmem_getpage(inode,
                                        attr->ia_size>>PAGE_CACHE_SHIFT,
                                                &page, SGP_READ, NULL);
                                if (page)
                                        unlock_page(page);
                        }
                        /*
                         * Reset SHMEM_PAGEIN flag so that shmem_truncate can
                         * detect if any pages might have been added to cache
                         * after truncate_inode_pages.  But we needn't bother
                         * if it's being fully truncated to zero-length: the
                         * nrpages check is efficient enough in that case.
                         */
                        if (attr->ia_size) {
                                struct shmem_inode_info *info = SHMEM_I(inode);
                                spin_lock(&info->lock);
                                info->flags &= ~SHMEM_PAGEIN;
                                spin_unlock(&info->lock);
                        }
                }
        }

        error = inode_change_ok(inode, attr);
        if (!error)
                error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
        if (!error && (attr->ia_valid & ATTR_MODE))
                error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
        if (page)
                page_cache_release(page);
        return error;
}

static void shmem_delete_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);

        if (inode->i_op->truncate == shmem_truncate) {
                truncate_inode_pages(inode->i_mapping, 0);
                shmem_unacct_size(info->flags, inode->i_size);
                inode->i_size = 0;
                shmem_truncate(inode);
                if (!list_empty(&info->swaplist)) {
                        mutex_lock(&shmem_swaplist_mutex);
                        list_del_init(&info->swaplist);
                        mutex_unlock(&shmem_swaplist_mutex);
                }
        }
        BUG_ON(inode->i_blocks);
        shmem_free_inode(inode->i_sb);
        clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
        swp_entry_t *ptr;

        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val == entry.val)
                        return ptr - dir;
        }
        return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
        struct inode *inode;
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
        unsigned long stage;
        struct page **dir;
        struct page *subdir;
        swp_entry_t *ptr;
        int offset;
        int error;

        idx = 0;
        ptr = info->i_direct;
        spin_lock(&info->lock);
        limit = info->next_index;
        size = limit;
        if (size > SHMEM_NR_DIRECT)
                size = SHMEM_NR_DIRECT;
        offset = shmem_find_swp(entry, ptr, ptr+size);
        if (offset >= 0)
                goto found;
        if (!info->i_indirect)
                goto lost2;

        dir = shmem_dir_map(info->i_indirect);
        stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

        for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
                if (unlikely(idx == stage)) {
                        shmem_dir_unmap(dir-1);
                        if (cond_resched_lock(&info->lock)) {
                                /* check it has not been truncated */
                                if (limit > info->next_index) {
                                        limit = info->next_index;
                                        if (idx >= limit)
                                                goto lost2;
                                }
                        }
                        dir = shmem_dir_map(info->i_indirect) +
                            ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
                        while (!*dir) {
                                dir++;
                                idx += ENTRIES_PER_PAGEPAGE;
                                if (idx >= limit)
                                        goto lost1;
                        }
                        stage = idx + ENTRIES_PER_PAGEPAGE;
                        subdir = *dir;
                        shmem_dir_unmap(dir);
                        dir = shmem_dir_map(subdir);
                }
                subdir = *dir;
                if (subdir && page_private(subdir)) {
                        ptr = shmem_swp_map(subdir);
                        size = limit - idx;
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        offset = shmem_find_swp(entry, ptr, ptr+size);
                        shmem_swp_unmap(ptr);
                        if (offset >= 0) {
                                shmem_dir_unmap(dir);
                                goto found;
                        }
                }
        }
lost1:
        shmem_dir_unmap(dir-1);
lost2:
        spin_unlock(&info->lock);
        return 0;
found:
        idx += offset;
        inode = igrab(&info->vfs_inode);
        spin_unlock(&info->lock);

        /* move head to start search for next from here */
        list_move_tail(&shmem_swaplist, &info->swaplist);
        mutex_unlock(&shmem_swaplist_mutex);

        error = 1;
        if (!inode)
                goto out;

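        /*
         * This is the window the patch title refers to: info->lock and
         * shmem_swaplist_mutex were both dropped across igrab and the
         * swaplist head move above, so the swap entry must be looked up
         * again and re-validated against entry.val before it is trusted.
         */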
        spin_lock(&info->lock);
        ptr = shmem_swp_entry(info, idx, NULL);
        if (ptr && ptr->val == entry.val)
                error = add_to_page_cache(page, inode->i_mapping,
                                                idx, GFP_ATOMIC);
        if (error == -EEXIST) {
                struct page *filepage = find_get_page(inode->i_mapping, idx);
                error = 1;
                if (filepage) {
                        /*
                         * There might be a more uptodate page coming down
                         * from a stacked writepage: forget our swappage if so.
                         */
                        if (PageUptodate(filepage))
                                error = 0;
                        page_cache_release(filepage);
                }
        }
        if (!error) {
                delete_from_swap_cache(page);
                set_page_dirty(page);
                info->flags |= SHMEM_PAGEIN;
                shmem_swp_set(info, ptr, 0);
                swap_free(entry);
                error = 1;      /* not an error, but entry was found */
        }
        if (ptr)
                shmem_swp_unmap(ptr);
        spin_unlock(&info->lock);
out:
        unlock_page(page);
        page_cache_release(page);
        iput(inode);            /* allows for NULL */
        return error;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
        struct list_head *p, *next;
        struct shmem_inode_info *info;
        int found = 0;

        mutex_lock(&shmem_swaplist_mutex);
        list_for_each_safe(p, next, &shmem_swaplist) {
                info = list_entry(p, struct shmem_inode_info, swaplist);
                if (info->swapped)
                        found = shmem_unuse_inode(info, entry, page);
                else
                        list_del_init(&info->swaplist);
                cond_resched();
                if (found)
                        goto out;
        }
        mutex_unlock(&shmem_swaplist_mutex);
out:    return found;   /* 0 or 1 or -ENOMEM */
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
        struct shmem_inode_info *info;
        swp_entry_t *entry, swap;
        struct address_space *mapping;
        unsigned long index;
        struct inode *inode;

        BUG_ON(!PageLocked(page));
        mapping = page->mapping;
        index = page->index;
        inode = mapping->host;
        info = SHMEM_I(inode);
        if (info->flags & VM_LOCKED)
                goto redirty;
        if (!total_swap_pages)
                goto redirty;

        /*
         * shmem_backing_dev_info's capabilities prevent regular writeback or
         * sync from ever calling shmem_writepage; but a stacking filesystem
         * may use the ->writepage of its underlying filesystem, in which case
         * tmpfs should write out to swap only in response to memory pressure,
         * and not for pdflush or sync.  However, in those cases, we do still
         * want to check if there's a redundant swappage to be discarded.
         */
        if (wbc->for_reclaim)
                swap = get_swap_page();
        else
                swap.val = 0;

        spin_lock(&info->lock);
        if (index >= info->next_index) {
                BUG_ON(!(info->flags & SHMEM_TRUNCATE));
                goto unlock;
        }
        entry = shmem_swp_entry(info, index, NULL);
        if (entry->val) {
                /*
                 * The more uptodate page coming down from a stacked
                 * writepage should replace our old swappage.
                 */
                free_swap_and_cache(*entry);
                shmem_swp_set(info, entry, 0);
        }
        shmem_recalc_inode(inode);

        if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
                remove_from_page_cache(page);
                shmem_swp_set(info, entry, swap.val);
                shmem_swp_unmap(entry);
                spin_unlock(&info->lock);
                if (list_empty(&info->swaplist)) {
                        mutex_lock(&shmem_swaplist_mutex);
                        /* move instead of add in case we're racing */
                        list_move_tail(&info->swaplist, &shmem_swaplist);
                        mutex_unlock(&shmem_swaplist_mutex);
                }
                swap_duplicate(swap);
                BUG_ON(page_mapped(page));
                page_cache_release(page);       /* pagecache ref */
                set_page_dirty(page);
                unlock_page(page);
                return 0;
        }

        shmem_swp_unmap(entry);
unlock:
        spin_unlock(&info->lock);
        swap_free(swap);
redirty:
        set_page_dirty(page);
        if (wbc->for_reclaim)
                return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
        unlock_page(page);
        return 0;
}

#ifdef CONFIG_NUMA
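/*
 * Parse the value of a tmpfs "mpol=" mount option, e.g. "interleave",
 * "bind:0-3" or "prefer:1" (nodelist syntax as accepted by
 * nodelist_parse).
 */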
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
        char *nodelist = strchr(value, ':');
        int err = 1;

        if (nodelist) {
                /* NUL-terminate policy string */
                *nodelist++ = '\0';
                if (nodelist_parse(nodelist, *policy_nodes))
                        goto out;
                if (!nodes_subset(*policy_nodes, node_states[N_HIGH_MEMORY]))
                        goto out;
        }
        if (!strcmp(value, "default")) {
                *policy = MPOL_DEFAULT;
                /* Don't allow a nodelist */
                if (!nodelist)
                        err = 0;
        } else if (!strcmp(value, "prefer")) {
                *policy = MPOL_PREFERRED;
                /* Insist on a nodelist of one node only */
                if (nodelist) {
                        char *rest = nodelist;
                        while (isdigit(*rest))
                                rest++;
                        if (!*rest)
                                err = 0;
                }
        } else if (!strcmp(value, "bind")) {
                *policy = MPOL_BIND;
                /* Insist on a nodelist */
                if (nodelist)
                        err = 0;
        } else if (!strcmp(value, "interleave")) {
                *policy = MPOL_INTERLEAVE;
                /*
                 * Default to online nodes with memory if no nodelist
                 */
                if (!nodelist)
                        *policy_nodes = node_states[N_HIGH_MEMORY];
                err = 0;
        }
out:
        /* Restore string for error message */
        if (nodelist)
                *--nodelist = ':';
        return err;
}

static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
                        struct shmem_inode_info *info, unsigned long idx)
{
        struct vm_area_struct pvma;
        struct page *page;

        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
        pvma.vm_pgoff = idx;
        pvma.vm_ops = NULL;
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
        page = swapin_readahead(entry, gfp, &pvma, 0);
        mpol_free(pvma.vm_policy);
        return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
                        struct shmem_inode_info *info, unsigned long idx)
{
        struct vm_area_struct pvma;
        struct page *page;

        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
        pvma.vm_pgoff = idx;
        pvma.vm_ops = NULL;
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
        page = alloc_page_vma(gfp, &pvma, 0);
        mpol_free(pvma.vm_policy);
        return page;
}
#else
static inline int shmem_parse_mpol(char *value, int *policy,
                                                nodemask_t *policy_nodes)
{
        return 1;
}

static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
                        struct shmem_inode_info *info, unsigned long idx)
{
        return swapin_readahead(entry, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
                        struct shmem_inode_info *info, unsigned long idx)
{
        return alloc_page(gfp);
}
#endif

/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, since we also free the swap
 * entry: a page cannot live in both the swap and page cache.
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
                        struct page **pagep, enum sgp_type sgp, int *type)
{
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo;
        struct page *filepage = *pagep;
        struct page *swappage;
        swp_entry_t *entry;
        swp_entry_t swap;
        gfp_t gfp;
        int error;

        if (idx >= SHMEM_MAX_INDEX)
                return -EFBIG;

        if (type)
                *type = 0;

        /*
         * Normally, filepage is NULL on entry, and either found
         * uptodate immediately, or allocated and zeroed, or read
         * in under swappage, which is then assigned to filepage.
         * But shmem_readpage (required for splice) passes in a locked
         * filepage, which may be found not uptodate by other callers
         * too, and may need to be copied from the swappage read in.
         */
repeat:
        if (!filepage)
                filepage = find_lock_page(mapping, idx);
        if (filepage && PageUptodate(filepage))
                goto done;
        error = 0;
        gfp = mapping_gfp_mask(mapping);

        spin_lock(&info->lock);
        shmem_recalc_inode(inode);
        entry = shmem_swp_alloc(info, idx, sgp);
        if (IS_ERR(entry)) {
                spin_unlock(&info->lock);
                error = PTR_ERR(entry);
                goto failed;
        }
        swap = *entry;

        if (swap.val) {
                /* Look it up and read it in... */
                swappage = lookup_swap_cache(swap);
                if (!swappage) {
                        shmem_swp_unmap(entry);
                        /* here we actually do the io */
                        if (type && !(*type & VM_FAULT_MAJOR)) {
                                __count_vm_event(PGMAJFAULT);
                                *type |= VM_FAULT_MAJOR;
                        }
                        spin_unlock(&info->lock);
                        swappage = shmem_swapin(swap, gfp, info, idx);
                        if (!swappage) {
                                spin_lock(&info->lock);
                                entry = shmem_swp_alloc(info, idx, sgp);
                                if (IS_ERR(entry))
                                        error = PTR_ERR(entry);
                                else {
                                        if (entry->val == swap.val)
                                                error = -ENOMEM;
                                        shmem_swp_unmap(entry);
                                }
                                spin_unlock(&info->lock);
                                if (error)
                                        goto failed;
                                goto repeat;
                        }
                        wait_on_page_locked(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }

                /* We have to do this with page locked to prevent races */
                if (TestSetPageLocked(swappage)) {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        wait_on_page_locked(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }
                if (PageWriteback(swappage)) {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        wait_on_page_writeback(swappage);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }
                if (!PageUptodate(swappage)) {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        error = -EIO;
                        goto failed;
                }

                if (filepage) {
                        shmem_swp_set(info, entry, 0);
                        shmem_swp_unmap(entry);
                        delete_from_swap_cache(swappage);
                        spin_unlock(&info->lock);
                        copy_highpage(filepage, swappage);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        flush_dcache_page(filepage);
                        SetPageUptodate(filepage);
                        set_page_dirty(filepage);
                        swap_free(swap);
                } else if (!(error = add_to_page_cache(
                                swappage, mapping, idx, GFP_ATOMIC))) {
                        info->flags |= SHMEM_PAGEIN;
                        shmem_swp_set(info, entry, 0);
                        shmem_swp_unmap(entry);
                        delete_from_swap_cache(swappage);
                        spin_unlock(&info->lock);
                        filepage = swappage;
                        set_page_dirty(filepage);
                        swap_free(swap);
                } else {
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        unlock_page(swappage);
                        page_cache_release(swappage);
                        if (error == -ENOMEM) {
                                /* let kswapd refresh zone for GFP_ATOMICs */
                                congestion_wait(WRITE, HZ/50);
                        }
                        goto repeat;
                }
        } else if (sgp == SGP_READ && !filepage) {
                shmem_swp_unmap(entry);
                filepage = find_get_page(mapping, idx);
                if (filepage &&
                    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
                        spin_unlock(&info->lock);
                        wait_on_page_locked(filepage);
                        page_cache_release(filepage);
                        filepage = NULL;
                        goto repeat;
                }
                spin_unlock(&info->lock);
        } else {
                shmem_swp_unmap(entry);
                sbinfo = SHMEM_SB(inode->i_sb);
                if (sbinfo->max_blocks) {
                        spin_lock(&sbinfo->stat_lock);
                        if (sbinfo->free_blocks == 0 ||
                            shmem_acct_block(info->flags)) {
                                spin_unlock(&sbinfo->stat_lock);
                                spin_unlock(&info->lock);
                                error = -ENOSPC;
                                goto failed;
                        }
                        sbinfo->free_blocks--;
                        inode->i_blocks += BLOCKS_PER_PAGE;
                        spin_unlock(&sbinfo->stat_lock);
                } else if (shmem_acct_block(info->flags)) {
                        spin_unlock(&info->lock);
                        error = -ENOSPC;
                        goto failed;
                }

                if (!filepage) {
                        spin_unlock(&info->lock);
                        filepage = shmem_alloc_page(gfp, info, idx);
                        if (!filepage) {
                                shmem_unacct_blocks(info->flags, 1);
                                shmem_free_blocks(inode, 1);
                                error = -ENOMEM;
                                goto failed;
                        }

                        spin_lock(&info->lock);
                        entry = shmem_swp_alloc(info, idx, sgp);
                        if (IS_ERR(entry))
                                error = PTR_ERR(entry);
                        else {
                                swap = *entry;
                                shmem_swp_unmap(entry);
                        }
                        if (error || swap.val || 0 != add_to_page_cache_lru(
                                        filepage, mapping, idx, GFP_ATOMIC)) {
                                spin_unlock(&info->lock);
                                page_cache_release(filepage);
                                shmem_unacct_blocks(info->flags, 1);
                                shmem_free_blocks(inode, 1);
                                filepage = NULL;
                                if (error)
                                        goto failed;
                                goto repeat;
                        }
                        info->flags |= SHMEM_PAGEIN;
                }

                info->alloced++;
                spin_unlock(&info->lock);
                clear_highpage(filepage);
                flush_dcache_page(filepage);
                SetPageUptodate(filepage);
                if (sgp == SGP_DIRTY)
                        set_page_dirty(filepage);
        }
done:
        *pagep = filepage;
        return 0;

1366 failed:
1367         if (*pagep != filepage) {
1368                 unlock_page(filepage);
1369                 page_cache_release(filepage);
1370         }
1371         return error;
1372 }
1373
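/*
 * Fault in a page of a shmem mapping: refuse faults beyond i_size with
 * SIGBUS, else let shmem_getpage() find or allocate the page.  It is
 * handed back locked and with a reference, hence VM_FAULT_LOCKED.
 */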
1374 static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1375 {
1376         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1377         int error;
1378         int ret;
1379
1380         if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
1381                 return VM_FAULT_SIGBUS;
1382
1383         error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1384         if (error)
1385                 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1386
1387         mark_page_accessed(vmf->page);
1388         return ret | VM_FAULT_LOCKED;
1389 }
1390
1391 #ifdef CONFIG_NUMA
1392 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
1393 {
1394         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1395         return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
1396 }
1397
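/*
 * NUMA lookup: convert the faulting address into a page index within
 * the file, so that the per-inode shared policy applies consistently
 * to every mapping of the same object.
 */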
1398 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
1399                                           unsigned long addr)
1400 {
1401         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1402         unsigned long idx;
1403
1404         idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1405         return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
1406 }
1407 #endif
1408
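/*
 * Handle SHM_LOCK/SHM_UNLOCK from shmctl(): charge the file's size to
 * the user's locked-memory rlimit via user_shm_lock(), and keep
 * VM_LOCKED in info->flags so that shmem_writepage() leaves these
 * pages alone instead of pushing them out to swap.
 */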
1409 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1410 {
1411         struct inode *inode = file->f_path.dentry->d_inode;
1412         struct shmem_inode_info *info = SHMEM_I(inode);
1413         int retval = -ENOMEM;
1414
1415         spin_lock(&info->lock);
1416         if (lock && !(info->flags & VM_LOCKED)) {
1417                 if (!user_shm_lock(inode->i_size, user))
1418                         goto out_nomem;
1419                 info->flags |= VM_LOCKED;
1420         }
1421         if (!lock && (info->flags & VM_LOCKED) && user) {
1422                 user_shm_unlock(inode->i_size, user);
1423                 info->flags &= ~VM_LOCKED;
1424         }
1425         retval = 0;
1426 out_nomem:
1427         spin_unlock(&info->lock);
1428         return retval;
1429 }
1430
1431 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1432 {
1433         file_accessed(file);
1434         vma->vm_ops = &shmem_vm_ops;
1435         vma->vm_flags |= VM_CAN_NONLINEAR;
1436         return 0;
1437 }
1438
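/*
 * Allocate and initialize a new shmem inode.  The shmem_inode_info is
 * zeroed up to the embedded vfs_inode, then set up according to the
 * file type; if new_inode() fails, the reservation taken by
 * shmem_reserve_inode() is returned via shmem_free_inode().
 */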
1439 static struct inode *
1440 shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
1441 {
1442         struct inode *inode;
1443         struct shmem_inode_info *info;
1444         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1445
1446         if (shmem_reserve_inode(sb))
1447                 return NULL;
1448
1449         inode = new_inode(sb);
1450         if (inode) {
1451                 inode->i_mode = mode;
1452                 inode->i_uid = current->fsuid;
1453                 inode->i_gid = current->fsgid;
1454                 inode->i_blocks = 0;
1455                 inode->i_mapping->a_ops = &shmem_aops;
1456                 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1457                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1458                 inode->i_generation = get_seconds();
1459                 info = SHMEM_I(inode);
1460                 memset(info, 0, (char *)inode - (char *)info);
1461                 spin_lock_init(&info->lock);
1462                 INIT_LIST_HEAD(&info->swaplist);
1463
1464                 switch (mode & S_IFMT) {
1465                 default:
1466                         inode->i_op = &shmem_special_inode_operations;
1467                         init_special_inode(inode, mode, dev);
1468                         break;
1469                 case S_IFREG:
1470                         inode->i_op = &shmem_inode_operations;
1471                         inode->i_fop = &shmem_file_operations;
1472                         mpol_shared_policy_init(&info->policy, sbinfo->policy,
1473                                                         &sbinfo->policy_nodes);
1474                         break;
1475                 case S_IFDIR:
1476                         inc_nlink(inode);
1477                         /* Some things misbehave if size == 0 on a directory */
1478                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1479                         inode->i_op = &shmem_dir_inode_operations;
1480                         inode->i_fop = &simple_dir_operations;
1481                         break;
1482                 case S_IFLNK:
1483                         /*
1484                          * Must not load anything in the rbtree,
1485                          * mpol_free_shared_policy will not be called.
1486                          */
1487                         mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
1488                                                 NULL);
1489                         break;
1490                 }
1491         } else
1492                 shmem_free_inode(sb);
1493         return inode;
1494 }
1495
1496 #ifdef CONFIG_TMPFS
1497 static const struct inode_operations shmem_symlink_inode_operations;
1498 static const struct inode_operations shmem_symlink_inline_operations;
1499
1500 /*
1501  * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
1502  * but providing them allows a tmpfs file to be used for splice, sendfile, and
1503  * below the loop driver, in the generic fashion that many filesystems support.
1504  */
1505 static int shmem_readpage(struct file *file, struct page *page)
1506 {
1507         struct inode *inode = page->mapping->host;
1508         int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
1509         unlock_page(page);
1510         return error;
1511 }
1512
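/*
 * write_begin finds or allocates the page via shmem_getpage(), which
 * hands it back locked and referenced; write_end then extends i_size
 * if needed, and unlocks, dirties and releases the page.
 */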
1513 static int
1514 shmem_write_begin(struct file *file, struct address_space *mapping,
1515                         loff_t pos, unsigned len, unsigned flags,
1516                         struct page **pagep, void **fsdata)
1517 {
1518         struct inode *inode = mapping->host;
1519         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1520         *pagep = NULL;
1521         return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
1522 }
1523
1524 static int
1525 shmem_write_end(struct file *file, struct address_space *mapping,
1526                         loff_t pos, unsigned len, unsigned copied,
1527                         struct page *page, void *fsdata)
1528 {
1529         struct inode *inode = mapping->host;
1530
1531         if (pos + copied > inode->i_size)
1532                 i_size_write(inode, pos + copied);
1533
1534         unlock_page(page);
1535         set_page_dirty(page);
1536         page_cache_release(page);
1537
1538         return copied;
1539 }
1540
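/*
 * Copy file data out a page at a time.  SGP_READ may leave the page
 * pointer NULL over a hole, in which case ZERO_PAGE(0) is substituted
 * below; only a kernel-space read (a stacking filesystem) forces the
 * holes to be allocated, via SGP_DIRTY.
 */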
1541 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1542 {
1543         struct inode *inode = filp->f_path.dentry->d_inode;
1544         struct address_space *mapping = inode->i_mapping;
1545         unsigned long index, offset;
1546         enum sgp_type sgp = SGP_READ;
1547
1548         /*
1549          * Might this read be for a stacking filesystem?  Then when reading
1550          * holes of a sparse file, we actually need to allocate those pages,
1551          * and even mark them dirty, so such a read cannot exceed the max_blocks limit.
1552          */
1553         if (segment_eq(get_fs(), KERNEL_DS))
1554                 sgp = SGP_DIRTY;
1555
1556         index = *ppos >> PAGE_CACHE_SHIFT;
1557         offset = *ppos & ~PAGE_CACHE_MASK;
1558
1559         for (;;) {
1560                 struct page *page = NULL;
1561                 unsigned long end_index, nr, ret;
1562                 loff_t i_size = i_size_read(inode);
1563
1564                 end_index = i_size >> PAGE_CACHE_SHIFT;
1565                 if (index > end_index)
1566                         break;
1567                 if (index == end_index) {
1568                         nr = i_size & ~PAGE_CACHE_MASK;
1569                         if (nr <= offset)
1570                                 break;
1571                 }
1572
1573                 desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
1574                 if (desc->error) {
1575                         if (desc->error == -EINVAL)
1576                                 desc->error = 0;
1577                         break;
1578                 }
1579                 if (page)
1580                         unlock_page(page);
1581
1582                 /*
1583                  * We must evaluate after, since reads (unlike writes)
1584                  * are called without i_mutex protection against truncate
1585                  */
1586                 nr = PAGE_CACHE_SIZE;
1587                 i_size = i_size_read(inode);
1588                 end_index = i_size >> PAGE_CACHE_SHIFT;
1589                 if (index == end_index) {
1590                         nr = i_size & ~PAGE_CACHE_MASK;
1591                         if (nr <= offset) {
1592                                 if (page)
1593                                         page_cache_release(page);
1594                                 break;
1595                         }
1596                 }
1597                 nr -= offset;
1598
1599                 if (page) {
1600                         /*
1601                          * If users can be writing to this page using arbitrary
1602                          * virtual addresses, take care about potential aliasing
1603                          * before reading the page on the kernel side.
1604                          */
1605                         if (mapping_writably_mapped(mapping))
1606                                 flush_dcache_page(page);
1607                         /*
1608                          * Mark the page accessed if we read the beginning.
1609                          */
1610                         if (!offset)
1611                                 mark_page_accessed(page);
1612                 } else {
1613                         page = ZERO_PAGE(0);
1614                         page_cache_get(page);
1615                 }
1616
1617                 /*
1618                  * Ok, we have the page, and it's up-to-date, so
1619                  * now we can copy it to user space...
1620                  *
1621                  * The actor routine returns how many bytes were actually used.
1622                  * NOTE! This may not be the same as how much of a user buffer
1623                  * we filled up (we may be padding etc), so we can only update
1624                  * "pos" here (the actor routine has to update the user buffer
1625                  * pointers and the remaining count).
1626                  */
1627                 ret = actor(desc, page, offset, nr);
1628                 offset += ret;
1629                 index += offset >> PAGE_CACHE_SHIFT;
1630                 offset &= ~PAGE_CACHE_MASK;
1631
1632                 page_cache_release(page);
1633                 if (ret != nr || !desc->count)
1634                         break;
1635
1636                 cond_resched();
1637         }
1638
1639         *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1640         file_accessed(filp);
1641 }
1642
1643 static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
1644 {
1645         read_descriptor_t desc;
1646
1647         if ((ssize_t) count < 0)
1648                 return -EINVAL;
1649         if (!access_ok(VERIFY_WRITE, buf, count))
1650                 return -EFAULT;
1651         if (!count)
1652                 return 0;
1653
1654         desc.written = 0;
1655         desc.count = count;
1656         desc.arg.buf = buf;
1657         desc.error = 0;
1658
1659         do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1660         if (desc.written)
1661                 return desc.written;
1662         return desc.error;
1663 }
1664
1665 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1666 {
1667         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1668
1669         buf->f_type = TMPFS_MAGIC;
1670         buf->f_bsize = PAGE_CACHE_SIZE;
1671         buf->f_namelen = NAME_MAX;
1672         spin_lock(&sbinfo->stat_lock);
1673         if (sbinfo->max_blocks) {
1674                 buf->f_blocks = sbinfo->max_blocks;
1675                 buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
1676         }
1677         if (sbinfo->max_inodes) {
1678                 buf->f_files = sbinfo->max_inodes;
1679                 buf->f_ffree = sbinfo->free_inodes;
1680         }
1681         /* else leave those fields 0 like simple_statfs */
1682         spin_unlock(&sbinfo->stat_lock);
1683         return 0;
1684 }
1685
1686 /*
1687  * File creation. Allocate an inode, and we're done.
1688  */
1689 static int
1690 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1691 {
1692         struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
1693         int error = -ENOSPC;
1694
1695         if (inode) {
1696                 error = security_inode_init_security(inode, dir, NULL, NULL,
1697                                                      NULL);
1698                 if (error) {
1699                         if (error != -EOPNOTSUPP) {
1700                                 iput(inode);
1701                                 return error;
1702                         }
1703                 }
1704                 error = shmem_acl_init(inode, dir);
1705                 if (error) {
1706                         iput(inode);
1707                         return error;
1708                 }
1709                 if (dir->i_mode & S_ISGID) {
1710                         inode->i_gid = dir->i_gid;
1711                         if (S_ISDIR(mode))
1712                                 inode->i_mode |= S_ISGID;
1713                 }
1714                 dir->i_size += BOGO_DIRENT_SIZE;
1715                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1716                 d_instantiate(dentry, inode);
1717                 dget(dentry); /* Extra count - pin the dentry in core */
1718         }
1719         return error;
1720 }
1721
1722 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1723 {
1724         int error;
1725
1726         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1727                 return error;
1728         inc_nlink(dir);
1729         return 0;
1730 }
1731
1732 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1733                 struct nameidata *nd)
1734 {
1735         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1736 }
1737
1738 /*
1739  * Link a file.
1740  */
1741 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1742 {
1743         struct inode *inode = old_dentry->d_inode;
1744         int ret;
1745
1746         /*
1747          * No ordinary (disk based) filesystem counts links as inodes;
1748          * but each new link needs a new dentry, pinning lowmem, and
1749          * tmpfs dentries cannot be pruned until they are unlinked.
1750          */
1751         ret = shmem_reserve_inode(inode->i_sb);
1752         if (ret)
1753                 goto out;
1754
1755         dir->i_size += BOGO_DIRENT_SIZE;
1756         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1757         inc_nlink(inode);
1758         atomic_inc(&inode->i_count);    /* New dentry reference */
1759         dget(dentry);           /* Extra pinning count for the created dentry */
1760         d_instantiate(dentry, inode);
1761 out:
1762         return ret;
1763 }
1764
1765 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1766 {
1767         struct inode *inode = dentry->d_inode;
1768
1769         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
1770                 shmem_free_inode(inode->i_sb);
1771
1772         dir->i_size -= BOGO_DIRENT_SIZE;
1773         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1774         drop_nlink(inode);
1775         dput(dentry);   /* Undo the count from "create" - this does all the work */
1776         return 0;
1777 }
1778
1779 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1780 {
1781         if (!simple_empty(dentry))
1782                 return -ENOTEMPTY;
1783
1784         drop_nlink(dentry->d_inode);
1785         drop_nlink(dir);
1786         return shmem_unlink(dir, dentry);
1787 }
1788
1789 /*
1790  * The VFS layer already does all the dentry stuff for rename,
1791  * we just have to decrement the usage count for the target if
1792  * it exists so that the VFS layer correctly frees it when it
1793  * gets overwritten.
1794  */
1795 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1796 {
1797         struct inode *inode = old_dentry->d_inode;
1798         int they_are_dirs = S_ISDIR(inode->i_mode);
1799
1800         if (!simple_empty(new_dentry))
1801                 return -ENOTEMPTY;
1802
1803         if (new_dentry->d_inode) {
1804                 (void) shmem_unlink(new_dir, new_dentry);
1805                 if (they_are_dirs)
1806                         drop_nlink(old_dir);
1807         } else if (they_are_dirs) {
1808                 drop_nlink(old_dir);
1809                 inc_nlink(new_dir);
1810         }
1811
1812         old_dir->i_size -= BOGO_DIRENT_SIZE;
1813         new_dir->i_size += BOGO_DIRENT_SIZE;
1814         old_dir->i_ctime = old_dir->i_mtime =
1815         new_dir->i_ctime = new_dir->i_mtime =
1816         inode->i_ctime = CURRENT_TIME;
1817         return 0;
1818 }
1819
1820 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1821 {
1822         int error;
1823         int len;
1824         struct inode *inode;
1825         struct page *page = NULL;
1826         char *kaddr;
1827         struct shmem_inode_info *info;
1828
1829         len = strlen(symname) + 1;
1830         if (len > PAGE_CACHE_SIZE)
1831                 return -ENAMETOOLONG;
1832
1833         inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
1834         if (!inode)
1835                 return -ENOSPC;
1836
1837         error = security_inode_init_security(inode, dir, NULL, NULL,
1838                                              NULL);
1839         if (error) {
1840                 if (error != -EOPNOTSUPP) {
1841                         iput(inode);
1842                         return error;
1843                 }
1844                 error = 0;
1845         }
1846
1847         info = SHMEM_I(inode);
1848         inode->i_size = len-1;
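        /*
         * A short target is stored inline in the shmem_inode_info itself,
         * in the space before the embedded vfs_inode (which is why the
         * S_IFLNK case of shmem_get_inode leaves the policy rbtree empty);
         * longer targets are written into page 0 of the file.
         */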
1849         if (len <= (char *)inode - (char *)info) {
1850                 /* do it inline */
1851                 memcpy(info, symname, len);
1852                 inode->i_op = &shmem_symlink_inline_operations;
1853         } else {
1854                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1855                 if (error) {
1856                         iput(inode);
1857                         return error;
1858                 }
1859                 unlock_page(page);
1860                 inode->i_op = &shmem_symlink_inode_operations;
1861                 kaddr = kmap_atomic(page, KM_USER0);
1862                 memcpy(kaddr, symname, len);
1863                 kunmap_atomic(kaddr, KM_USER0);
1864                 set_page_dirty(page);
1865                 page_cache_release(page);
1866         }
1867         if (dir->i_mode & S_ISGID)
1868                 inode->i_gid = dir->i_gid;
1869         dir->i_size += BOGO_DIRENT_SIZE;
1870         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1871         d_instantiate(dentry, inode);
1872         dget(dentry);
1873         return 0;
1874 }
1875
1876 static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
1877 {
1878         nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
1879         return NULL;
1880 }
1881
1882 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1883 {
1884         struct page *page = NULL;
1885         int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1886         nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
1887         if (page)
1888                 unlock_page(page);
1889         return page;
1890 }
1891
1892 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1893 {
1894         if (!IS_ERR(nd_get_link(nd))) {
1895                 struct page *page = cookie;
1896                 kunmap(page);
1897                 mark_page_accessed(page);
1898                 page_cache_release(page);
1899         }
1900 }
1901
1902 static const struct inode_operations shmem_symlink_inline_operations = {
1903         .readlink       = generic_readlink,
1904         .follow_link    = shmem_follow_link_inline,
1905 };
1906
1907 static const struct inode_operations shmem_symlink_inode_operations = {
1908         .truncate       = shmem_truncate,
1909         .readlink       = generic_readlink,
1910         .follow_link    = shmem_follow_link,
1911         .put_link       = shmem_put_link,
1912 };
1913
1914 #ifdef CONFIG_TMPFS_POSIX_ACL
1915 /*
1916  * Superblocks without xattr inode operations will get security.* xattr
1917  * support from the VFS "for free". As soon as we have any other xattrs
1918  * like ACLs, we also need to implement the security.* handlers at
1919  * filesystem level, though.
1920  */
1921
1922 static size_t shmem_xattr_security_list(struct inode *inode, char *list,
1923                                         size_t list_len, const char *name,
1924                                         size_t name_len)
1925 {
1926         return security_inode_listsecurity(inode, list, list_len);
1927 }
1928
1929 static int shmem_xattr_security_get(struct inode *inode, const char *name,
1930                                     void *buffer, size_t size)
1931 {
1932         if (strcmp(name, "") == 0)
1933                 return -EINVAL;
1934         return security_inode_getsecurity(inode, name, buffer, size,
1935                                           -EOPNOTSUPP);
1936 }
1937
1938 static int shmem_xattr_security_set(struct inode *inode, const char *name,
1939                                     const void *value, size_t size, int flags)
1940 {
1941         if (strcmp(name, "") == 0)
1942                 return -EINVAL;
1943         return security_inode_setsecurity(inode, name, value, size, flags);
1944 }
1945
1946 static struct xattr_handler shmem_xattr_security_handler = {
1947         .prefix = XATTR_SECURITY_PREFIX,
1948         .list   = shmem_xattr_security_list,
1949         .get    = shmem_xattr_security_get,
1950         .set    = shmem_xattr_security_set,
1951 };
1952
1953 static struct xattr_handler *shmem_xattr_handlers[] = {
1954         &shmem_xattr_acl_access_handler,
1955         &shmem_xattr_acl_default_handler,
1956         &shmem_xattr_security_handler,
1957         NULL
1958 };
1959 #endif
1960
1961 static struct dentry *shmem_get_parent(struct dentry *child)
1962 {
1963         return ERR_PTR(-ESTALE);
1964 }
1965
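/*
 * A tmpfs file handle is three 32-bit words: fh[0] is i_generation,
 * fh[1] the low half of i_ino, fh[2] the high half.  ilookup5() hashes
 * on i_ino + i_generation, so shmem_match() must re-check both exactly.
 */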
1966 static int shmem_match(struct inode *ino, void *vfh)
1967 {
1968         __u32 *fh = vfh;
1969         __u64 inum = fh[2];
1970         inum = (inum << 32) | fh[1];
1971         return ino->i_ino == inum && fh[0] == ino->i_generation;
1972 }
1973
1974 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
1975                 struct fid *fid, int fh_len, int fh_type)
1976 {
1977         struct inode *inode;
1978         struct dentry *dentry = NULL;
1979         u64 inum = fid->raw[2];
1980         inum = (inum << 32) | fid->raw[1];
1981
1982         if (fh_len < 3)
1983                 return NULL;
1984
1985         inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
1986                         shmem_match, fid->raw);
1987         if (inode) {
1988                 dentry = d_find_alias(inode);
1989                 iput(inode);
1990         }
1991
1992         return dentry;
1993 }
1994
1995 static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
1996                                 int connectable)
1997 {
1998         struct inode *inode = dentry->d_inode;
1999
2000         if (*len < 3)
2001                 return 255;
2002
2003         if (hlist_unhashed(&inode->i_hash)) {
2004                 /* Unfortunately insert_inode_hash is not idempotent,
2005                  * so as we hash inodes here rather than at creation
2006                  * time, we need a lock to ensure we only try
2007                  * to do it once
2008                  */
2009                 static DEFINE_SPINLOCK(lock);
2010                 spin_lock(&lock);
2011                 if (hlist_unhashed(&inode->i_hash))
2012                         __insert_inode_hash(inode,
2013                                             inode->i_ino + inode->i_generation);
2014                 spin_unlock(&lock);
2015         }
2016
2017         fh[0] = inode->i_generation;
2018         fh[1] = inode->i_ino;
2019         fh[2] = ((__u64)inode->i_ino) >> 32;
2020
2021         *len = 3;
2022         return 1;
2023 }
2024
2025 static const struct export_operations shmem_export_ops = {
2026         .get_parent     = shmem_get_parent,
2027         .encode_fh      = shmem_encode_fh,
2028         .fh_to_dentry   = shmem_fh_to_dentry,
2029 };
2030
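/*
 * Parse the comma-separated mount options in place.  An illustrative
 * (made-up) string: "size=50%,mpol=interleave:0,2,mode=1777" -- note
 * that the mpol nodelist itself contains commas, which is why the
 * splitting loop below peeks at isdigit() before NUL-terminating an
 * option at a comma.
 */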
2031 static int shmem_parse_options(char *options, int *mode, uid_t *uid,
2032         gid_t *gid, unsigned long *blocks, unsigned long *inodes,
2033         int *policy, nodemask_t *policy_nodes)
2034 {
2035         char *this_char, *value, *rest;
2036
2037         while (options != NULL) {
2038                 this_char = options;
2039                 for (;;) {
2040                         /*
2041                          * NUL-terminate this option: unfortunately,
2042                          * mount options form a comma-separated list,
2043                          * but mpol's nodelist may also contain commas.
2044                          */
2045                         options = strchr(options, ',');
2046                         if (options == NULL)
2047                                 break;
2048                         options++;
2049                         if (!isdigit(*options)) {
2050                                 options[-1] = '\0';
2051                                 break;
2052                         }
2053                 }
2054                 if (!*this_char)
2055                         continue;
2056                 if ((value = strchr(this_char,'=')) != NULL) {
2057                         *value++ = 0;
2058                 } else {
2059                         printk(KERN_ERR
2060                             "tmpfs: No value for mount option '%s'\n",
2061                             this_char);
2062                         return 1;
2063                 }
2064
2065                 if (!strcmp(this_char,"size")) {
2066                         unsigned long long size;
2067                         size = memparse(value,&rest);
2068                         if (*rest == '%') {
2069                                 size <<= PAGE_SHIFT;
2070                                 size *= totalram_pages;
2071                                 do_div(size, 100);
2072                                 rest++;
2073                         }
2074                         if (*rest)
2075                                 goto bad_val;
2076                         *blocks = DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
2077                 } else if (!strcmp(this_char,"nr_blocks")) {
2078                         *blocks = memparse(value,&rest);
2079                         if (*rest)
2080                                 goto bad_val;
2081                 } else if (!strcmp(this_char,"nr_inodes")) {
2082                         *inodes = memparse(value,&rest);
2083                         if (*rest)
2084                                 goto bad_val;
2085                 } else if (!strcmp(this_char,"mode")) {
2086                         if (!mode)
2087                                 continue;
2088                         *mode = simple_strtoul(value,&rest,8);
2089                         if (*rest)
2090                                 goto bad_val;
2091                 } else if (!strcmp(this_char,"uid")) {
2092                         if (!uid)
2093                                 continue;
2094                         *uid = simple_strtoul(value,&rest,0);
2095                         if (*rest)
2096                                 goto bad_val;
2097                 } else if (!strcmp(this_char,"gid")) {
2098                         if (!gid)
2099                                 continue;
2100                         *gid = simple_strtoul(value,&rest,0);
2101                         if (*rest)
2102                                 goto bad_val;
2103                 } else if (!strcmp(this_char,"mpol")) {
2104                         if (shmem_parse_mpol(value,policy,policy_nodes))
2105                                 goto bad_val;
2106                 } else {
2107                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2108                                this_char);
2109                         return 1;
2110                 }
2111         }
2112         return 0;
2113
2114 bad_val:
2115         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2116                value, this_char);
2117         return 1;
2118
2119 }
2120
2121 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2122 {
2123         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2124         unsigned long max_blocks = sbinfo->max_blocks;
2125         unsigned long max_inodes = sbinfo->max_inodes;
2126         int policy = sbinfo->policy;
2127         nodemask_t policy_nodes = sbinfo->policy_nodes;
2128         unsigned long blocks;
2129         unsigned long inodes;
2130         int error = -EINVAL;
2131
2132         if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
2133                                 &max_inodes, &policy, &policy_nodes))
2134                 return error;
2135
2136         spin_lock(&sbinfo->stat_lock);
2137         blocks = sbinfo->max_blocks - sbinfo->free_blocks;
2138         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2139         if (max_blocks < blocks)
2140                 goto out;
2141         if (max_inodes < inodes)
2142                 goto out;
2143         /*
2144          * Those tests also disallow limited->unlimited while any are in
2145          * use, so i_blocks will always be zero when max_blocks is zero;
2146          * but we must separately disallow unlimited->limited, because
2147          * in that case we have no record of how much is already in use.
2148          */
2149         if (max_blocks && !sbinfo->max_blocks)
2150                 goto out;
2151         if (max_inodes && !sbinfo->max_inodes)
2152                 goto out;
2153
2154         error = 0;
2155         sbinfo->max_blocks  = max_blocks;
2156         sbinfo->free_blocks = max_blocks - blocks;
2157         sbinfo->max_inodes  = max_inodes;
2158         sbinfo->free_inodes = max_inodes - inodes;
2159         sbinfo->policy = policy;
2160         sbinfo->policy_nodes = policy_nodes;
2161 out:
2162         spin_unlock(&sbinfo->stat_lock);
2163         return error;
2164 }
2165 #endif
2166
2167 static void shmem_put_super(struct super_block *sb)
2168 {
2169         kfree(sb->s_fs_info);
2170         sb->s_fs_info = NULL;
2171 }
2172
2173 static int shmem_fill_super(struct super_block *sb,
2174                             void *data, int silent)
2175 {
2176         struct inode *inode;
2177         struct dentry *root;
2178         int mode   = S_IRWXUGO | S_ISVTX;
2179         uid_t uid = current->fsuid;
2180         gid_t gid = current->fsgid;
2181         int err = -ENOMEM;
2182         struct shmem_sb_info *sbinfo;
2183         unsigned long blocks = 0;
2184         unsigned long inodes = 0;
2185         int policy = MPOL_DEFAULT;
2186         nodemask_t policy_nodes = node_states[N_HIGH_MEMORY];
2187
2188 #ifdef CONFIG_TMPFS
2189         /*
2190          * By default we only allow half of the physical RAM per
2191          * tmpfs instance, limiting inodes to one per page of lowmem;
2192          * but the internal instance is left unlimited.
2193          */
2194         if (!(sb->s_flags & MS_NOUSER)) {
2195                 blocks = totalram_pages / 2;
2196                 inodes = totalram_pages - totalhigh_pages;
2197                 if (inodes > blocks)
2198                         inodes = blocks;
2199                 if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
2200                                         &inodes, &policy, &policy_nodes))
2201                         return -EINVAL;
2202         }
2203         sb->s_export_op = &shmem_export_ops;
2204 #else
2205         sb->s_flags |= MS_NOUSER;
2206 #endif
2207
2208         /* Round up to L1_CACHE_BYTES to resist false sharing */
2209         sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
2210                                 L1_CACHE_BYTES), GFP_KERNEL);
2211         if (!sbinfo)
2212                 return -ENOMEM;
2213
2214         spin_lock_init(&sbinfo->stat_lock);
2215         sbinfo->max_blocks = blocks;
2216         sbinfo->free_blocks = blocks;
2217         sbinfo->max_inodes = inodes;
2218         sbinfo->free_inodes = inodes;
2219         sbinfo->policy = policy;
2220         sbinfo->policy_nodes = policy_nodes;
2221
2222         sb->s_fs_info = sbinfo;
2223         sb->s_maxbytes = SHMEM_MAX_BYTES;
2224         sb->s_blocksize = PAGE_CACHE_SIZE;
2225         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2226         sb->s_magic = TMPFS_MAGIC;
2227         sb->s_op = &shmem_ops;
2228         sb->s_time_gran = 1;
2229 #ifdef CONFIG_TMPFS_POSIX_ACL
2230         sb->s_xattr = shmem_xattr_handlers;
2231         sb->s_flags |= MS_POSIXACL;
2232 #endif
2233
2234         inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
2235         if (!inode)
2236                 goto failed;
2237         inode->i_uid = uid;
2238         inode->i_gid = gid;
2239         root = d_alloc_root(inode);
2240         if (!root)
2241                 goto failed_iput;
2242         sb->s_root = root;
2243         return 0;
2244
2245 failed_iput:
2246         iput(inode);
2247 failed:
2248         shmem_put_super(sb);
2249         return err;
2250 }
2251
2252 static struct kmem_cache *shmem_inode_cachep;
2253
2254 static struct inode *shmem_alloc_inode(struct super_block *sb)
2255 {
2256         struct shmem_inode_info *p;
2257         p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2258         if (!p)
2259                 return NULL;
2260         return &p->vfs_inode;
2261 }
2262
2263 static void shmem_destroy_inode(struct inode *inode)
2264 {
2265         if ((inode->i_mode & S_IFMT) == S_IFREG) {
2266                 /* only struct inode is valid if it's an inline symlink */
2267                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2268         }
2269         shmem_acl_destroy_inode(inode);
2270         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2271 }
2272
2273 static void init_once(struct kmem_cache *cachep, void *foo)
2274 {
2275         struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
2276
2277         inode_init_once(&p->vfs_inode);
2278 #ifdef CONFIG_TMPFS_POSIX_ACL
2279         p->i_acl = NULL;
2280         p->i_default_acl = NULL;
2281 #endif
2282 }
2283
2284 static int init_inodecache(void)
2285 {
2286         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2287                                 sizeof(struct shmem_inode_info),
2288                                 0, SLAB_PANIC, init_once);
2289         return 0;
2290 }
2291
2292 static void destroy_inodecache(void)
2293 {
2294         kmem_cache_destroy(shmem_inode_cachep);
2295 }
2296
2297 static const struct address_space_operations shmem_aops = {
2298         .writepage      = shmem_writepage,
2299         .set_page_dirty = __set_page_dirty_no_writeback,
2300 #ifdef CONFIG_TMPFS
2301         .readpage       = shmem_readpage,
2302         .write_begin    = shmem_write_begin,
2303         .write_end      = shmem_write_end,
2304 #endif
2305         .migratepage    = migrate_page,
2306 };
2307
2308 static const struct file_operations shmem_file_operations = {
2309         .mmap           = shmem_mmap,
2310 #ifdef CONFIG_TMPFS
2311         .llseek         = generic_file_llseek,
2312         .read           = shmem_file_read,
2313         .write          = do_sync_write,
2314         .aio_write      = generic_file_aio_write,
2315         .fsync          = simple_sync_file,
2316         .splice_read    = generic_file_splice_read,
2317         .splice_write   = generic_file_splice_write,
2318 #endif
2319 };
2320
2321 static const struct inode_operations shmem_inode_operations = {
2322         .truncate       = shmem_truncate,
2323         .setattr        = shmem_notify_change,
2324         .truncate_range = shmem_truncate_range,
2325 #ifdef CONFIG_TMPFS_POSIX_ACL
2326         .setxattr       = generic_setxattr,
2327         .getxattr       = generic_getxattr,
2328         .listxattr      = generic_listxattr,
2329         .removexattr    = generic_removexattr,
2330         .permission     = shmem_permission,
2331 #endif
2332
2333 };
2334
2335 static const struct inode_operations shmem_dir_inode_operations = {
2336 #ifdef CONFIG_TMPFS
2337         .create         = shmem_create,
2338         .lookup         = simple_lookup,
2339         .link           = shmem_link,
2340         .unlink         = shmem_unlink,
2341         .symlink        = shmem_symlink,
2342         .mkdir          = shmem_mkdir,
2343         .rmdir          = shmem_rmdir,
2344         .mknod          = shmem_mknod,
2345         .rename         = shmem_rename,
2346 #endif
2347 #ifdef CONFIG_TMPFS_POSIX_ACL
2348         .setattr        = shmem_notify_change,
2349         .setxattr       = generic_setxattr,
2350         .getxattr       = generic_getxattr,
2351         .listxattr      = generic_listxattr,
2352         .removexattr    = generic_removexattr,
2353         .permission     = shmem_permission,
2354 #endif
2355 };
2356
2357 static const struct inode_operations shmem_special_inode_operations = {
2358 #ifdef CONFIG_TMPFS_POSIX_ACL
2359         .setattr        = shmem_notify_change,
2360         .setxattr       = generic_setxattr,
2361         .getxattr       = generic_getxattr,
2362         .listxattr      = generic_listxattr,
2363         .removexattr    = generic_removexattr,
2364         .permission     = shmem_permission,
2365 #endif
2366 };
2367
2368 static const struct super_operations shmem_ops = {
2369         .alloc_inode    = shmem_alloc_inode,
2370         .destroy_inode  = shmem_destroy_inode,
2371 #ifdef CONFIG_TMPFS
2372         .statfs         = shmem_statfs,
2373         .remount_fs     = shmem_remount_fs,
2374 #endif
2375         .delete_inode   = shmem_delete_inode,
2376         .drop_inode     = generic_delete_inode,
2377         .put_super      = shmem_put_super,
2378 };
2379
2380 static struct vm_operations_struct shmem_vm_ops = {
2381         .fault          = shmem_fault,
2382 #ifdef CONFIG_NUMA
2383         .set_policy     = shmem_set_policy,
2384         .get_policy     = shmem_get_policy,
2385 #endif
2386 };
2387
2388
2389 static int shmem_get_sb(struct file_system_type *fs_type,
2390         int flags, const char *dev_name, void *data, struct vfsmount *mnt)
2391 {
2392         return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
2393 }
2394
2395 static struct file_system_type tmpfs_fs_type = {
2396         .owner          = THIS_MODULE,
2397         .name           = "tmpfs",
2398         .get_sb         = shmem_get_sb,
2399         .kill_sb        = kill_litter_super,
2400 };
2401 static struct vfsmount *shm_mnt;
2402
2403 static int __init init_tmpfs(void)
2404 {
2405         int error;
2406
2407         error = bdi_init(&shmem_backing_dev_info);
2408         if (error)
2409                 goto out4;
2410
2411         error = init_inodecache();
2412         if (error)
2413                 goto out3;
2414
2415         error = register_filesystem(&tmpfs_fs_type);
2416         if (error) {
2417                 printk(KERN_ERR "Could not register tmpfs\n");
2418                 goto out2;
2419         }
2420
2421         shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
2422                                 tmpfs_fs_type.name, NULL);
2423         if (IS_ERR(shm_mnt)) {
2424                 error = PTR_ERR(shm_mnt);
2425                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2426                 goto out1;
2427         }
2428         return 0;
2429
2430 out1:
2431         unregister_filesystem(&tmpfs_fs_type);
2432 out2:
2433         destroy_inodecache();
2434 out3:
2435         bdi_destroy(&shmem_backing_dev_info);
2436 out4:
2437         shm_mnt = ERR_PTR(error);
2438         return error;
2439 }
2440 module_init(init_tmpfs)
2441
2442 /*
2443  * shmem_file_setup - get an unlinked file living in tmpfs
2444  *
2445  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2446  * @size: size to be set for the file
2447  *
2448  */
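/*
 * A minimal usage sketch (the name here is hypothetical): callers such
 * as the SysV shm code create an unlinked, accounted tmpfs file with
 *
 *	file = shmem_file_setup("dev/example", size, VM_ACCOUNT);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */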
2449 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
2450 {
2451         int error;
2452         struct file *file;
2453         struct inode *inode;
2454         struct dentry *dentry, *root;
2455         struct qstr this;
2456
2457         if (IS_ERR(shm_mnt))
2458                 return (void *)shm_mnt;
2459
2460         if (size < 0 || size > SHMEM_MAX_BYTES)
2461                 return ERR_PTR(-EINVAL);
2462
2463         if (shmem_acct_size(flags, size))
2464                 return ERR_PTR(-ENOMEM);
2465
2466         error = -ENOMEM;
2467         this.name = name;
2468         this.len = strlen(name);
2469         this.hash = 0; /* will go */
2470         root = shm_mnt->mnt_root;
2471         dentry = d_alloc(root, &this);
2472         if (!dentry)
2473                 goto put_memory;
2474
2475         error = -ENFILE;
2476         file = get_empty_filp();
2477         if (!file)
2478                 goto put_dentry;
2479
2480         error = -ENOSPC;
2481         inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
2482         if (!inode)
2483                 goto close_file;
2484
2485         SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
2486         d_instantiate(dentry, inode);
2487         inode->i_size = size;
2488         inode->i_nlink = 0;     /* It is unlinked */
2489         init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
2490                         &shmem_file_operations);
2491         return file;
2492
2493 close_file:
2494         put_filp(file);
2495 put_dentry:
2496         dput(dentry);
2497 put_memory:
2498         shmem_unacct_size(flags, size);
2499         return ERR_PTR(error);
2500 }
2501
2502 /*
2503  * shmem_zero_setup - setup a shared anonymous mapping
2504  *
2505  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
2506  */
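/*
 * Used to back MAP_SHARED|MAP_ANONYMOUS mappings: the vma gets an
 * unlinked tmpfs file so that its pages can be shared between
 * processes and written to swap like any other shmem pages.
 */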
2507 int shmem_zero_setup(struct vm_area_struct *vma)
2508 {
2509         struct file *file;
2510         loff_t size = vma->vm_end - vma->vm_start;
2511
2512         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2513         if (IS_ERR(file))
2514                 return PTR_ERR(file);
2515
2516         if (vma->vm_file)
2517                 fput(vma->vm_file);
2518         vma->vm_file = file;
2519         vma->vm_ops = &shmem_vm_ops;
2520         return 0;
2521 }