/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on ramfs. It
 * extends ramfs with the ability to use swap and honor resource
 * limits, which makes it a completely usable filesystem.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/generic_acl.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC	0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)

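/*
 * Worked example of the limits above (a sketch, assuming 4K pages,
 * 32-bit unsigned long, and SHMEM_NR_DIRECT == 16 as in the layout
 * example further down):
 *
 *	ENTRIES_PER_PAGE     = 4096/4 = 1024 swap entries
 *	ENTRIES_PER_PAGEPAGE = 1024*1024
 *	BLOCKS_PER_PAGE      = 4096/512 = 8 (i_blocks counts 512-byte blocks)
 *	SHMEM_MAX_INDEX      = 16 + (1024*1024/2)*1025, just over 2^29 pages
 *	SHMEM_MAX_BYTES      = SHMEM_MAX_INDEX << 12, a little over 2TB
 */
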
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_QUICK,	/* don't try more than file page cache lookup */
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 *
	 * __GFP_MOVABLE is masked out as swap vectors cannot move
	 */
	return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
				PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

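/*
 * Usage sketch for the helpers above: shmem_swp_entry() below hands
 * back i_direct entries after a shmem_swp_balance_unmap(), and
 * indirect entries via shmem_swp_map(), so a caller finishes with a
 * single shmem_swp_unmap() either way (compare shmem_writepage()):
 *
 *	entry = shmem_swp_entry(info, index, NULL);
 *	if (entry) {
 *		... inspect or set *entry under info->lock ...
 *		shmem_swp_unmap(entry);
 *	}
 */
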
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_ACCOUNT)?
		security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (flags & VM_ACCOUNT)
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_ACCOUNT)?
		0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_ACCOUNT))
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

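/*
 * Worked example of the two schemes above (a sketch, assuming
 * PAGE_CACHE_SIZE == PAGE_SIZE == 4096): a 10000-byte object with
 * VM_ACCOUNT set, e.g. a SysV shm segment, is charged
 * VM_ACCT(10000) == 3 pages once, up front, by shmem_acct_size();
 * a sparse 10000-byte tmpfs file instead pays shmem_acct_block()
 * one page at a time as pages are actually allocated, so its holes
 * cost nothing.
 */
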
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_SPINLOCK(shmem_swaplist_lock);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}

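/*
 * For example (a sketch): if info->alloced is 8, but the mm has
 * meanwhile dropped two clean hole pages, nrpages + swapped may be
 * only 6; freed is then 2, and those two blocks are handed back to
 * the accounting above.
 */
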
/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet, it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized as follows:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect, which points to a page holding doubly indirect blocks
 * in its first half and triple indirect blocks in its second half:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *            |      +-> 20-23
 *            |
 *            +-->dir2 --> 24-27
 *            |        +-> 28-31
 *            |        +-> 32-35
 *            |        +-> 36-39
 *            |
 *            +-->dir3 --> 40-43
 *                     +-> 44-47
 *                     +-> 48-51
 *                     +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}

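/*
 * Worked lookup for the layout above (a sketch, again assuming
 * SHMEM_NR_DIRECT == 16, with a realistic ENTRIES_PER_PAGE of 1024):
 * for index 2000,
 *
 *	index -= 16;			index is now 1984
 *	offset = index % 1024;		offset is 960
 *	index /= 1024;			index is 1
 *
 * 1 is below ENTRIES_PER_PAGE/2, so this is a doubly indirect entry:
 * slot 960 of the subdir page hanging off slot 1 of i_indirect.
 */
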
static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}

/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist allocate the entry.
 *
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
		if (page)
			set_page_private(page, 0);
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}

/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:	pointer to the directory
 * @edir:	pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
						spinlock_t *punch_lock)
{
	spinlock_t *punch_unlock = NULL;
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			if (unlikely(punch_lock)) {
				punch_unlock = punch_lock;
				punch_lock = NULL;
				spin_lock(punch_unlock);
				if (!ptr->val)
					continue;
			}
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	if (punch_unlock)
		spin_unlock(punch_unlock);
	return freed;
}

static int shmem_map_and_free_swp(struct page *subdir, int offset,
		int limit, struct page ***dir, spinlock_t *punch_lock)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
							punch_lock);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}

static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}

static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole;
	spinlock_t *needs_lock;
	spinlock_t *punch_lock;
	unsigned long upper_limit;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		upper_limit = SHMEM_MAX_INDEX;
		info->next_index = idx;
		needs_lock = NULL;
		punch_hole = 0;
	} else {
		if (end + 1 >= inode->i_size) {	/* we may free a little more */
			limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;
			upper_limit = SHMEM_MAX_INDEX;
		} else {
			limit = (end + 1) >> PAGE_CACHE_SHIFT;
			upper_limit = limit;
		}
		needs_lock = &info->lock;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
	}

	/*
	 * If there are no indirect blocks or we are punching a hole
	 * below indirect blocks, nothing to be done.
	 */
	if (!topdir || limit <= SHMEM_NR_DIRECT)
		goto done2;

	/*
	 * The truncation case has already dropped info->lock, and we're safe
	 * because i_size and next_index have already been lowered, preventing
	 * access beyond.  But in the punch_hole case, we still need to take
	 * the lock when updating the swap directory, because there might be
	 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
	 * shmem_writepage.  However, whenever we find we can remove a whole
	 * directory page (not at the misaligned start or end of the range),
	 * we first NULLify its pointer in the level above, and then have no
	 * need to take the lock when updating its contents: needs_lock and
	 * punch_lock (either pointing to info->lock or NULL) manage this.
	 */

	upper_limit -= SHMEM_NR_DIRECT;
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset && upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			if (punch_hole)
				needs_lock = &info->lock;
			if (upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		punch_lock = needs_lock;
		subdir = dir[diroff];
		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
			if (needs_lock) {
				spin_lock(needs_lock);
				dir[diroff] = NULL;
				spin_unlock(needs_lock);
				punch_lock = NULL;
			} else
				dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
		if (subdir && page_private(subdir) /* has swap entries */) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir, punch_lock);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset || punch_lock) {
				spin_lock(&info->lock);
				set_page_private(subdir,
					page_private(subdir) - freed);
				spin_unlock(&info->lock);
			} else
				BUG_ON(page_private(subdir) != freed);
		}
		offset = 0;
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 *
		 * Recalling truncate_inode_pages_range and unmap_mapping_range
		 * every time for punch_hole (which never got a chance to clear
		 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
		 * yet hardly ever necessary: try to optimize them out later.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
		if (punch_hole)
			unmap_mapping_range(inode->i_mapping, start,
							end - start, 1);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}

static void shmem_truncate(struct inode *inode)
{
	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (!error && (attr->ia_valid & ATTR_MODE))
		error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
	if (page)
		page_cache_release(page);
	return error;
}

static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			list_del_init(&info->swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
	}
	BUG_ON(inode->i_blocks);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0) {
		shmem_swp_balance_unmap();
		goto found;
	}
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
			shmem_swp_unmap(ptr);
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = &info->vfs_inode;
	if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr + offset, 0);
	}
	shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	/*
	 * Decrement swap count even when the entry is left behind:
	 * try_to_unuse will skip over mms, then reincrement count.
	 */
	swap_free(entry);
	return 1;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	spin_lock(&shmem_swaplist_lock);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		else if (shmem_unuse_inode(info, entry, page)) {
			/* move head to start search for next from here */
			list_move_tail(&shmem_swaplist, &info->swaplist);
			found = 1;
			break;
		}
	}
	spin_unlock(&shmem_swaplist_lock);
	return found;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	BUG_ON(page_mapped(page));

	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	BUG_ON(!entry);
	BUG_ON(entry->val);

	if (move_to_swap_cache(page, swap) == 0) {
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		spin_unlock(&info->lock);
		if (list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
		unlock_page(page);
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	swap_free(swap);
redirty:
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;	/* Return with the page locked */
}

#ifdef CONFIG_NUMA
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
	char *nodelist = strchr(value, ':');
	int err = 1;

	if (nodelist) {
		/* NUL-terminate policy string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, *policy_nodes))
			goto out;
		if (!nodes_subset(*policy_nodes, node_online_map))
			goto out;
	}
	if (!strcmp(value, "default")) {
		*policy = MPOL_DEFAULT;
		/* Don't allow a nodelist */
		if (!nodelist)
			err = 0;
	} else if (!strcmp(value, "prefer")) {
		*policy = MPOL_PREFERRED;
		/* Insist on a nodelist of one node only */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (!*rest)
				err = 0;
		}
	} else if (!strcmp(value, "bind")) {
		*policy = MPOL_BIND;
		/* Insist on a nodelist */
		if (nodelist)
			err = 0;
	} else if (!strcmp(value, "interleave")) {
		*policy = MPOL_INTERLEAVE;
		/* Default to nodes online if no nodelist */
		if (!nodelist)
			*policy_nodes = node_online_map;
		err = 0;
	}
out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	return err;
}
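
/*
 * Example of the syntax accepted above (a sketch; the tmpfs "mpol="
 * mount option is fed to this parser by the option parsing later in
 * this file):
 *
 *	mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt
 *
 * arrives here as value "interleave:0-3" and yields MPOL_INTERLEAVE
 * over nodes 0-3.
 */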

static struct page *shmem_swapin_async(struct shared_policy *p,
				       swp_entry_t entry, unsigned long idx)
{
	struct page *page;
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_end = PAGE_SIZE;
	pvma.vm_pgoff = idx;
	pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
	page = read_swap_cache_async(entry, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}

struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
			  unsigned long idx)
{
	struct shared_policy *p = &info->policy;
	int i, num;
	struct page *page;
	unsigned long offset;

	num = valid_swaphandles(entry, &offset);
	for (i = 0; i < num; offset++, i++) {
		page = shmem_swapin_async(p,
				swp_entry(swp_type(entry), offset), idx);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return shmem_swapin_async(p, entry, idx);
}

static struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
		 unsigned long idx)
{
	struct vm_area_struct pvma;
	struct page *page;

	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
	pvma.vm_pgoff = idx;
	pvma.vm_end = PAGE_SIZE;
	page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}
#else
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
	return 1;
}

static inline struct page *
shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry, unsigned long idx)
{
	swapin_readahead(entry, 0, NULL);
	return read_swap_cache_async(entry, NULL, 0);
}

static inline struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp | __GFP_ZERO);
}
#endif

/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, since we also free the swap
 * entry: a page cannot live in both the swap and page cache.
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;
	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_readpage and shmem_prepare_write pass in a locked
	 * filepage, which may be found not uptodate by other callers
	 * too, and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	if (sgp == SGP_QUICK)
		goto failed;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			/* here we actually do the io */
			if (type && *type == VM_FAULT_MINOR) {
				__count_vm_event(PGMAJFAULT);
				*type = VM_FAULT_MAJOR;
			}
			spin_unlock(&info->lock);
			swappage = shmem_swapin(info, swap, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (TestSetPageLocked(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = move_from_swap_cache(
				swappage, idx, mapping))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			filepage = swappage;
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			if (error == -ENOMEM) {
				/* let kswapd refresh zone for GFP_ATOMICs */
				congestion_wait(WRITE, HZ/50);
			}
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
						    info,
						    idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			if (error || swap.val || 0 != add_to_page_cache_lru(
					filepage, mapping, idx, GFP_ATOMIC)) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
	}
done:
	if (*pagep != filepage) {
		unlock_page(filepage);
		*pagep = filepage;
	}
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}

static struct page *shmem_nopage(struct vm_area_struct *vma,
				 unsigned long address, int *type)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct page *page = NULL;
	unsigned long idx;
	int error;

	idx = (address - vma->vm_start) >> PAGE_SHIFT;
	idx += vma->vm_pgoff;
	idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
	if (((loff_t) idx << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return NOPAGE_SIGBUS;

	error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
	if (error)
		return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;

	mark_page_accessed(page);
	return page;
}
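
/*
 * The index arithmetic above, worked (a sketch, assuming
 * PAGE_CACHE_SHIFT == PAGE_SHIFT, so the final shift is a no-op):
 * a fault at address vma->vm_start + 0x5000 in a vma with vm_pgoff 3
 * gives idx = 5 + 3 = 8, the ninth page of the file.
 */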
1325
1326 static int shmem_populate(struct vm_area_struct *vma,
1327         unsigned long addr, unsigned long len,
1328         pgprot_t prot, unsigned long pgoff, int nonblock)
1329 {
1330         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1331         struct mm_struct *mm = vma->vm_mm;
1332         enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
1333         unsigned long size;
1334
1335         size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1336         if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
1337                 return -EINVAL;
1338
1339         while ((long) len > 0) {
1340                 struct page *page = NULL;
1341                 int err;
1342                 /*
1343                  * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
1344                  */
1345                 err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
1346                 if (err)
1347                         return err;
1348                 /* Page may still be null, but only if nonblock was set. */
1349                 if (page) {
1350                         mark_page_accessed(page);
1351                         err = install_page(mm, vma, addr, page, prot);
1352                         if (err) {
1353                                 page_cache_release(page);
1354                                 return err;
1355                         }
1356                 } else if (vma->vm_flags & VM_NONLINEAR) {
1357                         /* No page was found just because we can't read it in
1358                          * now (being here implies nonblock != 0), but the page
1359                          * may exist, so set the PTE to fault it in later. */
1360                         err = install_file_pte(mm, vma, addr, pgoff, prot);
1361                         if (err)
1362                                 return err;
1363                 }
1364
1365                 len -= PAGE_SIZE;
1366                 addr += PAGE_SIZE;
1367                 pgoff++;
1368         }
1369         return 0;
1370 }
1371
1372 #ifdef CONFIG_NUMA
1373 int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
1374 {
1375         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1376         return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
1377 }
1378
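/*
 * The shared_policy rbtree is keyed by file page index, so a policy
 * installed with mbind() through one mapping of the object is honoured
 * by all mappings: lookup first converts the faulting address back to
 * an index.
 */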
1379 struct mempolicy *
1380 shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
1381 {
1382         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1383         unsigned long idx;
1384
1385         idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1386         return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
1387 }
1388 #endif
1389
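/*
 * Backs shmctl(2) SHM_LOCK/SHM_UNLOCK: locking charges the whole file
 * size against the caller's RLIMIT_MEMLOCK via user_shm_lock, and sets
 * VM_LOCKED in info->flags so that shmem_writepage refuses to push
 * these pages out to swap.  A minimal userspace sketch of the calling
 * side, assuming an existing SysV segment id "shmid":
 *
 *	if (shmctl(shmid, SHM_LOCK, NULL) != 0)
 *		perror("SHM_LOCK");
 */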
1390 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1391 {
1392         struct inode *inode = file->f_path.dentry->d_inode;
1393         struct shmem_inode_info *info = SHMEM_I(inode);
1394         int retval = -ENOMEM;
1395
1396         spin_lock(&info->lock);
1397         if (lock && !(info->flags & VM_LOCKED)) {
1398                 if (!user_shm_lock(inode->i_size, user))
1399                         goto out_nomem;
1400                 info->flags |= VM_LOCKED;
1401         }
1402         if (!lock && (info->flags & VM_LOCKED) && user) {
1403                 user_shm_unlock(inode->i_size, user);
1404                 info->flags &= ~VM_LOCKED;
1405         }
1406         retval = 0;
1407 out_nomem:
1408         spin_unlock(&info->lock);
1409         return retval;
1410 }
1411
1412 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1413 {
1414         file_accessed(file);
1415         vma->vm_ops = &shmem_vm_ops;
1416         return 0;
1417 }
1418
1419 static struct inode *
1420 shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
1421 {
1422         struct inode *inode;
1423         struct shmem_inode_info *info;
1424         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1425
1426         if (sbinfo->max_inodes) {
1427                 spin_lock(&sbinfo->stat_lock);
1428                 if (!sbinfo->free_inodes) {
1429                         spin_unlock(&sbinfo->stat_lock);
1430                         return NULL;
1431                 }
1432                 sbinfo->free_inodes--;
1433                 spin_unlock(&sbinfo->stat_lock);
1434         }
1435
1436         inode = new_inode(sb);
1437         if (inode) {
1438                 inode->i_mode = mode;
1439                 inode->i_uid = current->fsuid;
1440                 inode->i_gid = current->fsgid;
1441                 inode->i_blocks = 0;
1442                 inode->i_mapping->a_ops = &shmem_aops;
1443                 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1444                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1445                 inode->i_generation = get_seconds();
1446                 info = SHMEM_I(inode);
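                /*
                 * Zero only the shmem-private head of the structure:
                 * shmem_inode_info embeds its struct inode as the last
                 * member, so the byte distance from info to inode spans
                 * exactly the private fields.
                 */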
1447                 memset(info, 0, (char *)inode - (char *)info);
1448                 spin_lock_init(&info->lock);
1449                 INIT_LIST_HEAD(&info->swaplist);
1450
1451                 switch (mode & S_IFMT) {
1452                 default:
1453                         inode->i_op = &shmem_special_inode_operations;
1454                         init_special_inode(inode, mode, dev);
1455                         break;
1456                 case S_IFREG:
1457                         inode->i_op = &shmem_inode_operations;
1458                         inode->i_fop = &shmem_file_operations;
1459                         mpol_shared_policy_init(&info->policy, sbinfo->policy,
1460                                                         &sbinfo->policy_nodes);
1461                         break;
1462                 case S_IFDIR:
1463                         inc_nlink(inode);
1464                         /* Some things misbehave if size == 0 on a directory */
1465                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1466                         inode->i_op = &shmem_dir_inode_operations;
1467                         inode->i_fop = &simple_dir_operations;
1468                         break;
1469                 case S_IFLNK:
1470                         /*
1471                          * Must not load anything in the rbtree,
1472                          * Must not load anything into the rbtree:
1473                          * mpol_free_shared_policy is not called for symlinks.
1474                         mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
1475                                                 NULL);
1476                         break;
1477                 }
1478         } else if (sbinfo->max_inodes) {
1479                 spin_lock(&sbinfo->stat_lock);
1480                 sbinfo->free_inodes++;
1481                 spin_unlock(&sbinfo->stat_lock);
1482         }
1483         return inode;
1484 }
1485
1486 #ifdef CONFIG_TMPFS
1487 static const struct inode_operations shmem_symlink_inode_operations;
1488 static const struct inode_operations shmem_symlink_inline_operations;
1489
1490 /*
1491  * Normally tmpfs avoids the use of shmem_readpage and shmem_prepare_write;
1492  * but providing them allows a tmpfs file to be used for splice, sendfile, and
1493  * below the loop driver, in the generic fashion that many filesystems support.
1494  */
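/*
 * For instance (illustrative): "losetup /dev/loop0 /tmp/img" on a tmpfs
 * file works because the loop driver reads and writes its backing file
 * through these generic address_space operations.
 */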
1495 static int shmem_readpage(struct file *file, struct page *page)
1496 {
1497         struct inode *inode = page->mapping->host;
1498         int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
1499         unlock_page(page);
1500         return error;
1501 }
1502
1503 static int
1504 shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
1505 {
1506         struct inode *inode = page->mapping->host;
1507         return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
1508 }
1509
1510 static ssize_t
1511 shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
1512 {
1513         struct inode    *inode = file->f_path.dentry->d_inode;
1514         loff_t          pos;
1515         unsigned long   written;
1516         ssize_t         err;
1517
1518         if ((ssize_t) count < 0)
1519                 return -EINVAL;
1520
1521         if (!access_ok(VERIFY_READ, buf, count))
1522                 return -EFAULT;
1523
1524         mutex_lock(&inode->i_mutex);
1525
1526         pos = *ppos;
1527         written = 0;
1528
1529         err = generic_write_checks(file, &pos, &count, 0);
1530         if (err || !count)
1531                 goto out;
1532
1533         err = remove_suid(file->f_path.dentry);
1534         if (err)
1535                 goto out;
1536
1537         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1538
1539         do {
1540                 struct page *page = NULL;
1541                 unsigned long bytes, index, offset;
1542                 char *kaddr;
1543                 int left;
1544
1545                 offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
1546                 index = pos >> PAGE_CACHE_SHIFT;
1547                 bytes = PAGE_CACHE_SIZE - offset;
1548                 if (bytes > count)
1549                         bytes = count;
1550
1551                 /*
1552                  * We don't hold page lock across copy from user -
1553                  * what would it guard against? - so no deadlock here.
1554                  * But it still may be a good idea to prefault below.
1555                  */
1556
1557                 err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
1558                 if (err)
1559                         break;
1560
1561                 left = bytes;
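                /*
                 * Prefault the first and last user bytes with
                 * __get_user, then try the atomic kmap copy, which must
                 * not fault; any bytes it leaves uncopied are retried
                 * via the sleeping kmap path below.
                 */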
1562                 if (PageHighMem(page)) {
1563                         volatile unsigned char dummy;
1564                         __get_user(dummy, buf);
1565                         __get_user(dummy, buf + bytes - 1);
1566
1567                         kaddr = kmap_atomic(page, KM_USER0);
1568                         left = __copy_from_user_inatomic(kaddr + offset,
1569                                                         buf, bytes);
1570                         kunmap_atomic(kaddr, KM_USER0);
1571                 }
1572                 if (left) {
1573                         kaddr = kmap(page);
1574                         left = __copy_from_user(kaddr + offset, buf, bytes);
1575                         kunmap(page);
1576                 }
1577
1578                 written += bytes;
1579                 count -= bytes;
1580                 pos += bytes;
1581                 buf += bytes;
1582                 if (pos > inode->i_size)
1583                         i_size_write(inode, pos);
1584
1585                 flush_dcache_page(page);
1586                 set_page_dirty(page);
1587                 mark_page_accessed(page);
1588                 page_cache_release(page);
1589
1590                 if (left) {
1591                         pos -= left;
1592                         written -= left;
1593                         err = -EFAULT;
1594                         break;
1595                 }
1596
1597                 /*
1598                  * Our dirty pages are not counted in nr_dirty,
1599                  * and we do not attempt to balance dirty pages.
1600                  */
1601
1602                 cond_resched();
1603         } while (count);
1604
1605         *ppos = pos;
1606         if (written)
1607                 err = written;
1608 out:
1609         mutex_unlock(&inode->i_mutex);
1610         return err;
1611 }
1612
1613 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1614 {
1615         struct inode *inode = filp->f_path.dentry->d_inode;
1616         struct address_space *mapping = inode->i_mapping;
1617         unsigned long index, offset;
1618
1619         index = *ppos >> PAGE_CACHE_SHIFT;
1620         offset = *ppos & ~PAGE_CACHE_MASK;
1621
1622         for (;;) {
1623                 struct page *page = NULL;
1624                 unsigned long end_index, nr, ret;
1625                 loff_t i_size = i_size_read(inode);
1626
1627                 end_index = i_size >> PAGE_CACHE_SHIFT;
1628                 if (index > end_index)
1629                         break;
1630                 if (index == end_index) {
1631                         nr = i_size & ~PAGE_CACHE_MASK;
1632                         if (nr <= offset)
1633                                 break;
1634                 }
1635
1636                 desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
1637                 if (desc->error) {
1638                         if (desc->error == -EINVAL)
1639                                 desc->error = 0;
1640                         break;
1641                 }
1642
1643                 /*
1644                  * We must evaluate after, since reads (unlike writes)
1645                  * are called without i_mutex protection against truncate
1646                  */
1647                 nr = PAGE_CACHE_SIZE;
1648                 i_size = i_size_read(inode);
1649                 end_index = i_size >> PAGE_CACHE_SHIFT;
1650                 if (index == end_index) {
1651                         nr = i_size & ~PAGE_CACHE_MASK;
1652                         if (nr <= offset) {
1653                                 if (page)
1654                                         page_cache_release(page);
1655                                 break;
1656                         }
1657                 }
1658                 nr -= offset;
1659
1660                 if (page) {
1661                         /*
1662                          * If users can be writing to this page using arbitrary
1663                          * virtual addresses, take care about potential aliasing
1664                          * before reading the page on the kernel side.
1665                          */
1666                         if (mapping_writably_mapped(mapping))
1667                                 flush_dcache_page(page);
1668                         /*
1669                          * Mark the page accessed if we read the beginning.
1670                          */
1671                         if (!offset)
1672                                 mark_page_accessed(page);
1673                 } else {
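                        /*
                         * SGP_READ returns no page for a hole in the
                         * file: copy from the shared zero page instead
                         * of allocating one.
                         */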
1674                         page = ZERO_PAGE(0);
1675                         page_cache_get(page);
1676                 }
1677
1678                 /*
1679                  * Ok, we have the page, and it's up-to-date, so
1680                  * now we can copy it to user space...
1681                  *
1682                  * The actor routine returns how many bytes were actually used.
1683                  * NOTE! This may not be the same as how much of a user buffer
1684                  * we filled up (we may be padding etc), so we can only update
1685                  * "pos" here (the actor routine has to update the user buffer
1686                  * pointers and the remaining count).
1687                  */
1688                 ret = actor(desc, page, offset, nr);
1689                 offset += ret;
1690                 index += offset >> PAGE_CACHE_SHIFT;
1691                 offset &= ~PAGE_CACHE_MASK;
1692
1693                 page_cache_release(page);
1694                 if (ret != nr || !desc->count)
1695                         break;
1696
1697                 cond_resched();
1698         }
1699
1700         *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1701         file_accessed(filp);
1702 }
1703
1704 static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
1705 {
1706         read_descriptor_t desc;
1707
1708         if ((ssize_t) count < 0)
1709                 return -EINVAL;
1710         if (!access_ok(VERIFY_WRITE, buf, count))
1711                 return -EFAULT;
1712         if (!count)
1713                 return 0;
1714
1715         desc.written = 0;
1716         desc.count = count;
1717         desc.arg.buf = buf;
1718         desc.error = 0;
1719
1720         do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1721         if (desc.written)
1722                 return desc.written;
1723         return desc.error;
1724 }
1725
1726 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1727 {
1728         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1729
1730         buf->f_type = TMPFS_MAGIC;
1731         buf->f_bsize = PAGE_CACHE_SIZE;
1732         buf->f_namelen = NAME_MAX;
1733         spin_lock(&sbinfo->stat_lock);
1734         if (sbinfo->max_blocks) {
1735                 buf->f_blocks = sbinfo->max_blocks;
1736                 buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
1737         }
1738         if (sbinfo->max_inodes) {
1739                 buf->f_files = sbinfo->max_inodes;
1740                 buf->f_ffree = sbinfo->free_inodes;
1741         }
1742         /* else leave those fields 0 like simple_statfs */
1743         spin_unlock(&sbinfo->stat_lock);
1744         return 0;
1745 }
1746
1747 /*
1748  * File creation. Allocate an inode, and we're done.
1749  */
1750 static int
1751 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1752 {
1753         struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
1754         int error = -ENOSPC;
1755
1756         if (inode) {
1757                 error = security_inode_init_security(inode, dir, NULL, NULL,
1758                                                      NULL);
1759                 if (error) {
1760                         if (error != -EOPNOTSUPP) {
1761                                 iput(inode);
1762                                 return error;
1763                         }
1764                 }
1765                 error = shmem_acl_init(inode, dir);
1766                 if (error) {
1767                         iput(inode);
1768                         return error;
1769                 }
1770                 if (dir->i_mode & S_ISGID) {
1771                         inode->i_gid = dir->i_gid;
1772                         if (S_ISDIR(mode))
1773                                 inode->i_mode |= S_ISGID;
1774                 }
1775                 dir->i_size += BOGO_DIRENT_SIZE;
1776                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1777                 d_instantiate(dentry, inode);
1778                 dget(dentry); /* Extra count - pin the dentry in core */
1779         }
1780         return error;
1781 }
1782
1783 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1784 {
1785         int error;
1786
1787         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1788                 return error;
1789         inc_nlink(dir);
1790         return 0;
1791 }
1792
1793 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1794                 struct nameidata *nd)
1795 {
1796         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1797 }
1798
1799 /*
1800  * Link a file.
1801  */
1802 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1803 {
1804         struct inode *inode = old_dentry->d_inode;
1805         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1806
1807         /*
1808          * No ordinary (disk based) filesystem counts links as inodes;
1809          * but each new link needs a new dentry, pinning lowmem, and
1810          * tmpfs dentries cannot be pruned until they are unlinked.
1811          */
1812         if (sbinfo->max_inodes) {
1813                 spin_lock(&sbinfo->stat_lock);
1814                 if (!sbinfo->free_inodes) {
1815                         spin_unlock(&sbinfo->stat_lock);
1816                         return -ENOSPC;
1817                 }
1818                 sbinfo->free_inodes--;
1819                 spin_unlock(&sbinfo->stat_lock);
1820         }
1821
1822         dir->i_size += BOGO_DIRENT_SIZE;
1823         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1824         inc_nlink(inode);
1825         atomic_inc(&inode->i_count);    /* New dentry reference */
1826         dget(dentry);           /* Extra pinning count for the created dentry */
1827         d_instantiate(dentry, inode);
1828         return 0;
1829 }
1830
1831 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1832 {
1833         struct inode *inode = dentry->d_inode;
1834
1835         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
1836                 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1837                 if (sbinfo->max_inodes) {
1838                         spin_lock(&sbinfo->stat_lock);
1839                         sbinfo->free_inodes++;
1840                         spin_unlock(&sbinfo->stat_lock);
1841                 }
1842         }
1843
1844         dir->i_size -= BOGO_DIRENT_SIZE;
1845         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1846         drop_nlink(inode);
1847         dput(dentry);   /* Undo the count from "create" - this does all the work */
1848         return 0;
1849 }
1850
1851 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1852 {
1853         if (!simple_empty(dentry))
1854                 return -ENOTEMPTY;
1855
1856         drop_nlink(dentry->d_inode);
1857         drop_nlink(dir);
1858         return shmem_unlink(dir, dentry);
1859 }
1860
1861 /*
1862  * The VFS layer already does all the dentry stuff for rename,
1863  * we just have to decrement the usage count for the target if
1864  * it exists so that the VFS layer correctly frees it when it
1865  * gets overwritten.
1866  */
1867 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1868 {
1869         struct inode *inode = old_dentry->d_inode;
1870         int they_are_dirs = S_ISDIR(inode->i_mode);
1871
1872         if (!simple_empty(new_dentry))
1873                 return -ENOTEMPTY;
1874
1875         if (new_dentry->d_inode) {
1876                 (void) shmem_unlink(new_dir, new_dentry);
1877                 if (they_are_dirs)
1878                         drop_nlink(old_dir);
1879         } else if (they_are_dirs) {
1880                 drop_nlink(old_dir);
1881                 inc_nlink(new_dir);
1882         }
1883
1884         old_dir->i_size -= BOGO_DIRENT_SIZE;
1885         new_dir->i_size += BOGO_DIRENT_SIZE;
1886         old_dir->i_ctime = old_dir->i_mtime =
1887         new_dir->i_ctime = new_dir->i_mtime =
1888         inode->i_ctime = CURRENT_TIME;
1889         return 0;
1890 }
1891
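/*
 * Symlink bodies have two homes: a target short enough to fit in the
 * shmem-private head of the inode (the same bytes zeroed by
 * shmem_get_inode) is stored inline there; anything longer goes into
 * page 0 of the inode's page cache, where it can be swapped like file
 * data.  The exact inline capacity depends on struct layout, typically
 * on the order of a hundred bytes or more.
 */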
1892 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1893 {
1894         int error;
1895         int len;
1896         struct inode *inode;
1897         struct page *page = NULL;
1898         char *kaddr;
1899         struct shmem_inode_info *info;
1900
1901         len = strlen(symname) + 1;
1902         if (len > PAGE_CACHE_SIZE)
1903                 return -ENAMETOOLONG;
1904
1905         inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
1906         if (!inode)
1907                 return -ENOSPC;
1908
1909         error = security_inode_init_security(inode, dir, NULL, NULL,
1910                                              NULL);
1911         if (error) {
1912                 if (error != -EOPNOTSUPP) {
1913                         iput(inode);
1914                         return error;
1915                 }
1916                 error = 0;
1917         }
1918
1919         info = SHMEM_I(inode);
1920         inode->i_size = len-1;
1921         if (len <= (char *)inode - (char *)info) {
1922                 /* do it inline */
1923                 memcpy(info, symname, len);
1924                 inode->i_op = &shmem_symlink_inline_operations;
1925         } else {
1926                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1927                 if (error) {
1928                         iput(inode);
1929                         return error;
1930                 }
1931                 inode->i_op = &shmem_symlink_inode_operations;
1932                 kaddr = kmap_atomic(page, KM_USER0);
1933                 memcpy(kaddr, symname, len);
1934                 kunmap_atomic(kaddr, KM_USER0);
1935                 set_page_dirty(page);
1936                 page_cache_release(page);
1937         }
1938         if (dir->i_mode & S_ISGID)
1939                 inode->i_gid = dir->i_gid;
1940         dir->i_size += BOGO_DIRENT_SIZE;
1941         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1942         d_instantiate(dentry, inode);
1943         dget(dentry);
1944         return 0;
1945 }
1946
1947 static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
1948 {
1949         nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
1950         return NULL;
1951 }
1952
1953 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1954 {
1955         struct page *page = NULL;
1956         int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1957         nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
1958         return page;
1959 }
1960
1961 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1962 {
1963         if (!IS_ERR(nd_get_link(nd))) {
1964                 struct page *page = cookie;
1965                 kunmap(page);
1966                 mark_page_accessed(page);
1967                 page_cache_release(page);
1968         }
1969 }
1970
1971 static const struct inode_operations shmem_symlink_inline_operations = {
1972         .readlink       = generic_readlink,
1973         .follow_link    = shmem_follow_link_inline,
1974 };
1975
1976 static const struct inode_operations shmem_symlink_inode_operations = {
1977         .truncate       = shmem_truncate,
1978         .readlink       = generic_readlink,
1979         .follow_link    = shmem_follow_link,
1980         .put_link       = shmem_put_link,
1981 };
1982
1983 #ifdef CONFIG_TMPFS_POSIX_ACL
1984 /*
1985  * Superblocks without xattr inode operations will get security.* xattr
1986  * support from the VFS "for free". As soon as we have any other xattrs
1987  * like ACLs, we also need to implement the security.* handlers at
1988  * filesystem level, though.
1989  */
1990
1991 static size_t shmem_xattr_security_list(struct inode *inode, char *list,
1992                                         size_t list_len, const char *name,
1993                                         size_t name_len)
1994 {
1995         return security_inode_listsecurity(inode, list, list_len);
1996 }
1997
1998 static int shmem_xattr_security_get(struct inode *inode, const char *name,
1999                                     void *buffer, size_t size)
2000 {
2001         if (strcmp(name, "") == 0)
2002                 return -EINVAL;
2003         return security_inode_getsecurity(inode, name, buffer, size,
2004                                           -EOPNOTSUPP);
2005 }
2006
2007 static int shmem_xattr_security_set(struct inode *inode, const char *name,
2008                                     const void *value, size_t size, int flags)
2009 {
2010         if (strcmp(name, "") == 0)
2011                 return -EINVAL;
2012         return security_inode_setsecurity(inode, name, value, size, flags);
2013 }
2014
2015 static struct xattr_handler shmem_xattr_security_handler = {
2016         .prefix = XATTR_SECURITY_PREFIX,
2017         .list   = shmem_xattr_security_list,
2018         .get    = shmem_xattr_security_get,
2019         .set    = shmem_xattr_security_set,
2020 };
2021
2022 static struct xattr_handler *shmem_xattr_handlers[] = {
2023         &shmem_xattr_acl_access_handler,
2024         &shmem_xattr_acl_default_handler,
2025         &shmem_xattr_security_handler,
2026         NULL
2027 };
2028 #endif
2029
2030 static struct dentry *shmem_get_parent(struct dentry *child)
2031 {
2032         return ERR_PTR(-ESTALE);
2033 }
2034
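/*
 * NFS file handle layout used by the export ops below: fh[0] holds
 * i_generation, fh[1] the low 32 bits of i_ino, fh[2] the high 32 bits.
 * Inodes are hashed lazily in shmem_encode_fh at i_ino + i_generation,
 * which is why shmem_get_dentry passes inum + fh[0] to ilookup5.
 */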
2035 static int shmem_match(struct inode *ino, void *vfh)
2036 {
2037         __u32 *fh = vfh;
2038         __u64 inum = fh[2];
2039         inum = (inum << 32) | fh[1];
2040         return ino->i_ino == inum && fh[0] == ino->i_generation;
2041 }
2042
2043 static struct dentry *shmem_get_dentry(struct super_block *sb, void *vfh)
2044 {
2045         struct dentry *de = NULL;
2046         struct inode *inode;
2047         __u32 *fh = vfh;
2048         __u64 inum = fh[2];
2049         inum = (inum << 32) | fh[1];
2050
2051         inode = ilookup5(sb, (unsigned long)(inum+fh[0]), shmem_match, vfh);
2052         if (inode) {
2053                 de = d_find_alias(inode);
2054                 iput(inode);
2055         }
2056
2057         return de? de: ERR_PTR(-ESTALE);
2058 }
2059
2060 static struct dentry *shmem_decode_fh(struct super_block *sb, __u32 *fh,
2061                 int len, int type,
2062                 int (*acceptable)(void *context, struct dentry *de),
2063                 void *context)
2064 {
2065         if (len < 3)
2066                 return ERR_PTR(-ESTALE);
2067
2068         return sb->s_export_op->find_exported_dentry(sb, fh, NULL, acceptable,
2069                                                         context);
2070 }
2071
2072 static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
2073                                 int connectable)
2074 {
2075         struct inode *inode = dentry->d_inode;
2076
2077         if (*len < 3)
2078                 return 255;
2079
2080         if (hlist_unhashed(&inode->i_hash)) {
2081                 /* Unfortunately insert_inode_hash is not idempotent,
2082                  * so as we hash inodes here rather than at creation
2083                  * time, we need a lock to ensure we only try
2084                  * to do it once
2085                  * to do it once.
2086                 static DEFINE_SPINLOCK(lock);
2087                 spin_lock(&lock);
2088                 if (hlist_unhashed(&inode->i_hash))
2089                         __insert_inode_hash(inode,
2090                                             inode->i_ino + inode->i_generation);
2091                 spin_unlock(&lock);
2092         }
2093
2094         fh[0] = inode->i_generation;
2095         fh[1] = inode->i_ino;
2096         fh[2] = ((__u64)inode->i_ino) >> 32;
2097
2098         *len = 3;
2099         return 1;
2100 }
2101
2102 static struct export_operations shmem_export_ops = {
2103         .get_parent     = shmem_get_parent,
2104         .get_dentry     = shmem_get_dentry,
2105         .encode_fh      = shmem_encode_fh,
2106         .decode_fh      = shmem_decode_fh,
2107 };
2108
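/*
 * Illustrative option strings (values made up) that this parser
 * accepts:
 *
 *	size=512m,nr_inodes=10k,mode=1777
 *	size=50%			(percent of total RAM)
 *	mpol=interleave:0,2,4,uid=100	(nodelist commas are preserved)
 *
 * A comma only acts as a separator when the character after it is not
 * a digit, which is how an mpol nodelist survives the split.
 */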
2109 static int shmem_parse_options(char *options, int *mode, uid_t *uid,
2110         gid_t *gid, unsigned long *blocks, unsigned long *inodes,
2111         int *policy, nodemask_t *policy_nodes)
2112 {
2113         char *this_char, *value, *rest;
2114
2115         while (options != NULL) {
2116                 this_char = options;
2117                 for (;;) {
2118                         /*
2119                          * NUL-terminate this option: unfortunately,
2120                          * mount options form a comma-separated list,
2121                          * but mpol's nodelist may also contain commas.
2122                          */
2123                         options = strchr(options, ',');
2124                         if (options == NULL)
2125                                 break;
2126                         options++;
2127                         if (!isdigit(*options)) {
2128                                 options[-1] = '\0';
2129                                 break;
2130                         }
2131                 }
2132                 if (!*this_char)
2133                         continue;
2134                 if ((value = strchr(this_char,'=')) != NULL) {
2135                         *value++ = 0;
2136                 } else {
2137                         printk(KERN_ERR
2138                             "tmpfs: No value for mount option '%s'\n",
2139                             this_char);
2140                         return 1;
2141                 }
2142
2143                 if (!strcmp(this_char,"size")) {
2144                         unsigned long long size;
2145                         size = memparse(value,&rest);
2146                         if (*rest == '%') {
2147                                 size <<= PAGE_SHIFT;
2148                                 size *= totalram_pages;
2149                                 do_div(size, 100);
2150                                 rest++;
2151                         }
2152                         if (*rest)
2153                                 goto bad_val;
2154                         *blocks = size >> PAGE_CACHE_SHIFT;
2155                 } else if (!strcmp(this_char,"nr_blocks")) {
2156                         *blocks = memparse(value,&rest);
2157                         if (*rest)
2158                                 goto bad_val;
2159                 } else if (!strcmp(this_char,"nr_inodes")) {
2160                         *inodes = memparse(value,&rest);
2161                         if (*rest)
2162                                 goto bad_val;
2163                 } else if (!strcmp(this_char,"mode")) {
2164                         if (!mode)
2165                                 continue;
2166                         *mode = simple_strtoul(value,&rest,8);
2167                         if (*rest)
2168                                 goto bad_val;
2169                 } else if (!strcmp(this_char,"uid")) {
2170                         if (!uid)
2171                                 continue;
2172                         *uid = simple_strtoul(value,&rest,0);
2173                         if (*rest)
2174                                 goto bad_val;
2175                 } else if (!strcmp(this_char,"gid")) {
2176                         if (!gid)
2177                                 continue;
2178                         *gid = simple_strtoul(value,&rest,0);
2179                         if (*rest)
2180                                 goto bad_val;
2181                 } else if (!strcmp(this_char,"mpol")) {
2182                         if (shmem_parse_mpol(value,policy,policy_nodes))
2183                                 goto bad_val;
2184                 } else {
2185                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2186                                this_char);
2187                         return 1;
2188                 }
2189         }
2190         return 0;
2191
2192 bad_val:
2193         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2194                value, this_char);
2195         return 1;
2197 }
2198
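/*
 * Handles e.g. "mount -o remount,size=2g /dev/shm".  Limits may grow
 * or shrink, but never below current usage, and never from unlimited
 * (0) to limited, since no usage has been accounted in the meantime.
 */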
2199 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2200 {
2201         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2202         unsigned long max_blocks = sbinfo->max_blocks;
2203         unsigned long max_inodes = sbinfo->max_inodes;
2204         int policy = sbinfo->policy;
2205         nodemask_t policy_nodes = sbinfo->policy_nodes;
2206         unsigned long blocks;
2207         unsigned long inodes;
2208         int error = -EINVAL;
2209
2210         if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
2211                                 &max_inodes, &policy, &policy_nodes))
2212                 return error;
2213
2214         spin_lock(&sbinfo->stat_lock);
2215         blocks = sbinfo->max_blocks - sbinfo->free_blocks;
2216         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2217         if (max_blocks < blocks)
2218                 goto out;
2219         if (max_inodes < inodes)
2220                 goto out;
2221         /*
2222          * Those tests also disallow limited->unlimited while any are in
2223          * use, so i_blocks will always be zero when max_blocks is zero;
2224          * but we must separately disallow unlimited->limited, because
2225          * in that case we have no record of how much is already in use.
2226          */
2227         if (max_blocks && !sbinfo->max_blocks)
2228                 goto out;
2229         if (max_inodes && !sbinfo->max_inodes)
2230                 goto out;
2231
2232         error = 0;
2233         sbinfo->max_blocks  = max_blocks;
2234         sbinfo->free_blocks = max_blocks - blocks;
2235         sbinfo->max_inodes  = max_inodes;
2236         sbinfo->free_inodes = max_inodes - inodes;
2237         sbinfo->policy = policy;
2238         sbinfo->policy_nodes = policy_nodes;
2239 out:
2240         spin_unlock(&sbinfo->stat_lock);
2241         return error;
2242 }
2243 #endif
2244
2245 static void shmem_put_super(struct super_block *sb)
2246 {
2247         kfree(sb->s_fs_info);
2248         sb->s_fs_info = NULL;
2249 }
2250
2251 static int shmem_fill_super(struct super_block *sb,
2252                             void *data, int silent)
2253 {
2254         struct inode *inode;
2255         struct dentry *root;
2256         int mode   = S_IRWXUGO | S_ISVTX;
2257         uid_t uid = current->fsuid;
2258         gid_t gid = current->fsgid;
2259         int err = -ENOMEM;
2260         struct shmem_sb_info *sbinfo;
2261         unsigned long blocks = 0;
2262         unsigned long inodes = 0;
2263         int policy = MPOL_DEFAULT;
2264         nodemask_t policy_nodes = node_online_map;
2265
2266 #ifdef CONFIG_TMPFS
2267         /*
2268          * By default we only allow half of the physical RAM per
2269          * tmpfs instance, limiting inodes to one per page of lowmem;
2270          * but the internal instance is left unlimited.
2271          */
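        /*
         * Worked example (assuming 4KB pages, 1GB of RAM, no highmem):
         * totalram_pages == 262144, so the default is 131072 blocks
         * (512MB); inodes start at 262144 lowmem pages and are then
         * clamped down to 131072.
         */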
2272         if (!(sb->s_flags & MS_NOUSER)) {
2273                 blocks = totalram_pages / 2;
2274                 inodes = totalram_pages - totalhigh_pages;
2275                 if (inodes > blocks)
2276                         inodes = blocks;
2277                 if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
2278                                         &inodes, &policy, &policy_nodes))
2279                         return -EINVAL;
2280         }
2281         sb->s_export_op = &shmem_export_ops;
2282 #else
2283         sb->s_flags |= MS_NOUSER;
2284 #endif
2285
2286         /* Round up to L1_CACHE_BYTES to resist false sharing */
2287         sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
2288                                 L1_CACHE_BYTES), GFP_KERNEL);
2289         if (!sbinfo)
2290                 return -ENOMEM;
2291
2292         spin_lock_init(&sbinfo->stat_lock);
2293         sbinfo->max_blocks = blocks;
2294         sbinfo->free_blocks = blocks;
2295         sbinfo->max_inodes = inodes;
2296         sbinfo->free_inodes = inodes;
2297         sbinfo->policy = policy;
2298         sbinfo->policy_nodes = policy_nodes;
2299
2300         sb->s_fs_info = sbinfo;
2301         sb->s_maxbytes = SHMEM_MAX_BYTES;
2302         sb->s_blocksize = PAGE_CACHE_SIZE;
2303         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2304         sb->s_magic = TMPFS_MAGIC;
2305         sb->s_op = &shmem_ops;
2306         sb->s_time_gran = 1;
2307 #ifdef CONFIG_TMPFS_POSIX_ACL
2308         sb->s_xattr = shmem_xattr_handlers;
2309         sb->s_flags |= MS_POSIXACL;
2310 #endif
2311
2312         inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
2313         if (!inode)
2314                 goto failed;
2315         inode->i_uid = uid;
2316         inode->i_gid = gid;
2317         root = d_alloc_root(inode);
2318         if (!root)
2319                 goto failed_iput;
2320         sb->s_root = root;
2321         return 0;
2322
2323 failed_iput:
2324         iput(inode);
2325 failed:
2326         shmem_put_super(sb);
2327         return err;
2328 }
2329
2330 static struct kmem_cache *shmem_inode_cachep;
2331
2332 static struct inode *shmem_alloc_inode(struct super_block *sb)
2333 {
2334         struct shmem_inode_info *p;
2335         p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2336         if (!p)
2337                 return NULL;
2338         return &p->vfs_inode;
2339 }
2340
2341 static void shmem_destroy_inode(struct inode *inode)
2342 {
2343         if ((inode->i_mode & S_IFMT) == S_IFREG) {
2344                 /* only struct inode is valid if it's an inline symlink */
2345                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2346         }
2347         shmem_acl_destroy_inode(inode);
2348         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2349 }
2350
2351 static void init_once(void *foo, struct kmem_cache *cachep,
2352                       unsigned long flags)
2353 {
2354         struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
2355
2356         inode_init_once(&p->vfs_inode);
2357 #ifdef CONFIG_TMPFS_POSIX_ACL
2358         p->i_acl = NULL;
2359         p->i_default_acl = NULL;
2360 #endif
2361 }
2362
2363 static int init_inodecache(void)
2364 {
2365         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2366                                 sizeof(struct shmem_inode_info),
2367                                 0, 0, init_once, NULL);
2368         if (shmem_inode_cachep == NULL)
2369                 return -ENOMEM;
2370         return 0;
2371 }
2372
2373 static void destroy_inodecache(void)
2374 {
2375         kmem_cache_destroy(shmem_inode_cachep);
2376 }
2377
2378 static const struct address_space_operations shmem_aops = {
2379         .writepage      = shmem_writepage,
2380         .set_page_dirty = __set_page_dirty_no_writeback,
2381 #ifdef CONFIG_TMPFS
2382         .readpage       = shmem_readpage,
2383         .prepare_write  = shmem_prepare_write,
2384         .commit_write   = simple_commit_write,
2385 #endif
2386         .migratepage    = migrate_page,
2387 };
2388
2389 static const struct file_operations shmem_file_operations = {
2390         .mmap           = shmem_mmap,
2391 #ifdef CONFIG_TMPFS
2392         .llseek         = generic_file_llseek,
2393         .read           = shmem_file_read,
2394         .write          = shmem_file_write,
2395         .fsync          = simple_sync_file,
2396         .splice_read    = generic_file_splice_read,
2397         .splice_write   = generic_file_splice_write,
2398 #endif
2399 };
2400
2401 static const struct inode_operations shmem_inode_operations = {
2402         .truncate       = shmem_truncate,
2403         .setattr        = shmem_notify_change,
2404         .truncate_range = shmem_truncate_range,
2405 #ifdef CONFIG_TMPFS_POSIX_ACL
2406         .setxattr       = generic_setxattr,
2407         .getxattr       = generic_getxattr,
2408         .listxattr      = generic_listxattr,
2409         .removexattr    = generic_removexattr,
2410         .permission     = shmem_permission,
2411 #endif
2413 };
2414
2415 static const struct inode_operations shmem_dir_inode_operations = {
2416 #ifdef CONFIG_TMPFS
2417         .create         = shmem_create,
2418         .lookup         = simple_lookup,
2419         .link           = shmem_link,
2420         .unlink         = shmem_unlink,
2421         .symlink        = shmem_symlink,
2422         .mkdir          = shmem_mkdir,
2423         .rmdir          = shmem_rmdir,
2424         .mknod          = shmem_mknod,
2425         .rename         = shmem_rename,
2426 #endif
2427 #ifdef CONFIG_TMPFS_POSIX_ACL
2428         .setattr        = shmem_notify_change,
2429         .setxattr       = generic_setxattr,
2430         .getxattr       = generic_getxattr,
2431         .listxattr      = generic_listxattr,
2432         .removexattr    = generic_removexattr,
2433         .permission     = shmem_permission,
2434 #endif
2435 };
2436
2437 static const struct inode_operations shmem_special_inode_operations = {
2438 #ifdef CONFIG_TMPFS_POSIX_ACL
2439         .setattr        = shmem_notify_change,
2440         .setxattr       = generic_setxattr,
2441         .getxattr       = generic_getxattr,
2442         .listxattr      = generic_listxattr,
2443         .removexattr    = generic_removexattr,
2444         .permission     = shmem_permission,
2445 #endif
2446 };
2447
2448 static const struct super_operations shmem_ops = {
2449         .alloc_inode    = shmem_alloc_inode,
2450         .destroy_inode  = shmem_destroy_inode,
2451 #ifdef CONFIG_TMPFS
2452         .statfs         = shmem_statfs,
2453         .remount_fs     = shmem_remount_fs,
2454 #endif
2455         .delete_inode   = shmem_delete_inode,
2456         .drop_inode     = generic_delete_inode,
2457         .put_super      = shmem_put_super,
2458 };
2459
2460 static struct vm_operations_struct shmem_vm_ops = {
2461         .nopage         = shmem_nopage,
2462         .populate       = shmem_populate,
2463 #ifdef CONFIG_NUMA
2464         .set_policy     = shmem_set_policy,
2465         .get_policy     = shmem_get_policy,
2466 #endif
2467 };
2468
2470 static int shmem_get_sb(struct file_system_type *fs_type,
2471         int flags, const char *dev_name, void *data, struct vfsmount *mnt)
2472 {
2473         return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
2474 }
2475
2476 static struct file_system_type tmpfs_fs_type = {
2477         .owner          = THIS_MODULE,
2478         .name           = "tmpfs",
2479         .get_sb         = shmem_get_sb,
2480         .kill_sb        = kill_litter_super,
2481 };
2482 static struct vfsmount *shm_mnt;
2483
2484 static int __init init_tmpfs(void)
2485 {
2486         int error;
2487
2488         error = init_inodecache();
2489         if (error)
2490                 goto out3;
2491
2492         error = register_filesystem(&tmpfs_fs_type);
2493         if (error) {
2494                 printk(KERN_ERR "Could not register tmpfs\n");
2495                 goto out2;
2496         }
2497
2498         shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
2499                                 tmpfs_fs_type.name, NULL);
2500         if (IS_ERR(shm_mnt)) {
2501                 error = PTR_ERR(shm_mnt);
2502                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2503                 goto out1;
2504         }
2505         return 0;
2506
2507 out1:
2508         unregister_filesystem(&tmpfs_fs_type);
2509 out2:
2510         destroy_inodecache();
2511 out3:
2512         shm_mnt = ERR_PTR(error);
2513         return error;
2514 }
2515 module_init(init_tmpfs)
2516
2517 /*
2518  * shmem_file_setup - get an unlinked file living in tmpfs
2519  *
2520  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2521  * @size: size to be set for the file
2522  * @flags: vm flags; VM_ACCOUNT charges the size to committed vm memory
2523  */
2524 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
2525 {
2526         int error;
2527         struct file *file;
2528         struct inode *inode;
2529         struct dentry *dentry, *root;
2530         struct qstr this;
2531
2532         if (IS_ERR(shm_mnt))
2533                 return (void *)shm_mnt;
2534
2535         if (size < 0 || size > SHMEM_MAX_BYTES)
2536                 return ERR_PTR(-EINVAL);
2537
2538         if (shmem_acct_size(flags, size))
2539                 return ERR_PTR(-ENOMEM);
2540
2541         error = -ENOMEM;
2542         this.name = name;
2543         this.len = strlen(name);
2544         this.hash = 0; /* will go */
2545         root = shm_mnt->mnt_root;
2546         dentry = d_alloc(root, &this);
2547         if (!dentry)
2548                 goto put_memory;
2549
2550         error = -ENFILE;
2551         file = get_empty_filp();
2552         if (!file)
2553                 goto put_dentry;
2554
2555         error = -ENOSPC;
2556         inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
2557         if (!inode)
2558                 goto close_file;
2559
2560         SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
2561         d_instantiate(dentry, inode);
2562         inode->i_size = size;
2563         inode->i_nlink = 0;     /* It is unlinked */
2564         file->f_path.mnt = mntget(shm_mnt);
2565         file->f_path.dentry = dentry;
2566         file->f_mapping = inode->i_mapping;
2567         file->f_op = &shmem_file_operations;
2568         file->f_mode = FMODE_WRITE | FMODE_READ;
2569         return file;
2570
2571 close_file:
2572         put_filp(file);
2573 put_dentry:
2574         dput(dentry);
2575 put_memory:
2576         shmem_unacct_size(flags, size);
2577         return ERR_PTR(error);
2578 }
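/*
 * A minimal in-kernel usage sketch (error handling trimmed; the name
 * and size here are made up):
 *
 *	struct file *filp = shmem_file_setup("my-buf", 64 * 1024, VM_ACCOUNT);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	...
 *	fput(filp);
 *
 * Since i_nlink is 0, the backing store vanishes on the final fput.
 */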
2579
2580 /*
2581  * shmem_zero_setup - setup a shared anonymous mapping
2582  *
2583  * @vma: the vma to be mmapped, already prepared by do_mmap_pgoff
2584  */
2585 int shmem_zero_setup(struct vm_area_struct *vma)
2586 {
2587         struct file *file;
2588         loff_t size = vma->vm_end - vma->vm_start;
2589
2590         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2591         if (IS_ERR(file))
2592                 return PTR_ERR(file);
2593
2594         if (vma->vm_file)
2595                 fput(vma->vm_file);
2596         vma->vm_file = file;
2597         vma->vm_ops = &shmem_vm_ops;
2598         return 0;
2599 }