hugetlb reservations: fix hugetlb MAP_PRIVATE reservations across vma splits
1 /*
2  * Generic hugetlb support.
3  * (C) William Irwin, April 2004
4  */
5 #include <linux/gfp.h>
6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/module.h>
9 #include <linux/mm.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/cpuset.h>
16 #include <linux/mutex.h>
17
18 #include <asm/page.h>
19 #include <asm/pgtable.h>
20
21 #include <linux/hugetlb.h>
22 #include "internal.h"
23
24 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
25 static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
26 static unsigned long surplus_huge_pages;
27 static unsigned long nr_overcommit_huge_pages;
28 unsigned long max_huge_pages;
29 unsigned long sysctl_overcommit_huge_pages;
30 static struct list_head hugepage_freelists[MAX_NUMNODES];
31 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
32 static unsigned int free_huge_pages_node[MAX_NUMNODES];
33 static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
34 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
35 unsigned long hugepages_treat_as_movable;
36 static int hugetlb_next_nid;
37
38 /*
39  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
40  */
41 static DEFINE_SPINLOCK(hugetlb_lock);
42
43 /*
44  * Region tracking -- allows tracking of reservations and instantiated pages
45  *                    across the pages in a mapping.
46  *
47  * The region data structures are protected by a combination of the mmap_sem
48  * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
49  * must either hold the mmap_sem for write, or the mmap_sem for read and
50  * the hugetlb_instantiation mutex:
51  *
52  *      down_write(&mm->mmap_sem);
53  * or
54  *      down_read(&mm->mmap_sem);
55  *      mutex_lock(&hugetlb_instantiation_mutex);
56  */
57 struct file_region {
58         struct list_head link;
59         long from;
60         long to;
61 };
62
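/*
 * Add the range [f, t) to the reservation map, merging any existing
 * regions it overlaps or abuts.  Callers must have made a matching
 * region_chg() call for the same range first, which guarantees a
 * suitable region entry already exists, so this never allocates and
 * cannot fail.
 */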
63 static long region_add(struct list_head *head, long f, long t)
64 {
65         struct file_region *rg, *nrg, *trg;
66
67         /* Locate the region we are either in or before. */
68         list_for_each_entry(rg, head, link)
69                 if (f <= rg->to)
70                         break;
71
72         /* Round our left edge to the current segment if it encloses us. */
73         if (f > rg->from)
74                 f = rg->from;
75
76         /* Check for and consume any regions we now overlap with. */
77         nrg = rg;
78         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
79                 if (&rg->link == head)
80                         break;
81                 if (rg->from > t)
82                         break;
83
84                 /* If this area reaches higher, then extend our area to
85                  * include it completely.  If this is not the first area
86                  * which we intend to reuse, free it. */
87                 if (rg->to > t)
88                         t = rg->to;
89                 if (rg != nrg) {
90                         list_del(&rg->link);
91                         kfree(rg);
92                 }
93         }
94         nrg->from = f;
95         nrg->to = t;
96         return 0;
97 }
98
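/*
 * Return how many additional huge pages must be reserved to cover the
 * range [f, t), given the regions already recorded in the map.  When no
 * existing region overlaps the range, a zero-sized placeholder entry may
 * be allocated here so that a later region_add() for the same range
 * cannot fail for lack of memory.
 *
 * For example (illustrative only): with regions [0,2) and [5,6) already
 * recorded, region_chg(head, 1, 6) rounds its left edge down to 0,
 * starts from 6 pages, subtracts the 3 pages already reserved and
 * returns 3.
 */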
99 static long region_chg(struct list_head *head, long f, long t)
100 {
101         struct file_region *rg, *nrg;
102         long chg = 0;
103
104         /* Locate the region we are before or in. */
105         list_for_each_entry(rg, head, link)
106                 if (f <= rg->to)
107                         break;
108
109         /* If we are below the current region then a new region is required.
110          * Subtle: allocate a new region at the position but make it zero
111          * size such that we can guarantee to record the reservation. */
112         if (&rg->link == head || t < rg->from) {
113                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
114                 if (!nrg)
115                         return -ENOMEM;
116                 nrg->from = f;
117                 nrg->to   = f;
118                 INIT_LIST_HEAD(&nrg->link);
119                 list_add(&nrg->link, rg->link.prev);
120
121                 return t - f;
122         }
123
124         /* Round our left edge to the current segment if it encloses us. */
125         if (f > rg->from)
126                 f = rg->from;
127         chg = t - f;
128
129         /* Check for and consume any regions we now overlap with. */
130         list_for_each_entry(rg, rg->link.prev, link) {
131                 if (&rg->link == head)
132                         break;
133                 if (rg->from > t)
134                         return chg;
135
136          * We overlap with this area; if it extends further than
137                  * us then we must extend ourselves.  Account for its
138                  * existing reservation. */
139                 if (rg->to > t) {
140                         chg += rg->to - t;
141                         t = rg->to;
142                 }
143                 chg -= rg->to - rg->from;
144         }
145         return chg;
146 }
147
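/*
 * Remove all reservations in the map from offset 'end' onwards, trimming
 * any region that straddles 'end'.  Returns the number of reserved pages
 * released.
 */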
148 static long region_truncate(struct list_head *head, long end)
149 {
150         struct file_region *rg, *trg;
151         long chg = 0;
152
153         /* Locate the region we are either in or before. */
154         list_for_each_entry(rg, head, link)
155                 if (end <= rg->to)
156                         break;
157         if (&rg->link == head)
158                 return 0;
159
160         /* If we are in the middle of a region then adjust it. */
161         if (end > rg->from) {
162                 chg = rg->to - end;
163                 rg->to = end;
164                 rg = list_entry(rg->link.next, typeof(*rg), link);
165         }
166
167         /* Drop any remaining regions. */
168         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
169                 if (&rg->link == head)
170                         break;
171                 chg += rg->to - rg->from;
172                 list_del(&rg->link);
173                 kfree(rg);
174         }
175         return chg;
176 }
177
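/*
 * Count how many pages in the range [f, t) are covered by regions in the
 * map, i.e. how many reserved pages the range overlaps.
 */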
178 static long region_count(struct list_head *head, long f, long t)
179 {
180         struct file_region *rg;
181         long chg = 0;
182
183         /* Locate each segment we overlap with, and count that overlap. */
184         list_for_each_entry(rg, head, link) {
185                 long seg_from;
186                 long seg_to;
187
188                 if (rg->to <= f)
189                         continue;
190                 if (rg->from >= t)
191                         break;
192
193                 seg_from = max(rg->from, f);
194                 seg_to = min(rg->to, t);
195
196                 chg += seg_to - seg_from;
197         }
198
199         return chg;
200 }
201
202 /*
203  * Convert the address within this vma to the page offset within
204  * the mapping, in base page units.
205  */
206 static pgoff_t vma_page_offset(struct vm_area_struct *vma,
207                                 unsigned long address)
208 {
209         return ((address - vma->vm_start) >> PAGE_SHIFT) +
210                                         (vma->vm_pgoff >> PAGE_SHIFT);
211 }
212
213 /*
214  * Convert the address within this vma to the page offset within
215  * the mapping, in pagecache page units; huge pages here.
216  */
217 static pgoff_t vma_pagecache_offset(struct vm_area_struct *vma,
218                                         unsigned long address)
219 {
220         return ((address - vma->vm_start) >> HPAGE_SHIFT) +
221                         (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
222 }
223
224 /*
225  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
226  * bits of the reservation map pointer, which are always clear due to
227  * alignment.
228  */
229 #define HPAGE_RESV_OWNER    (1UL << 0)
230 #define HPAGE_RESV_UNMAPPED (1UL << 1)
231 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
232
233 /*
234  * These helpers are used to track how many pages are reserved for
235  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
236  * is guaranteed to have its future faults succeed.
237  *
238  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
239  * the reserve counters are updated with the hugetlb_lock held. It is safe
240  * to reset the VMA at fork() time as it is not in use yet and there is no
241  * chance of the global counters getting corrupted as a result of the values.
242  *
243  * The private mapping reservation is represented in a subtly different
244  * manner to a shared mapping.  A shared mapping has a region map associated
245  * with the underlying file, this region map represents the backing file
246  * pages which have ever had a reservation assigned; this persists even
247  * after the page is instantiated.  A private mapping has a region map
248  * associated with the original mmap which is attached to all VMAs which
249  * reference it, this region map represents those offsets which have consumed
250  * reservation, i.e. where pages have been instantiated.
251  */
252 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
253 {
254         return (unsigned long)vma->vm_private_data;
255 }
256
257 static void set_vma_private_data(struct vm_area_struct *vma,
258                                                         unsigned long value)
259 {
260         vma->vm_private_data = (void *)value;
261 }
262
263 struct resv_map {
264         struct kref refs;
265         struct list_head regions;
266 };
267
268 struct resv_map *resv_map_alloc(void)
269 {
270         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
271         if (!resv_map)
272                 return NULL;
273
274         kref_init(&resv_map->refs);
275         INIT_LIST_HEAD(&resv_map->regions);
276
277         return resv_map;
278 }
279
280 void resv_map_release(struct kref *ref)
281 {
282         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
283
284         /* Clear out any active regions before we release the map. */
285         region_truncate(&resv_map->regions, 0);
286         kfree(resv_map);
287 }
288
289 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
290 {
291         VM_BUG_ON(!is_vm_hugetlb_page(vma));
292         if (!(vma->vm_flags & VM_SHARED))
293                 return (struct resv_map *)(get_vma_private_data(vma) &
294                                                         ~HPAGE_RESV_MASK);
295         return NULL;
296 }
297
298 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
299 {
300         VM_BUG_ON(!is_vm_hugetlb_page(vma));
301         VM_BUG_ON(vma->vm_flags & VM_SHARED);
302
303         set_vma_private_data(vma, (get_vma_private_data(vma) &
304                                 HPAGE_RESV_MASK) | (unsigned long)map);
305 }
306
307 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
308 {
309         VM_BUG_ON(!is_vm_hugetlb_page(vma));
310         VM_BUG_ON(vma->vm_flags & VM_SHARED);
311
312         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
313 }
314
315 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
316 {
317         VM_BUG_ON(!is_vm_hugetlb_page(vma));
318
319         return (get_vma_private_data(vma) & flag) != 0;
320 }
321
322 /* Decrement the reserved pages in the hugepage pool by one */
323 static void decrement_hugepage_resv_vma(struct vm_area_struct *vma)
324 {
325         if (vma->vm_flags & VM_NORESERVE)
326                 return;
327
328         if (vma->vm_flags & VM_SHARED) {
329                 /* Shared mappings always use reserves */
330                 resv_huge_pages--;
331         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
332                 /*
333                  * Only the process that called mmap() has reserves for
334                  * private mappings.
335                  */
336                 resv_huge_pages--;
337         }
338 }
339
340 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
341 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
342 {
343         VM_BUG_ON(!is_vm_hugetlb_page(vma));
344         if (!(vma->vm_flags & VM_SHARED))
345                 vma->vm_private_data = (void *)0;
346 }
347
348 /* Returns true if the VMA has associated reserve pages */
349 static int vma_has_private_reserves(struct vm_area_struct *vma)
350 {
351         if (vma->vm_flags & VM_SHARED)
352                 return 0;
353         if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER))
354                 return 0;
355         return 1;
356 }
357
358 static void clear_huge_page(struct page *page, unsigned long addr)
359 {
360         int i;
361
362         might_sleep();
363         for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
364                 cond_resched();
365                 clear_user_highpage(page + i, addr + i * PAGE_SIZE);
366         }
367 }
368
369 static void copy_huge_page(struct page *dst, struct page *src,
370                            unsigned long addr, struct vm_area_struct *vma)
371 {
372         int i;
373
374         might_sleep();
375         for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
376                 cond_resched();
377                 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
378         }
379 }
380
381 static void enqueue_huge_page(struct page *page)
382 {
383         int nid = page_to_nid(page);
384         list_add(&page->lru, &hugepage_freelists[nid]);
385         free_huge_pages++;
386         free_huge_pages_node[nid]++;
387 }
388
389 static struct page *dequeue_huge_page(void)
390 {
391         int nid;
392         struct page *page = NULL;
393
394         for (nid = 0; nid < MAX_NUMNODES; ++nid) {
395                 if (!list_empty(&hugepage_freelists[nid])) {
396                         page = list_entry(hugepage_freelists[nid].next,
397                                           struct page, lru);
398                         list_del(&page->lru);
399                         free_huge_pages--;
400                         free_huge_pages_node[nid]--;
401                         break;
402                 }
403         }
404         return page;
405 }
406
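/*
 * Dequeue a free huge page for a fault in 'vma' at 'address', preferring
 * nodes allowed by the VMA's memory policy and cpuset.  Unless
 * avoid_reserve is set, a successful dequeue may consume one reserved
 * page via decrement_hugepage_resv_vma().
 */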
407 static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
408                                 unsigned long address, int avoid_reserve)
409 {
410         int nid;
411         struct page *page = NULL;
412         struct mempolicy *mpol;
413         nodemask_t *nodemask;
414         struct zonelist *zonelist = huge_zonelist(vma, address,
415                                         htlb_alloc_mask, &mpol, &nodemask);
416         struct zone *zone;
417         struct zoneref *z;
418
419         /*
420          * A child process with MAP_PRIVATE mappings created by its parent
421          * has no page reserves. This check ensures that reservations are
422          * not "stolen". The child may still get SIGKILLed.
423          */
424         if (!vma_has_private_reserves(vma) &&
425                         free_huge_pages - resv_huge_pages == 0)
426                 return NULL;
427
428         /* If reserves cannot be used, ensure enough pages are in the pool */
429         if (avoid_reserve && free_huge_pages - resv_huge_pages == 0)
430                 return NULL;
431
432         for_each_zone_zonelist_nodemask(zone, z, zonelist,
433                                                 MAX_NR_ZONES - 1, nodemask) {
434                 nid = zone_to_nid(zone);
435                 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
436                     !list_empty(&hugepage_freelists[nid])) {
437                         page = list_entry(hugepage_freelists[nid].next,
438                                           struct page, lru);
439                         list_del(&page->lru);
440                         free_huge_pages--;
441                         free_huge_pages_node[nid]--;
442
443                         if (!avoid_reserve)
444                                 decrement_hugepage_resv_vma(vma);
445
446                         break;
447                 }
448         }
449         mpol_cond_put(mpol);
450         return page;
451 }
452
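/*
 * Remove a huge page from the hugetlb accounting, clear its compound
 * state and page flags, and hand it back to the buddy allocator.
 */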
453 static void update_and_free_page(struct page *page)
454 {
455         int i;
456         nr_huge_pages--;
457         nr_huge_pages_node[page_to_nid(page)]--;
458         for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
459                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
460                                 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
461                                 1 << PG_private | 1<< PG_writeback);
462         }
463         set_compound_page_dtor(page, NULL);
464         set_page_refcounted(page);
465         arch_release_hugepage(page);
466         __free_pages(page, HUGETLB_PAGE_ORDER);
467 }
468
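/*
 * Compound page destructor, called when the last reference to a huge
 * page is dropped.  Surplus pages are returned to the buddy allocator;
 * otherwise the page goes back on the hugetlb free list.  Any quota
 * charged against the owning mapping is released.
 */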
469 static void free_huge_page(struct page *page)
470 {
471         int nid = page_to_nid(page);
472         struct address_space *mapping;
473
474         mapping = (struct address_space *) page_private(page);
475         set_page_private(page, 0);
476         BUG_ON(page_count(page));
477         INIT_LIST_HEAD(&page->lru);
478
479         spin_lock(&hugetlb_lock);
480         if (surplus_huge_pages_node[nid]) {
481                 update_and_free_page(page);
482                 surplus_huge_pages--;
483                 surplus_huge_pages_node[nid]--;
484         } else {
485                 enqueue_huge_page(page);
486         }
487         spin_unlock(&hugetlb_lock);
488         if (mapping)
489                 hugetlb_put_quota(mapping, 1);
490 }
491
492 /*
493  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
494  * balanced by operating on them in a round-robin fashion.
495  * Returns 1 if an adjustment was made.
496  */
497 static int adjust_pool_surplus(int delta)
498 {
499         static int prev_nid;
500         int nid = prev_nid;
501         int ret = 0;
502
503         VM_BUG_ON(delta != -1 && delta != 1);
504         do {
505                 nid = next_node(nid, node_online_map);
506                 if (nid == MAX_NUMNODES)
507                         nid = first_node(node_online_map);
508
509                 /* To shrink on this node, there must be a surplus page */
510                 if (delta < 0 && !surplus_huge_pages_node[nid])
511                         continue;
512                 /* Surplus cannot exceed the total number of pages */
513                 if (delta > 0 && surplus_huge_pages_node[nid] >=
514                                                 nr_huge_pages_node[nid])
515                         continue;
516
517                 surplus_huge_pages += delta;
518                 surplus_huge_pages_node[nid] += delta;
519                 ret = 1;
520                 break;
521         } while (nid != prev_nid);
522
523         prev_nid = nid;
524         return ret;
525 }
526
527 static struct page *alloc_fresh_huge_page_node(int nid)
528 {
529         struct page *page;
530
531         page = alloc_pages_node(nid,
532                 htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
533                                                 __GFP_REPEAT|__GFP_NOWARN,
534                 HUGETLB_PAGE_ORDER);
535         if (page) {
536                 if (arch_prepare_hugepage(page)) {
537                         __free_pages(page, HUGETLB_PAGE_ORDER);
538                         return NULL;
539                 }
540                 set_compound_page_dtor(page, free_huge_page);
541                 spin_lock(&hugetlb_lock);
542                 nr_huge_pages++;
543                 nr_huge_pages_node[nid]++;
544                 spin_unlock(&hugetlb_lock);
545                 put_page(page); /* free it into the hugepage allocator */
546         }
547
548         return page;
549 }
550
551 static int alloc_fresh_huge_page(void)
552 {
553         struct page *page;
554         int start_nid;
555         int next_nid;
556         int ret = 0;
557
558         start_nid = hugetlb_next_nid;
559
560         do {
561                 page = alloc_fresh_huge_page_node(hugetlb_next_nid);
562                 if (page)
563                         ret = 1;
564                 /*
565                  * Use a helper variable to find the next node and then
566                  * copy it back to hugetlb_next_nid afterwards:
567                  * otherwise there's a window in which a racer might
568                  * pass invalid nid MAX_NUMNODES to alloc_pages_node.
569                  * But we don't need to use a spin_lock here: it really
570                  * doesn't matter if occasionally a racer chooses the
571                  * same nid as we do.  Move nid forward in the mask even
572                  * if we just successfully allocated a hugepage so that
573                  * the next caller gets hugepages on the next node.
574                  */
575                 next_nid = next_node(hugetlb_next_nid, node_online_map);
576                 if (next_nid == MAX_NUMNODES)
577                         next_nid = first_node(node_online_map);
578                 hugetlb_next_nid = next_nid;
579         } while (!page && hugetlb_next_nid != start_nid);
580
581         if (ret)
582                 count_vm_event(HTLB_BUDDY_PGALLOC);
583         else
584                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
585
586         return ret;
587 }
588
589 static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
590                                                 unsigned long address)
591 {
592         struct page *page;
593         unsigned int nid;
594
595         /*
596          * Assume we will successfully allocate the surplus page to
597          * prevent racing processes from causing the surplus to exceed
598          * overcommit.
599          *
600          * This however introduces a different race, where a process B
601          * tries to grow the static hugepage pool while alloc_pages() is
602          * called by process A. B will only examine the per-node
603          * counters in determining if surplus huge pages can be
604          * converted to normal huge pages in adjust_pool_surplus(). A
605          * won't be able to increment the per-node counter, until the
606          * lock is dropped by B, but B doesn't drop hugetlb_lock until
607          * no more huge pages can be converted from surplus to normal
608          * state (and doesn't try to convert again). Thus, we have a
609          * case where a surplus huge page exists, the pool is grown, and
610          * the surplus huge page still exists after, even though it
611          * should just have been converted to a normal huge page. This
612          * does not leak memory, though, as the hugepage will be freed
613          * once it is out of use. It also does not allow the counters to
614          * go out of whack in adjust_pool_surplus() as we don't modify
615          * the node values until we've gotten the hugepage and only the
616          * per-node value is checked there.
617          */
618         spin_lock(&hugetlb_lock);
619         if (surplus_huge_pages >= nr_overcommit_huge_pages) {
620                 spin_unlock(&hugetlb_lock);
621                 return NULL;
622         } else {
623                 nr_huge_pages++;
624                 surplus_huge_pages++;
625         }
626         spin_unlock(&hugetlb_lock);
627
628         page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
629                                         __GFP_REPEAT|__GFP_NOWARN,
630                                         HUGETLB_PAGE_ORDER);
631
632         spin_lock(&hugetlb_lock);
633         if (page) {
634                 /*
635                  * This page is now managed by the hugetlb allocator and has
636                  * no users -- drop the buddy allocator's reference.
637                  */
638                 put_page_testzero(page);
639                 VM_BUG_ON(page_count(page));
640                 nid = page_to_nid(page);
641                 set_compound_page_dtor(page, free_huge_page);
642                 /*
643                  * We incremented the global counters already
644                  */
645                 nr_huge_pages_node[nid]++;
646                 surplus_huge_pages_node[nid]++;
647                 __count_vm_event(HTLB_BUDDY_PGALLOC);
648         } else {
649                 nr_huge_pages--;
650                 surplus_huge_pages--;
651                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
652         }
653         spin_unlock(&hugetlb_lock);
654
655         return page;
656 }
657
658 /*
659  * Increase the hugetlb pool such that it can accommodate a reservation
660  * of size 'delta'.
661  */
662 static int gather_surplus_pages(int delta)
663 {
664         struct list_head surplus_list;
665         struct page *page, *tmp;
666         int ret, i;
667         int needed, allocated;
668
669         needed = (resv_huge_pages + delta) - free_huge_pages;
670         if (needed <= 0) {
671                 resv_huge_pages += delta;
672                 return 0;
673         }
674
675         allocated = 0;
676         INIT_LIST_HEAD(&surplus_list);
677
678         ret = -ENOMEM;
679 retry:
680         spin_unlock(&hugetlb_lock);
681         for (i = 0; i < needed; i++) {
682                 page = alloc_buddy_huge_page(NULL, 0);
683                 if (!page) {
684                         /*
685                          * We were not able to allocate enough pages to
686                          * satisfy the entire reservation so we free what
687                          * we've allocated so far.
688                          */
689                         spin_lock(&hugetlb_lock);
690                         needed = 0;
691                         goto free;
692                 }
693
694                 list_add(&page->lru, &surplus_list);
695         }
696         allocated += needed;
697
698         /*
699          * After retaking hugetlb_lock, we need to recalculate 'needed'
700          * because either resv_huge_pages or free_huge_pages may have changed.
701          */
702         spin_lock(&hugetlb_lock);
703         needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
704         if (needed > 0)
705                 goto retry;
706
707         /*
708          * The surplus_list now contains _at_least_ the number of extra pages
709          * needed to accommodate the reservation.  Add the appropriate number
710          * of pages to the hugetlb pool and free the extras back to the buddy
711          * allocator.  Commit the entire reservation here to prevent another
712          * process from stealing the pages as they are added to the pool but
713          * before they are reserved.
714          */
715         needed += allocated;
716         resv_huge_pages += delta;
717         ret = 0;
718 free:
719         /* Free the needed pages to the hugetlb pool */
720         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
721                 if ((--needed) < 0)
722                         break;
723                 list_del(&page->lru);
724                 enqueue_huge_page(page);
725         }
726
727         /* Free unnecessary surplus pages to the buddy allocator */
728         if (!list_empty(&surplus_list)) {
729                 spin_unlock(&hugetlb_lock);
730                 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
731                         list_del(&page->lru);
732                         /*
733                          * The page has a reference count of zero already, so
734                          * call free_huge_page directly instead of using
735                          * put_page.  This must be done with hugetlb_lock
736                          * unlocked which is safe because free_huge_page takes
737                          * hugetlb_lock before deciding how to free the page.
738                          */
739                         free_huge_page(page);
740                 }
741                 spin_lock(&hugetlb_lock);
742         }
743
744         return ret;
745 }
746
747 /*
748  * When releasing a hugetlb pool reservation, any surplus pages that were
749  * allocated to satisfy the reservation must be explicitly freed if they were
750  * never used.
751  */
752 static void return_unused_surplus_pages(unsigned long unused_resv_pages)
753 {
754         static int nid = -1;
755         struct page *page;
756         unsigned long nr_pages;
757
758         /*
759          * We want to release as many surplus pages as possible, spread
760          * evenly across all nodes. Iterate across all nodes until we
761          * can no longer free unreserved surplus pages. This occurs when
762          * the nodes with surplus pages have no free pages.
763          */
764         unsigned long remaining_iterations = num_online_nodes();
765
766         /* Uncommit the reservation */
767         resv_huge_pages -= unused_resv_pages;
768
769         nr_pages = min(unused_resv_pages, surplus_huge_pages);
770
771         while (remaining_iterations-- && nr_pages) {
772                 nid = next_node(nid, node_online_map);
773                 if (nid == MAX_NUMNODES)
774                         nid = first_node(node_online_map);
775
776                 if (!surplus_huge_pages_node[nid])
777                         continue;
778
779                 if (!list_empty(&hugepage_freelists[nid])) {
780                         page = list_entry(hugepage_freelists[nid].next,
781                                           struct page, lru);
782                         list_del(&page->lru);
783                         update_and_free_page(page);
784                         free_huge_pages--;
785                         free_huge_pages_node[nid]--;
786                         surplus_huge_pages--;
787                         surplus_huge_pages_node[nid]--;
788                         nr_pages--;
789                         remaining_iterations = num_online_nodes();
790                 }
791         }
792 }
793
794 /*
795  * Determine if the huge page at addr within the vma has an associated
796  * reservation.  Where it does not, we will need to logically increase the
797  * reservation and actually increase quota before an allocation can occur.
798  * Where any new reservation would be required the reservation change is
799  * prepared, but not committed.  Once the page has been quota'd, allocated
800  * and instantiated, the change should be committed via vma_commit_reservation.
801  * No action is required on failure.
802  */
803 static int vma_needs_reservation(struct vm_area_struct *vma, unsigned long addr)
804 {
805         struct address_space *mapping = vma->vm_file->f_mapping;
806         struct inode *inode = mapping->host;
807
808         if (vma->vm_flags & VM_SHARED) {
809                 pgoff_t idx = vma_pagecache_offset(vma, addr);
810                 return region_chg(&inode->i_mapping->private_list,
811                                                         idx, idx + 1);
812
813         } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
814                 return 1;
815
816         } else  {
817                 int err;
818                 pgoff_t idx = vma_pagecache_offset(vma, addr);
819                 struct resv_map *reservations = vma_resv_map(vma);
820
821                 err = region_chg(&reservations->regions, idx, idx + 1);
822                 if (err < 0)
823                         return err;
824                 return 0;
825         }
826 }
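/*
 * Commit the reservation change prepared by vma_needs_reservation() once
 * the page has been allocated and instantiated, recording the offset in
 * the shared file's region map or the private mapping's resv_map.
 */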
827 static void vma_commit_reservation(struct vm_area_struct *vma,
828                                                         unsigned long addr)
829 {
830         struct address_space *mapping = vma->vm_file->f_mapping;
831         struct inode *inode = mapping->host;
832
833         if (vma->vm_flags & VM_SHARED) {
834                 pgoff_t idx = vma_pagecache_offset(vma, addr);
835                 region_add(&inode->i_mapping->private_list, idx, idx + 1);
836
837         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
838                 pgoff_t idx = vma_pagecache_offset(vma, addr);
839                 struct resv_map *reservations = vma_resv_map(vma);
840
841                 /* Mark this page used in the map. */
842                 region_add(&reservations->regions, idx, idx + 1);
843         }
844 }
845
846 static struct page *alloc_huge_page(struct vm_area_struct *vma,
847                                     unsigned long addr, int avoid_reserve)
848 {
849         struct page *page;
850         struct address_space *mapping = vma->vm_file->f_mapping;
851         struct inode *inode = mapping->host;
852         long chg;
853
854         /*
855          * Processes that did not create the mapping will have no reserves and
856          * will not have accounted against quota. Check that the quota can be
857          * made before satisfying the allocation.
858          * MAP_NORESERVE mappings may also need pages and quota allocated
859          * if no reserve mapping overlaps.
860          */
861         chg = vma_needs_reservation(vma, addr);
862         if (chg < 0)
863                 return ERR_PTR(chg);
864         if (chg)
865                 if (hugetlb_get_quota(inode->i_mapping, chg))
866                         return ERR_PTR(-ENOSPC);
867
868         spin_lock(&hugetlb_lock);
869         page = dequeue_huge_page_vma(vma, addr, avoid_reserve);
870         spin_unlock(&hugetlb_lock);
871
872         if (!page) {
873                 page = alloc_buddy_huge_page(vma, addr);
874                 if (!page) {
875                         hugetlb_put_quota(inode->i_mapping, chg);
876                         return ERR_PTR(-VM_FAULT_OOM);
877                 }
878         }
879
880         set_page_refcounted(page);
881         set_page_private(page, (unsigned long) mapping);
882
883         vma_commit_reservation(vma, addr);
884
885         return page;
886 }
887
888 static int __init hugetlb_init(void)
889 {
890         unsigned long i;
891
892         if (HPAGE_SHIFT == 0)
893                 return 0;
894
895         for (i = 0; i < MAX_NUMNODES; ++i)
896                 INIT_LIST_HEAD(&hugepage_freelists[i]);
897
898         hugetlb_next_nid = first_node(node_online_map);
899
900         for (i = 0; i < max_huge_pages; ++i) {
901                 if (!alloc_fresh_huge_page())
902                         break;
903         }
904         max_huge_pages = free_huge_pages = nr_huge_pages = i;
905         printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
906         return 0;
907 }
908 module_init(hugetlb_init);
909
910 static int __init hugetlb_setup(char *s)
911 {
912         if (sscanf(s, "%lu", &max_huge_pages) <= 0)
913                 max_huge_pages = 0;
914         return 1;
915 }
916 __setup("hugepages=", hugetlb_setup);
917
918 static unsigned int cpuset_mems_nr(unsigned int *array)
919 {
920         int node;
921         unsigned int nr = 0;
922
923         for_each_node_mask(node, cpuset_current_mems_allowed)
924                 nr += array[node];
925
926         return nr;
927 }
928
929 #ifdef CONFIG_SYSCTL
930 #ifdef CONFIG_HIGHMEM
931 static void try_to_free_low(unsigned long count)
932 {
933         int i;
934
935         for (i = 0; i < MAX_NUMNODES; ++i) {
936                 struct page *page, *next;
937                 list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
938                         if (count >= nr_huge_pages)
939                                 return;
940                         if (PageHighMem(page))
941                                 continue;
942                         list_del(&page->lru);
943                         update_and_free_page(page);
944                         free_huge_pages--;
945                         free_huge_pages_node[page_to_nid(page)]--;
946                 }
947         }
948 }
949 #else
950 static inline void try_to_free_low(unsigned long count)
951 {
952 }
953 #endif
954
955 #define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
956 static unsigned long set_max_huge_pages(unsigned long count)
957 {
958         unsigned long min_count, ret;
959
960         /*
961          * Increase the pool size
962          * First take pages out of surplus state.  Then make up the
963          * remaining difference by allocating fresh huge pages.
964          *
965          * We might race with alloc_buddy_huge_page() here and be unable
966          * to convert a surplus huge page to a normal huge page. That is
967          * not critical, though, it just means the overall size of the
968          * pool might be one hugepage larger than it needs to be, but
969          * within all the constraints specified by the sysctls.
970          */
971         spin_lock(&hugetlb_lock);
972         while (surplus_huge_pages && count > persistent_huge_pages) {
973                 if (!adjust_pool_surplus(-1))
974                         break;
975         }
976
977         while (count > persistent_huge_pages) {
978                 /*
979                  * If this allocation races such that we no longer need the
980                  * page, free_huge_page will handle it by freeing the page
981                  * and reducing the surplus.
982                  */
983                 spin_unlock(&hugetlb_lock);
984                 ret = alloc_fresh_huge_page();
985                 spin_lock(&hugetlb_lock);
986                 if (!ret)
987                         goto out;
988
989         }
990
991         /*
992          * Decrease the pool size
993          * First return free pages to the buddy allocator (being careful
994          * to keep enough around to satisfy reservations).  Then place
995          * pages into surplus state as needed so the pool will shrink
996          * to the desired size as pages become free.
997          *
998          * By placing pages into the surplus state independent of the
999          * overcommit value, we are allowing the surplus pool size to
1000          * exceed overcommit. There are few sane options here. Since
1001          * alloc_buddy_huge_page() is checking the global counter,
1002          * though, we'll note that we're not allowed to exceed surplus
1003          * and won't grow the pool anywhere else. Not until one of the
1004          * sysctls are changed, or the surplus pages go out of use.
1005          */
1006         min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
1007         min_count = max(count, min_count);
1008         try_to_free_low(min_count);
1009         while (min_count < persistent_huge_pages) {
1010                 struct page *page = dequeue_huge_page();
1011                 if (!page)
1012                         break;
1013                 update_and_free_page(page);
1014         }
1015         while (count < persistent_huge_pages) {
1016                 if (!adjust_pool_surplus(1))
1017                         break;
1018         }
1019 out:
1020         ret = persistent_huge_pages;
1021         spin_unlock(&hugetlb_lock);
1022         return ret;
1023 }
1024
1025 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
1026                            struct file *file, void __user *buffer,
1027                            size_t *length, loff_t *ppos)
1028 {
1029         proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
1030         max_huge_pages = set_max_huge_pages(max_huge_pages);
1031         return 0;
1032 }
1033
1034 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
1035                         struct file *file, void __user *buffer,
1036                         size_t *length, loff_t *ppos)
1037 {
1038         proc_dointvec(table, write, file, buffer, length, ppos);
1039         if (hugepages_treat_as_movable)
1040                 htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
1041         else
1042                 htlb_alloc_mask = GFP_HIGHUSER;
1043         return 0;
1044 }
1045
1046 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
1047                         struct file *file, void __user *buffer,
1048                         size_t *length, loff_t *ppos)
1049 {
1050         proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
1051         spin_lock(&hugetlb_lock);
1052         nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
1053         spin_unlock(&hugetlb_lock);
1054         return 0;
1055 }
1056
1057 #endif /* CONFIG_SYSCTL */
1058
1059 int hugetlb_report_meminfo(char *buf)
1060 {
1061         return sprintf(buf,
1062                         "HugePages_Total: %5lu\n"
1063                         "HugePages_Free:  %5lu\n"
1064                         "HugePages_Rsvd:  %5lu\n"
1065                         "HugePages_Surp:  %5lu\n"
1066                         "Hugepagesize:    %5lu kB\n",
1067                         nr_huge_pages,
1068                         free_huge_pages,
1069                         resv_huge_pages,
1070                         surplus_huge_pages,
1071                         HPAGE_SIZE/1024);
1072 }
1073
1074 int hugetlb_report_node_meminfo(int nid, char *buf)
1075 {
1076         return sprintf(buf,
1077                 "Node %d HugePages_Total: %5u\n"
1078                 "Node %d HugePages_Free:  %5u\n"
1079                 "Node %d HugePages_Surp:  %5u\n",
1080                 nid, nr_huge_pages_node[nid],
1081                 nid, free_huge_pages_node[nid],
1082                 nid, surplus_huge_pages_node[nid]);
1083 }
1084
1085 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
1086 unsigned long hugetlb_total_pages(void)
1087 {
1088         return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
1089 }
1090
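/*
 * Charge or uncharge 'delta' huge pages against the global reservation.
 * A positive delta may grow the pool with surplus pages allocated from
 * the buddy allocator; a negative delta returns unused surplus pages.
 */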
1091 static int hugetlb_acct_memory(long delta)
1092 {
1093         int ret = -ENOMEM;
1094
1095         spin_lock(&hugetlb_lock);
1096         /*
1097          * When cpuset is configured, it breaks the strict hugetlb page
1098          * reservation as the accounting is done on a global variable. Such
1099          * reservation is completely rubbish in the presence of cpuset because
1100          * the reservation is not checked against page availability for the
1101          * current cpuset. An application can still potentially be OOM'ed by
1102          * the kernel for lack of free htlb pages in the cpuset the task is in.
1103          * Attempting to enforce strict accounting with cpuset is almost
1104          * impossible (or too ugly) because cpusets are so fluid that
1105          * tasks or memory nodes can be dynamically moved between cpusets.
1106          *
1107          * The change of semantics for shared hugetlb mapping with cpuset is
1108          * undesirable. However, in order to preserve some of the semantics,
1109          * we fall back to check against current free page availability as
1110          * a best attempt and hopefully to minimize the impact of changing
1111          * semantics that cpuset has.
1112          */
1113         if (delta > 0) {
1114                 if (gather_surplus_pages(delta) < 0)
1115                         goto out;
1116
1117                 if (delta > cpuset_mems_nr(free_huge_pages_node)) {
1118                         return_unused_surplus_pages(delta);
1119                         goto out;
1120                 }
1121         }
1122
1123         ret = 0;
1124         if (delta < 0)
1125                 return_unused_surplus_pages((unsigned long) -delta);
1126
1127 out:
1128         spin_unlock(&hugetlb_lock);
1129         return ret;
1130 }
1131
1132 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
1133 {
1134         struct resv_map *reservations = vma_resv_map(vma);
1135
1136         /*
1137          * This new VMA should share its sibling's reservation map if present.
1138          * The VMA will only ever have a valid reservation map pointer where
1139          * it is being copied for another still existing VMA.  As that VMA
1140          * has a reference to the reservation map it cannot disappear until
1141          * after this open call completes.  It is therefore safe to take a
1142          * new reference here without additional locking.
1143          */
1144         if (reservations)
1145                 kref_get(&reservations->refs);
1146 }
1147
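/*
 * On unmap of a private mapping, drop our reference on the reservation
 * map and give back any reserved pages that were never consumed, i.e.
 * offsets in [vm_start, vm_end) not recorded in the region map.
 */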
1148 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
1149 {
1150         struct resv_map *reservations = vma_resv_map(vma);
1151         unsigned long reserve;
1152         unsigned long start;
1153         unsigned long end;
1154
1155         if (reservations) {
1156                 start = vma_pagecache_offset(vma, vma->vm_start);
1157                 end = vma_pagecache_offset(vma, vma->vm_end);
1158
1159                 reserve = (end - start) -
1160                         region_count(&reservations->regions, start, end);
1161
1162                 kref_put(&reservations->refs, resv_map_release);
1163
1164                 if (reserve)
1165                         hugetlb_acct_memory(-reserve);
1166         }
1167 }
1168
1169 /*
1170  * We cannot handle pagefaults against hugetlb pages at all.  They cause
1171  * handle_mm_fault() to try to instantiate regular-sized pages in the
1172  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
1173  * this far.
1174  */
1175 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1176 {
1177         BUG();
1178         return 0;
1179 }
1180
1181 struct vm_operations_struct hugetlb_vm_ops = {
1182         .fault = hugetlb_vm_op_fault,
1183         .open = hugetlb_vm_op_open,
1184         .close = hugetlb_vm_op_close,
1185 };
1186
1187 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
1188                                 int writable)
1189 {
1190         pte_t entry;
1191
1192         if (writable) {
1193                 entry =
1194                     pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
1195         } else {
1196                 entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
1197         }
1198         entry = pte_mkyoung(entry);
1199         entry = pte_mkhuge(entry);
1200
1201         return entry;
1202 }
1203
1204 static void set_huge_ptep_writable(struct vm_area_struct *vma,
1205                                    unsigned long address, pte_t *ptep)
1206 {
1207         pte_t entry;
1208
1209         entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
1210         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
1211                 update_mmu_cache(vma, address, entry);
1212         }
1213 }
1214
1215
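/*
 * Copy the huge page table entries of 'vma' from the parent mm to the
 * child at fork().  For private writable mappings the parent's entries
 * are write-protected so that a later write triggers a COW fault.
 */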
1216 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
1217                             struct vm_area_struct *vma)
1218 {
1219         pte_t *src_pte, *dst_pte, entry;
1220         struct page *ptepage;
1221         unsigned long addr;
1222         int cow;
1223
1224         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
1225
1226         for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
1227                 src_pte = huge_pte_offset(src, addr);
1228                 if (!src_pte)
1229                         continue;
1230                 dst_pte = huge_pte_alloc(dst, addr);
1231                 if (!dst_pte)
1232                         goto nomem;
1233
1234                 /* If the pagetables are shared don't copy or take references */
1235                 if (dst_pte == src_pte)
1236                         continue;
1237
1238                 spin_lock(&dst->page_table_lock);
1239                 spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
1240                 if (!huge_pte_none(huge_ptep_get(src_pte))) {
1241                         if (cow)
1242                                 huge_ptep_set_wrprotect(src, addr, src_pte);
1243                         entry = huge_ptep_get(src_pte);
1244                         ptepage = pte_page(entry);
1245                         get_page(ptepage);
1246                         set_huge_pte_at(dst, addr, dst_pte, entry);
1247                 }
1248                 spin_unlock(&src->page_table_lock);
1249                 spin_unlock(&dst->page_table_lock);
1250         }
1251         return 0;
1252
1253 nomem:
1254         return -ENOMEM;
1255 }
1256
1257 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
1258                             unsigned long end, struct page *ref_page)
1259 {
1260         struct mm_struct *mm = vma->vm_mm;
1261         unsigned long address;
1262         pte_t *ptep;
1263         pte_t pte;
1264         struct page *page;
1265         struct page *tmp;
1266         /*
1267          * A page gathering list, protected by per file i_mmap_lock. The
1268          * lock is used to avoid list corruption from multiple unmapping
1269          * of the same page since we are using page->lru.
1270          */
1271         LIST_HEAD(page_list);
1272
1273         WARN_ON(!is_vm_hugetlb_page(vma));
1274         BUG_ON(start & ~HPAGE_MASK);
1275         BUG_ON(end & ~HPAGE_MASK);
1276
1277         spin_lock(&mm->page_table_lock);
1278         for (address = start; address < end; address += HPAGE_SIZE) {
1279                 ptep = huge_pte_offset(mm, address);
1280                 if (!ptep)
1281                         continue;
1282
1283                 if (huge_pmd_unshare(mm, &address, ptep))
1284                         continue;
1285
1286                 /*
1287                  * If a reference page is supplied, it is because a specific
1288                  * page is being unmapped, not a range. Ensure the page we
1289                  * are about to unmap is the actual page of interest.
1290                  */
1291                 if (ref_page) {
1292                         pte = huge_ptep_get(ptep);
1293                         if (huge_pte_none(pte))
1294                                 continue;
1295                         page = pte_page(pte);
1296                         if (page != ref_page)
1297                                 continue;
1298
1299                         /*
1300                          * Mark the VMA as having unmapped its page so that
1301                          * future faults in this VMA will fail rather than
1302                          * looking like data was lost
1303                          */
1304                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
1305                 }
1306
1307                 pte = huge_ptep_get_and_clear(mm, address, ptep);
1308                 if (huge_pte_none(pte))
1309                         continue;
1310
1311                 page = pte_page(pte);
1312                 if (pte_dirty(pte))
1313                         set_page_dirty(page);
1314                 list_add(&page->lru, &page_list);
1315         }
1316         spin_unlock(&mm->page_table_lock);
1317         flush_tlb_range(vma, start, end);
1318         list_for_each_entry_safe(page, tmp, &page_list, lru) {
1319                 list_del(&page->lru);
1320                 put_page(page);
1321         }
1322 }
1323
1324 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
1325                           unsigned long end, struct page *ref_page)
1326 {
1327         /*
1328          * It is undesirable to test vma->vm_file as it should be non-null
1329          * for a valid hugetlb area. However, vm_file will be NULL in the error
1330          * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
1331          * do_mmap_pgoff() nullifies vma->vm_file before calling this function
1332          * to clean up. Since no pte has actually been setup, it is safe to
1333          * do nothing in this case.
1334          */
1335         if (vma->vm_file) {
1336                 spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
1337                 __unmap_hugepage_range(vma, start, end, ref_page);
1338                 spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
1339         }
1340 }
1341
1342 /*
1343  * This is called when the original mapper is failing to COW a MAP_PRIVATE
1344  * mapping it owns the reserve page for. The intention is to unmap the page
1345  * from other VMAs and let the children be SIGKILLed if they are faulting the
1346  * same region.
1347  */
1348 int unmap_ref_private(struct mm_struct *mm,
1349                                         struct vm_area_struct *vma,
1350                                         struct page *page,
1351                                         unsigned long address)
1352 {
1353         struct vm_area_struct *iter_vma;
1354         struct address_space *mapping;
1355         struct prio_tree_iter iter;
1356         pgoff_t pgoff;
1357
1358         /*
1359          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
1360          * from page cache lookup which is in HPAGE_SIZE units.
1361          */
1362         address = address & huge_page_mask(hstate_vma(vma));
1363         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
1364                 + (vma->vm_pgoff >> PAGE_SHIFT);
1365         mapping = (struct address_space *)page_private(page);
1366
1367         vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
1368                 /* Do not unmap the current VMA */
1369                 if (iter_vma == vma)
1370                         continue;
1371
1372                 /*
1373                  * Unmap the page from other VMAs without their own reserves.
1374                  * They get marked to be SIGKILLed if they fault in these
1375                  * areas. This is because a future no-page fault on this VMA
1376                  * could insert a zeroed page instead of the data existing
1377                  * from the time of fork. This would look like data corruption
1378                  */
1379                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
1380                         unmap_hugepage_range(iter_vma,
1381                                 address, address + HPAGE_SIZE,
1382                                 page);
1383         }
1384
1385         return 1;
1386 }
1387
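/*
 * Handle a copy-on-write fault on a huge page.  If the faulting task is
 * the one that created the MAP_PRIVATE mapping but the reserve for this
 * address was already consumed (old_page != pagecache_page), the new
 * page is allocated outside the reserves; on allocation failure the page
 * is unmapped from other VMAs so the owner can retry, and those VMAs are
 * marked HPAGE_RESV_UNMAPPED so that later faults fail rather than
 * silently losing data.
 */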
1388 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
1389                         unsigned long address, pte_t *ptep, pte_t pte,
1390                         struct page *pagecache_page)
1391 {
1392         struct page *old_page, *new_page;
1393         int avoidcopy;
1394         int outside_reserve = 0;
1395
1396         old_page = pte_page(pte);
1397
1398 retry_avoidcopy:
1399         /* If no-one else is actually using this page, avoid the copy
1400          * and just make the page writable */
1401         avoidcopy = (page_count(old_page) == 1);
1402         if (avoidcopy) {
1403                 set_huge_ptep_writable(vma, address, ptep);
1404                 return 0;
1405         }
1406
1407         /*
1408          * If the process that created a MAP_PRIVATE mapping is about to
1409          * perform a COW due to a shared page count, attempt to satisfy
1410          * the allocation without using the existing reserves. The pagecache
1411          * page is used to determine if the reserve at this address was
1412          * consumed or not. If reserves were used, a partial faulted mapping
1413          * at the time of fork() could consume its reserves on COW instead
1414          * of the full address range.
1415          */
1416         if (!(vma->vm_flags & VM_SHARED) &&
1417                         is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
1418                         old_page != pagecache_page)
1419                 outside_reserve = 1;
1420
1421         page_cache_get(old_page);
1422         new_page = alloc_huge_page(vma, address, outside_reserve);
1423
1424         if (IS_ERR(new_page)) {
1425                 page_cache_release(old_page);
1426
1427                 /*
1428                  * If a process owning a MAP_PRIVATE mapping fails to COW,
1429                  * it is due to references held by a child and an insufficient
1430                  * huge page pool. To guarantee the original mappers
1431                  * reliability, unmap the page from child processes. The child
1432                  * may get SIGKILLed if it later faults.
1433                  */
1434                 if (outside_reserve) {
1435                         BUG_ON(huge_pte_none(pte));
1436                         if (unmap_ref_private(mm, vma, old_page, address)) {
1437                                 BUG_ON(page_count(old_page) != 1);
1438                                 BUG_ON(huge_pte_none(pte));
1439                                 goto retry_avoidcopy;
1440                         }
1441                         WARN_ON_ONCE(1);
1442                 }
1443
1444                 return -PTR_ERR(new_page);
1445         }
1446
1447         spin_unlock(&mm->page_table_lock);
1448         copy_huge_page(new_page, old_page, address, vma);
1449         __SetPageUptodate(new_page);
1450         spin_lock(&mm->page_table_lock);
1451
1452         ptep = huge_pte_offset(mm, address & HPAGE_MASK);
1453         if (likely(pte_same(huge_ptep_get(ptep), pte))) {
1454                 /* Break COW */
1455                 huge_ptep_clear_flush(vma, address, ptep);
1456                 set_huge_pte_at(mm, address, ptep,
1457                                 make_huge_pte(vma, new_page, 1));
1458                 /* Arrange for the old page to be freed below */
1459                 new_page = old_page;
1460         }
1461         page_cache_release(new_page);
1462         page_cache_release(old_page);
1463         return 0;
1464 }
1465
1466 /* Return the pagecache page at a given address within a VMA */
1467 static struct page *hugetlbfs_pagecache_page(struct vm_area_struct *vma,
1468                         unsigned long address)
1469 {
1470         struct address_space *mapping;
1471         pgoff_t idx;
1472
1473         mapping = vma->vm_file->f_mapping;
1474         idx = vma_pagecache_offset(vma, address);
1475
1476         return find_lock_page(mapping, idx);
1477 }
1478
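/*
 * Handle a fault on a huge PTE that is not yet present.  The page is
 * looked up in the hugetlbfs page cache; if it is absent, a new huge
 * page is allocated, zeroed and, for shared mappings, added to the page
 * cache.  Racing truncation is guarded against by rechecking i_size
 * under the page lock and again under page_table_lock.
 */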
1479 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
1480                         unsigned long address, pte_t *ptep, int write_access)
1481 {
1482         int ret = VM_FAULT_SIGBUS;
1483         pgoff_t idx;
1484         unsigned long size;
1485         struct page *page;
1486         struct address_space *mapping;
1487         pte_t new_pte;
1488
1489         /*
1490          * Currently, we are forced to kill the process in the event the
1491          * original mapper has unmapped pages from the child due to a failed
1492          * COW. Warn that such a situation has occurred, as it may not be obvious.
1493          */
1494         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
1495                 printk(KERN_WARNING
1496                         "PID %d killed due to inadequate hugepage pool\n",
1497                         current->pid);
1498                 return ret;
1499         }
1500
1501         mapping = vma->vm_file->f_mapping;
1502         idx = vma_pagecache_offset(vma, address);
1503
1504         /*
1505          * Use page lock to guard against racing truncation
1506          * before we get page_table_lock.
1507          */
1508 retry:
1509         page = find_lock_page(mapping, idx);
1510         if (!page) {
1511                 size = i_size_read(mapping->host) >> HPAGE_SHIFT;
1512                 if (idx >= size)
1513                         goto out;
1514                 page = alloc_huge_page(vma, address, 0);
1515                 if (IS_ERR(page)) {
1516                         ret = -PTR_ERR(page);
1517                         goto out;
1518                 }
1519                 clear_huge_page(page, address);
1520                 __SetPageUptodate(page);
1521
1522                 if (vma->vm_flags & VM_SHARED) {
1523                         int err;
1524                         struct inode *inode = mapping->host;
1525
1526                         err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
1527                         if (err) {
1528                                 put_page(page);
1529                                 if (err == -EEXIST)
1530                                         goto retry;
1531                                 goto out;
1532                         }
1533
1534                         spin_lock(&inode->i_lock);
1535                         inode->i_blocks += BLOCKS_PER_HUGEPAGE;
1536                         spin_unlock(&inode->i_lock);
1537                 } else
1538                         lock_page(page);
1539         }
1540
1541         spin_lock(&mm->page_table_lock);
1542         size = i_size_read(mapping->host) >> HPAGE_SHIFT;
1543         if (idx >= size)
1544                 goto backout;
1545
1546         ret = 0;
1547         if (!huge_pte_none(huge_ptep_get(ptep)))
1548                 goto backout;
1549
1550         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
1551                                 && (vma->vm_flags & VM_SHARED)));
1552         set_huge_pte_at(mm, address, ptep, new_pte);
1553
1554         if (write_access && !(vma->vm_flags & VM_SHARED)) {
1555                 /* Optimization, do the COW without a second fault */
1556                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
1557         }
1558
1559         spin_unlock(&mm->page_table_lock);
1560         unlock_page(page);
1561 out:
1562         return ret;
1563
1564 backout:
1565         spin_unlock(&mm->page_table_lock);
1566         unlock_page(page);
1567         put_page(page);
1568         goto out;
1569 }
1570
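/*
 * Main fault handler for hugetlb VMAs.  Allocates the huge PTE if
 * necessary, then instantiates a missing page via hugetlb_no_page() or
 * breaks copy-on-write via hugetlb_cow() for a write to a read-only
 * mapping.
 */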
1571 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1572                         unsigned long address, int write_access)
1573 {
1574         pte_t *ptep;
1575         pte_t entry;
1576         int ret;
1577         static DEFINE_MUTEX(hugetlb_instantiation_mutex);
1578
1579         ptep = huge_pte_alloc(mm, address);
1580         if (!ptep)
1581                 return VM_FAULT_OOM;
1582
1583         /*
1584          * Serialize hugepage allocation and instantiation, so that we don't
1585          * get spurious allocation failures if two CPUs race to instantiate
1586          * the same page in the page cache.
1587          */
1588         mutex_lock(&hugetlb_instantiation_mutex);
1589         entry = huge_ptep_get(ptep);
1590         if (huge_pte_none(entry)) {
1591                 ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
1592                 mutex_unlock(&hugetlb_instantiation_mutex);
1593                 return ret;
1594         }
1595
1596         ret = 0;
1597
1598         spin_lock(&mm->page_table_lock);
1599         /* Check for a racing update before calling hugetlb_cow */
1600         if (likely(pte_same(entry, huge_ptep_get(ptep))))
1601                 if (write_access && !pte_write(entry)) {
1602                         struct page *page;
1603                         page = hugetlbfs_pagecache_page(vma, address);
1604                         ret = hugetlb_cow(mm, vma, address, ptep, entry, page);
1605                         if (page) {
1606                                 unlock_page(page);
1607                                 put_page(page);
1608                         }
1609                 }
1610         spin_unlock(&mm->page_table_lock);
1611         mutex_unlock(&hugetlb_instantiation_mutex);
1612
1613         return ret;
1614 }
1615
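/*
 * Walk a range of a hugetlb VMA on behalf of get_user_pages(), faulting
 * pages in as needed and filling the pages[] and vmas[] arrays one base
 * page at a time.  Returns the updated array index and advances
 * *position and *length for the caller.
 */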
1616 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
1617                         struct page **pages, struct vm_area_struct **vmas,
1618                         unsigned long *position, int *length, int i,
1619                         int write)
1620 {
1621         unsigned long pfn_offset;
1622         unsigned long vaddr = *position;
1623         int remainder = *length;
1624
1625         spin_lock(&mm->page_table_lock);
1626         while (vaddr < vma->vm_end && remainder) {
1627                 pte_t *pte;
1628                 struct page *page;
1629
1630                 /*
1631                  * Some archs (sparc64, sh*) have multiple pte_ts to
1632                  * each hugepage.  We have to make sure we get the
1633                  * first, for the page indexing below to work.
1634                  */
1635                 pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
1636
1637                 if (!pte || huge_pte_none(huge_ptep_get(pte)) ||
1638                     (write && !pte_write(huge_ptep_get(pte)))) {
1639                         int ret;
1640
1641                         spin_unlock(&mm->page_table_lock);
1642                         ret = hugetlb_fault(mm, vma, vaddr, write);
1643                         spin_lock(&mm->page_table_lock);
1644                         if (!(ret & VM_FAULT_ERROR))
1645                                 continue;
1646
1647                         remainder = 0;
1648                         if (!i)
1649                                 i = -EFAULT;
1650                         break;
1651                 }
1652
1653                 pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
1654                 page = pte_page(huge_ptep_get(pte));
1655 same_page:
1656                 if (pages) {
1657                         get_page(page);
1658                         pages[i] = page + pfn_offset;
1659                 }
1660
1661                 if (vmas)
1662                         vmas[i] = vma;
1663
1664                 vaddr += PAGE_SIZE;
1665                 ++pfn_offset;
1666                 --remainder;
1667                 ++i;
1668                 if (vaddr < vma->vm_end && remainder &&
1669                                 pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
1670                         /*
1671                          * We use pfn_offset to avoid touching the pageframes
1672                          * of this compound page.
1673                          */
1674                         goto same_page;
1675                 }
1676         }
1677         spin_unlock(&mm->page_table_lock);
1678         *length = remainder;
1679         *position = vaddr;
1680
1681         return i;
1682 }
1683
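/*
 * mprotect() support for hugetlb VMAs: rewrite every present huge PTE
 * in the range with the new protection, unsharing shared PMDs where
 * necessary, and flush the TLB for the range when done.
 */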
1684 void hugetlb_change_protection(struct vm_area_struct *vma,
1685                 unsigned long address, unsigned long end, pgprot_t newprot)
1686 {
1687         struct mm_struct *mm = vma->vm_mm;
1688         unsigned long start = address;
1689         pte_t *ptep;
1690         pte_t pte;
1691
1692         BUG_ON(address >= end);
1693         flush_cache_range(vma, address, end);
1694
1695         spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
1696         spin_lock(&mm->page_table_lock);
1697         for (; address < end; address += HPAGE_SIZE) {
1698                 ptep = huge_pte_offset(mm, address);
1699                 if (!ptep)
1700                         continue;
1701                 if (huge_pmd_unshare(mm, &address, ptep))
1702                         continue;
1703                 if (!huge_pte_none(huge_ptep_get(ptep))) {
1704                         pte = huge_ptep_get_and_clear(mm, address, ptep);
1705                         pte = pte_mkhuge(pte_modify(pte, newprot));
1706                         set_huge_pte_at(mm, address, ptep, pte);
1707                 }
1708         }
1709         spin_unlock(&mm->page_table_lock);
1710         spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
1711
1712         flush_tlb_range(vma, start, end);
1713 }
1714
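/*
 * Reserve huge pages for a mapping so that later faults are not expected
 * to fail.  Shared mappings track the reservation per inode in the
 * region list; private mappings attach a resv_map to the VMA and mark it
 * as the owner of the reserve (HPAGE_RESV_OWNER).
 */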
1715 int hugetlb_reserve_pages(struct inode *inode,
1716                                         long from, long to,
1717                                         struct vm_area_struct *vma)
1718 {
1719         long ret, chg;
1720
1721         if (vma && vma->vm_flags & VM_NORESERVE)
1722                 return 0;
1723
1724         /*
1725          * Shared mappings base their reservation on the number of pages that
1726          * are already allocated on behalf of the file. Private mappings need
1727          * to reserve the full area even if read-only, as mprotect() may be
1728          * called to make the mapping read-write. Assume !vma is a shm mapping.
1729          */
1730         if (!vma || vma->vm_flags & VM_SHARED)
1731                 chg = region_chg(&inode->i_mapping->private_list, from, to);
1732         else {
1733                 struct resv_map *resv_map = resv_map_alloc();
1734                 if (!resv_map)
1735                         return -ENOMEM;
1736
1737                 chg = to - from;
1738
1739                 set_vma_resv_map(vma, resv_map);
1740                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
1741         }
1742
1743         if (chg < 0)
1744                 return chg;
1745
1746         if (hugetlb_get_quota(inode->i_mapping, chg))
1747                 return -ENOSPC;
1748         ret = hugetlb_acct_memory(chg);
1749         if (ret < 0) {
1750                 hugetlb_put_quota(inode->i_mapping, chg);
1751                 return ret;
1752         }
1753         if (!vma || vma->vm_flags & VM_SHARED)
1754                 region_add(&inode->i_mapping->private_list, from, to);
1755         return 0;
1756 }
1757
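/*
 * Undo reservations when a hugetlbfs inode is truncated.  The region
 * list is truncated at @offset, the inode block count is reduced by the
 * pages actually freed, and the quota and reservation for pages that
 * were reserved but never faulted in are given back.
 */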
1758 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
1759 {
1760         long chg = region_truncate(&inode->i_mapping->private_list, offset);
1761
1762         spin_lock(&inode->i_lock);
1763         inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed;
1764         spin_unlock(&inode->i_lock);
1765
1766         hugetlb_put_quota(inode->i_mapping, (chg - freed));
1767         hugetlb_acct_memory(-(chg - freed));
1768 }