huge page private reservation review cleanups
mm / hugetlb.c
1 /*
2  * Generic hugetlb support.
3  * (C) William Irwin, April 2004
4  */
5 #include <linux/gfp.h>
6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/module.h>
9 #include <linux/mm.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/cpuset.h>
16 #include <linux/mutex.h>
17
18 #include <asm/page.h>
19 #include <asm/pgtable.h>
20
21 #include <linux/hugetlb.h>
22 #include "internal.h"
23
24 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
25 static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
26 static unsigned long surplus_huge_pages;
27 static unsigned long nr_overcommit_huge_pages;
28 unsigned long max_huge_pages;
29 unsigned long sysctl_overcommit_huge_pages;
30 static struct list_head hugepage_freelists[MAX_NUMNODES];
31 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
32 static unsigned int free_huge_pages_node[MAX_NUMNODES];
33 static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
34 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
35 unsigned long hugepages_treat_as_movable;
36 static int hugetlb_next_nid;
37
38 /*
39  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
40  */
41 static DEFINE_SPINLOCK(hugetlb_lock);
42
43 /*
44  * Convert the address within this vma to the page offset within
45  * the mapping, in base page units.
46  */
47 static pgoff_t vma_page_offset(struct vm_area_struct *vma,
48                                 unsigned long address)
49 {
50         return ((address - vma->vm_start) >> PAGE_SHIFT) +
51                                         (vma->vm_pgoff >> PAGE_SHIFT);
52 }
53
54 /*
55  * Convert the address within this vma to the page offset within
56  * the mapping, in pagecache page units; huge pages here.
57  */
58 static pgoff_t vma_pagecache_offset(struct vm_area_struct *vma,
59                                         unsigned long address)
60 {
61         return ((address - vma->vm_start) >> HPAGE_SHIFT) +
62                         (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
63 }
64
65 #define HPAGE_RESV_OWNER    (1UL << (BITS_PER_LONG - 1))
66 #define HPAGE_RESV_UNMAPPED (1UL << (BITS_PER_LONG - 2))
67 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
68 /*
69  * These helpers are used to track how many pages are reserved for
70  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
71  * is guaranteed to have its future faults succeed.
72  *
73  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
74  * the reserve counters are updated with the hugetlb_lock held. It is safe
75  * to reset the VMA at fork() time as it is not in use yet and there is no
76  * chance of the global counters being corrupted as a result of its values.
77  */
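/*
 * For a private mapping, vma->vm_private_data packs both pieces of state
 * into a single word: the top bit is HPAGE_RESV_OWNER, the next bit is
 * HPAGE_RESV_UNMAPPED, and the remaining low bits hold the number of huge
 * pages still reserved for this VMA.
 */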
78 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
79 {
80         return (unsigned long)vma->vm_private_data;
81 }
82
83 static void set_vma_private_data(struct vm_area_struct *vma,
84                                                         unsigned long value)
85 {
86         vma->vm_private_data = (void *)value;
87 }
88
89 static unsigned long vma_resv_huge_pages(struct vm_area_struct *vma)
90 {
91         VM_BUG_ON(!is_vm_hugetlb_page(vma));
92         if (!(vma->vm_flags & VM_SHARED))
93                 return get_vma_private_data(vma) & ~HPAGE_RESV_MASK;
94         return 0;
95 }
96
97 static void set_vma_resv_huge_pages(struct vm_area_struct *vma,
98                                                         unsigned long reserve)
99 {
100         VM_BUG_ON(!is_vm_hugetlb_page(vma));
101         VM_BUG_ON(vma->vm_flags & VM_SHARED);
102
103         set_vma_private_data(vma,
104                 (get_vma_private_data(vma) & HPAGE_RESV_MASK) | reserve);
105 }
106
107 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
108 {
109         VM_BUG_ON(!is_vm_hugetlb_page(vma));
110         VM_BUG_ON(vma->vm_flags & VM_SHARED);
111
112         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
113 }
114
115 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
116 {
117         VM_BUG_ON(!is_vm_hugetlb_page(vma));
118
119         return (get_vma_private_data(vma) & flag) != 0;
120 }
121
122 /* Decrement the reserved pages in the hugepage pool by one */
123 static void decrement_hugepage_resv_vma(struct vm_area_struct *vma)
124 {
125         if (vma->vm_flags & VM_SHARED) {
126                 /* Shared mappings always use reserves */
127                 resv_huge_pages--;
128         } else {
129                 /*
130                  * Only the process that called mmap() has reserves for
131                  * private mappings.
132                  */
133                 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
134                         unsigned long flags, reserve;
135                         resv_huge_pages--;
136                         flags = (unsigned long)vma->vm_private_data &
137                                                         HPAGE_RESV_MASK;
138                         reserve = (unsigned long)vma->vm_private_data - 1;
139                         vma->vm_private_data = (void *)(reserve | flags);
140                 }
141         }
142 }
143
144 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
145 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
146 {
147         VM_BUG_ON(!is_vm_hugetlb_page(vma));
148         if (!(vma->vm_flags & VM_SHARED))
149                 vma->vm_private_data = (void *)0;
150 }
151
152 /* Returns true if the VMA has associated reserve pages */
153 static int vma_has_private_reserves(struct vm_area_struct *vma)
154 {
155         if (vma->vm_flags & VM_SHARED)
156                 return 0;
157         if (!vma_resv_huge_pages(vma))
158                 return 0;
159         return 1;
160 }
161
162 static void clear_huge_page(struct page *page, unsigned long addr)
163 {
164         int i;
165
166         might_sleep();
167         for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
168                 cond_resched();
169                 clear_user_highpage(page + i, addr + i * PAGE_SIZE);
170         }
171 }
172
173 static void copy_huge_page(struct page *dst, struct page *src,
174                            unsigned long addr, struct vm_area_struct *vma)
175 {
176         int i;
177
178         might_sleep();
179         for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
180                 cond_resched();
181                 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
182         }
183 }
184
185 static void enqueue_huge_page(struct page *page)
186 {
187         int nid = page_to_nid(page);
188         list_add(&page->lru, &hugepage_freelists[nid]);
189         free_huge_pages++;
190         free_huge_pages_node[nid]++;
191 }
192
193 static struct page *dequeue_huge_page(void)
194 {
195         int nid;
196         struct page *page = NULL;
197
198         for (nid = 0; nid < MAX_NUMNODES; ++nid) {
199                 if (!list_empty(&hugepage_freelists[nid])) {
200                         page = list_entry(hugepage_freelists[nid].next,
201                                           struct page, lru);
202                         list_del(&page->lru);
203                         free_huge_pages--;
204                         free_huge_pages_node[nid]--;
205                         break;
206                 }
207         }
208         return page;
209 }
210
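/*
 * Select a free huge page for a fault in this VMA, walking the zonelist
 * dictated by the VMA's memory policy. Returns NULL rather than dipping
 * into reserves that the VMA is not entitled to use.
 */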
211 static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
212                                 unsigned long address, int avoid_reserve)
213 {
214         int nid;
215         struct page *page = NULL;
216         struct mempolicy *mpol;
217         nodemask_t *nodemask;
218         struct zonelist *zonelist = huge_zonelist(vma, address,
219                                         htlb_alloc_mask, &mpol, &nodemask);
220         struct zone *zone;
221         struct zoneref *z;
222
223         /*
224          * A child process with MAP_PRIVATE mappings created by its parent
225          * has no page reserves. This check ensures that reservations are
226          * not "stolen". The child may still get SIGKILLed.
227          */
228         if (!vma_has_private_reserves(vma) &&
229                         free_huge_pages - resv_huge_pages == 0)
230                 return NULL;
231
232         /* If reserves cannot be used, ensure enough pages are in the pool */
233         if (avoid_reserve && free_huge_pages - resv_huge_pages == 0)
234                 return NULL;
235
236         for_each_zone_zonelist_nodemask(zone, z, zonelist,
237                                                 MAX_NR_ZONES - 1, nodemask) {
238                 nid = zone_to_nid(zone);
239                 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
240                     !list_empty(&hugepage_freelists[nid])) {
241                         page = list_entry(hugepage_freelists[nid].next,
242                                           struct page, lru);
243                         list_del(&page->lru);
244                         free_huge_pages--;
245                         free_huge_pages_node[nid]--;
246
247                         if (!avoid_reserve)
248                                 decrement_hugepage_resv_vma(vma);
249
250                         break;
251                 }
252         }
253         mpol_cond_put(mpol);
254         return page;
255 }
256
257 static void update_and_free_page(struct page *page)
258 {
259         int i;
260         nr_huge_pages--;
261         nr_huge_pages_node[page_to_nid(page)]--;
262         for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
263                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
264                                 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
265                                 1 << PG_private | 1<< PG_writeback);
266         }
267         set_compound_page_dtor(page, NULL);
268         set_page_refcounted(page);
269         arch_release_hugepage(page);
270         __free_pages(page, HUGETLB_PAGE_ORDER);
271 }
272
273 static void free_huge_page(struct page *page)
274 {
275         int nid = page_to_nid(page);
276         struct address_space *mapping;
277
278         mapping = (struct address_space *) page_private(page);
279         set_page_private(page, 0);
280         BUG_ON(page_count(page));
281         INIT_LIST_HEAD(&page->lru);
282
283         spin_lock(&hugetlb_lock);
284         if (surplus_huge_pages_node[nid]) {
285                 update_and_free_page(page);
286                 surplus_huge_pages--;
287                 surplus_huge_pages_node[nid]--;
288         } else {
289                 enqueue_huge_page(page);
290         }
291         spin_unlock(&hugetlb_lock);
292         if (mapping)
293                 hugetlb_put_quota(mapping, 1);
294 }
295
296 /*
297  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
298  * balanced by operating on them in a round-robin fashion.
299  * Returns 1 if an adjustment was made.
300  */
301 static int adjust_pool_surplus(int delta)
302 {
303         static int prev_nid;
304         int nid = prev_nid;
305         int ret = 0;
306
307         VM_BUG_ON(delta != -1 && delta != 1);
308         do {
309                 nid = next_node(nid, node_online_map);
310                 if (nid == MAX_NUMNODES)
311                         nid = first_node(node_online_map);
312
313                 /* To shrink on this node, there must be a surplus page */
314                 if (delta < 0 && !surplus_huge_pages_node[nid])
315                         continue;
316                 /* Surplus cannot exceed the total number of pages */
317                 if (delta > 0 && surplus_huge_pages_node[nid] >=
318                                                 nr_huge_pages_node[nid])
319                         continue;
320
321                 surplus_huge_pages += delta;
322                 surplus_huge_pages_node[nid] += delta;
323                 ret = 1;
324                 break;
325         } while (nid != prev_nid);
326
327         prev_nid = nid;
328         return ret;
329 }
330
331 static struct page *alloc_fresh_huge_page_node(int nid)
332 {
333         struct page *page;
334
335         page = alloc_pages_node(nid,
336                 htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
337                                                 __GFP_REPEAT|__GFP_NOWARN,
338                 HUGETLB_PAGE_ORDER);
339         if (page) {
340                 if (arch_prepare_hugepage(page)) {
341                         __free_pages(page, HUGETLB_PAGE_ORDER);
342                         return NULL;
343                 }
344                 set_compound_page_dtor(page, free_huge_page);
345                 spin_lock(&hugetlb_lock);
346                 nr_huge_pages++;
347                 nr_huge_pages_node[nid]++;
348                 spin_unlock(&hugetlb_lock);
349                 put_page(page); /* free it into the hugepage allocator */
350         }
351
352         return page;
353 }
354
355 static int alloc_fresh_huge_page(void)
356 {
357         struct page *page;
358         int start_nid;
359         int next_nid;
360         int ret = 0;
361
362         start_nid = hugetlb_next_nid;
363
364         do {
365                 page = alloc_fresh_huge_page_node(hugetlb_next_nid);
366                 if (page)
367                         ret = 1;
368                 /*
369                  * Use a helper variable to find the next node and then
370                  * copy it back to hugetlb_next_nid afterwards:
371                  * otherwise there's a window in which a racer might
372                  * pass invalid nid MAX_NUMNODES to alloc_pages_node.
373                  * But we don't need to use a spin_lock here: it really
374                  * doesn't matter if occasionally a racer chooses the
375                  * same nid as we do.  Move nid forward in the mask even
376                  * if we just successfully allocated a hugepage so that
377                  * the next caller gets hugepages on the next node.
378                  */
379                 next_nid = next_node(hugetlb_next_nid, node_online_map);
380                 if (next_nid == MAX_NUMNODES)
381                         next_nid = first_node(node_online_map);
382                 hugetlb_next_nid = next_nid;
383         } while (!page && hugetlb_next_nid != start_nid);
384
385         if (ret)
386                 count_vm_event(HTLB_BUDDY_PGALLOC);
387         else
388                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
389
390         return ret;
391 }
392
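/*
 * Allocate a "surplus" huge page directly from the buddy allocator, over
 * and above the persistent pool, subject to nr_overcommit_huge_pages.
 */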
393 static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
394                                                 unsigned long address)
395 {
396         struct page *page;
397         unsigned int nid;
398
399         /*
400          * Assume we will successfully allocate the surplus page to
401          * prevent racing processes from causing the surplus to exceed
402          * overcommit
403          *
404          * This however introduces a different race, where a process B
405          * tries to grow the static hugepage pool while alloc_pages() is
406          * called by process A. B will only examine the per-node
407          * counters in determining if surplus huge pages can be
408          * converted to normal huge pages in adjust_pool_surplus(). A
409          * won't be able to increment the per-node counter, until the
410          * lock is dropped by B, but B doesn't drop hugetlb_lock until
411          * no more huge pages can be converted from surplus to normal
412          * state (and doesn't try to convert again). Thus, we have a
413          * case where a surplus huge page exists, the pool is grown, and
414          * the surplus huge page still exists after, even though it
415          * should just have been converted to a normal huge page. This
416          * does not leak memory, though, as the hugepage will be freed
417          * once it is out of use. It also does not allow the counters to
418          * go out of whack in adjust_pool_surplus() as we don't modify
419          * the node values until we've gotten the hugepage and only the
420          * per-node value is checked there.
421          */
422         spin_lock(&hugetlb_lock);
423         if (surplus_huge_pages >= nr_overcommit_huge_pages) {
424                 spin_unlock(&hugetlb_lock);
425                 return NULL;
426         } else {
427                 nr_huge_pages++;
428                 surplus_huge_pages++;
429         }
430         spin_unlock(&hugetlb_lock);
431
432         page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
433                                         __GFP_REPEAT|__GFP_NOWARN,
434                                         HUGETLB_PAGE_ORDER);
435
436         spin_lock(&hugetlb_lock);
437         if (page) {
438                 /*
439                  * This page is now managed by the hugetlb allocator and has
440                  * no users -- drop the buddy allocator's reference.
441                  */
442                 put_page_testzero(page);
443                 VM_BUG_ON(page_count(page));
444                 nid = page_to_nid(page);
445                 set_compound_page_dtor(page, free_huge_page);
446                 /*
447                  * We incremented the global counters already
448                  */
449                 nr_huge_pages_node[nid]++;
450                 surplus_huge_pages_node[nid]++;
451                 __count_vm_event(HTLB_BUDDY_PGALLOC);
452         } else {
453                 nr_huge_pages--;
454                 surplus_huge_pages--;
455                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
456         }
457         spin_unlock(&hugetlb_lock);
458
459         return page;
460 }
461
462 /*
463  * Increase the hugetlb pool such that it can accommodate a reservation
464  * of size 'delta'.
465  */
466 static int gather_surplus_pages(int delta)
467 {
468         struct list_head surplus_list;
469         struct page *page, *tmp;
470         int ret, i;
471         int needed, allocated;
472
473         needed = (resv_huge_pages + delta) - free_huge_pages;
474         if (needed <= 0) {
475                 resv_huge_pages += delta;
476                 return 0;
477         }
478
479         allocated = 0;
480         INIT_LIST_HEAD(&surplus_list);
481
482         ret = -ENOMEM;
483 retry:
484         spin_unlock(&hugetlb_lock);
485         for (i = 0; i < needed; i++) {
486                 page = alloc_buddy_huge_page(NULL, 0);
487                 if (!page) {
488                         /*
489                          * We were not able to allocate enough pages to
490                          * satisfy the entire reservation so we free what
491                          * we've allocated so far.
492                          */
493                         spin_lock(&hugetlb_lock);
494                         needed = 0;
495                         goto free;
496                 }
497
498                 list_add(&page->lru, &surplus_list);
499         }
500         allocated += needed;
501
502         /*
503          * After retaking hugetlb_lock, we need to recalculate 'needed'
504          * because either resv_huge_pages or free_huge_pages may have changed.
505          */
506         spin_lock(&hugetlb_lock);
507         needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
508         if (needed > 0)
509                 goto retry;
510
511         /*
512          * The surplus_list now contains _at_least_ the number of extra pages
513          * needed to accommodate the reservation.  Add the appropriate number
514          * of pages to the hugetlb pool and free the extras back to the buddy
515          * allocator.  Commit the entire reservation here to prevent another
516          * process from stealing the pages as they are added to the pool but
517          * before they are reserved.
518          */
519         needed += allocated;
520         resv_huge_pages += delta;
521         ret = 0;
522 free:
523         /* Free the needed pages to the hugetlb pool */
524         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
525                 if ((--needed) < 0)
526                         break;
527                 list_del(&page->lru);
528                 enqueue_huge_page(page);
529         }
530
531         /* Free unnecessary surplus pages to the buddy allocator */
532         if (!list_empty(&surplus_list)) {
533                 spin_unlock(&hugetlb_lock);
534                 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
535                         list_del(&page->lru);
536                         /*
537                          * The page has a reference count of zero already, so
538                          * call free_huge_page directly instead of using
539                          * put_page.  This must be done with hugetlb_lock
540                          * unlocked which is safe because free_huge_page takes
541                          * hugetlb_lock before deciding how to free the page.
542                          */
543                         free_huge_page(page);
544                 }
545                 spin_lock(&hugetlb_lock);
546         }
547
548         return ret;
549 }
550
551 /*
552  * When releasing a hugetlb pool reservation, any surplus pages that were
553  * allocated to satisfy the reservation must be explicitly freed if they were
554  * never used.
555  */
556 static void return_unused_surplus_pages(unsigned long unused_resv_pages)
557 {
558         static int nid = -1;
559         struct page *page;
560         unsigned long nr_pages;
561
562         /*
563          * We want to release as many surplus pages as possible, spread
564          * evenly across all nodes. Iterate across all nodes until we
565          * can no longer free unreserved surplus pages. This occurs when
566          * the nodes with surplus pages have no free pages.
567          */
568         unsigned long remaining_iterations = num_online_nodes();
569
570         /* Uncommit the reservation */
571         resv_huge_pages -= unused_resv_pages;
572
573         nr_pages = min(unused_resv_pages, surplus_huge_pages);
574
575         while (remaining_iterations-- && nr_pages) {
576                 nid = next_node(nid, node_online_map);
577                 if (nid == MAX_NUMNODES)
578                         nid = first_node(node_online_map);
579
580                 if (!surplus_huge_pages_node[nid])
581                         continue;
582
583                 if (!list_empty(&hugepage_freelists[nid])) {
584                         page = list_entry(hugepage_freelists[nid].next,
585                                           struct page, lru);
586                         list_del(&page->lru);
587                         update_and_free_page(page);
588                         free_huge_pages--;
589                         free_huge_pages_node[nid]--;
590                         surplus_huge_pages--;
591                         surplus_huge_pages_node[nid]--;
592                         nr_pages--;
593                         remaining_iterations = num_online_nodes();
594                 }
595         }
596 }
597
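/*
 * Allocate a huge page for a fault at 'addr': try the pre-allocated pool
 * first, then fall back to a surplus page from the buddy allocator. The
 * hugetlbfs quota is charged up front when the faulting process does not
 * own a reserve for this mapping.
 */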
598 static struct page *alloc_huge_page(struct vm_area_struct *vma,
599                                     unsigned long addr, int avoid_reserve)
600 {
601         struct page *page;
602         struct address_space *mapping = vma->vm_file->f_mapping;
603         struct inode *inode = mapping->host;
604         unsigned int chg = 0;
605
606         /*
607          * Processes that did not create the mapping will have no reserves and
608          * will not have been accounted against quota. Check that the quota
609          * can be charged before satisfying the allocation.
610          */
611         if (!(vma->vm_flags & VM_SHARED) &&
612                         !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
613                 chg = 1;
614                 if (hugetlb_get_quota(inode->i_mapping, chg))
615                         return ERR_PTR(-ENOSPC);
616         }
617
618         spin_lock(&hugetlb_lock);
619         page = dequeue_huge_page_vma(vma, addr, avoid_reserve);
620         spin_unlock(&hugetlb_lock);
621
622         if (!page) {
623                 page = alloc_buddy_huge_page(vma, addr);
624                 if (!page) {
625                         hugetlb_put_quota(inode->i_mapping, chg);
626                         return ERR_PTR(-VM_FAULT_OOM);
627                 }
628         }
629
630         set_page_refcounted(page);
631         set_page_private(page, (unsigned long) mapping);
632
633         return page;
634 }
635
636 static int __init hugetlb_init(void)
637 {
638         unsigned long i;
639
640         if (HPAGE_SHIFT == 0)
641                 return 0;
642
643         for (i = 0; i < MAX_NUMNODES; ++i)
644                 INIT_LIST_HEAD(&hugepage_freelists[i]);
645
646         hugetlb_next_nid = first_node(node_online_map);
647
648         for (i = 0; i < max_huge_pages; ++i) {
649                 if (!alloc_fresh_huge_page())
650                         break;
651         }
652         max_huge_pages = free_huge_pages = nr_huge_pages = i;
653         printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
654         return 0;
655 }
656 module_init(hugetlb_init);
657
658 static int __init hugetlb_setup(char *s)
659 {
660         if (sscanf(s, "%lu", &max_huge_pages) <= 0)
661                 max_huge_pages = 0;
662         return 1;
663 }
664 __setup("hugepages=", hugetlb_setup);
665
666 static unsigned int cpuset_mems_nr(unsigned int *array)
667 {
668         int node;
669         unsigned int nr = 0;
670
671         for_each_node_mask(node, cpuset_current_mems_allowed)
672                 nr += array[node];
673
674         return nr;
675 }
676
677 #ifdef CONFIG_SYSCTL
678 #ifdef CONFIG_HIGHMEM
679 static void try_to_free_low(unsigned long count)
680 {
681         int i;
682
683         for (i = 0; i < MAX_NUMNODES; ++i) {
684                 struct page *page, *next;
685                 list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
686                         if (count >= nr_huge_pages)
687                                 return;
688                         if (PageHighMem(page))
689                                 continue;
690                         list_del(&page->lru);
691                         update_and_free_page(page);
692                         free_huge_pages--;
693                         free_huge_pages_node[page_to_nid(page)]--;
694                 }
695         }
696 }
697 #else
698 static inline void try_to_free_low(unsigned long count)
699 {
700 }
701 #endif
702
703 #define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
704 static unsigned long set_max_huge_pages(unsigned long count)
705 {
706         unsigned long min_count, ret;
707
708         /*
709          * Increase the pool size
710          * First take pages out of surplus state.  Then make up the
711          * remaining difference by allocating fresh huge pages.
712          *
713          * We might race with alloc_buddy_huge_page() here and be unable
714          * to convert a surplus huge page to a normal huge page. That is
715          * not critical, though, it just means the overall size of the
716          * pool might be one hugepage larger than it needs to be, but
717          * within all the constraints specified by the sysctls.
718          */
719         spin_lock(&hugetlb_lock);
720         while (surplus_huge_pages && count > persistent_huge_pages) {
721                 if (!adjust_pool_surplus(-1))
722                         break;
723         }
724
725         while (count > persistent_huge_pages) {
726                 /*
727                  * If this allocation races such that we no longer need the
728                  * page, free_huge_page will handle it by freeing the page
729                  * and reducing the surplus.
730                  */
731                 spin_unlock(&hugetlb_lock);
732                 ret = alloc_fresh_huge_page();
733                 spin_lock(&hugetlb_lock);
734                 if (!ret)
735                         goto out;
736
737         }
738
739         /*
740          * Decrease the pool size
741          * First return free pages to the buddy allocator (being careful
742          * to keep enough around to satisfy reservations).  Then place
743          * pages into surplus state as needed so the pool will shrink
744          * to the desired size as pages become free.
745          *
746          * By placing pages into the surplus state independent of the
747          * overcommit value, we are allowing the surplus pool size to
748          * exceed overcommit. There are few sane options here. Since
749          * alloc_buddy_huge_page() is checking the global counter,
750          * though, we'll note that we're not allowed to exceed surplus
751          * and won't grow the pool anywhere else. Not until one of the
752          * sysctls are changed, or the surplus pages go out of use.
753          */
754         min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
755         min_count = max(count, min_count);
756         try_to_free_low(min_count);
757         while (min_count < persistent_huge_pages) {
758                 struct page *page = dequeue_huge_page();
759                 if (!page)
760                         break;
761                 update_and_free_page(page);
762         }
763         while (count < persistent_huge_pages) {
764                 if (!adjust_pool_surplus(1))
765                         break;
766         }
767 out:
768         ret = persistent_huge_pages;
769         spin_unlock(&hugetlb_lock);
770         return ret;
771 }
772
773 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
774                            struct file *file, void __user *buffer,
775                            size_t *length, loff_t *ppos)
776 {
777         proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
778         max_huge_pages = set_max_huge_pages(max_huge_pages);
779         return 0;
780 }
781
782 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
783                         struct file *file, void __user *buffer,
784                         size_t *length, loff_t *ppos)
785 {
786         proc_dointvec(table, write, file, buffer, length, ppos);
787         if (hugepages_treat_as_movable)
788                 htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
789         else
790                 htlb_alloc_mask = GFP_HIGHUSER;
791         return 0;
792 }
793
794 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
795                         struct file *file, void __user *buffer,
796                         size_t *length, loff_t *ppos)
797 {
798         proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
799         spin_lock(&hugetlb_lock);
800         nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
801         spin_unlock(&hugetlb_lock);
802         return 0;
803 }
804
805 #endif /* CONFIG_SYSCTL */
806
807 int hugetlb_report_meminfo(char *buf)
808 {
809         return sprintf(buf,
810                         "HugePages_Total: %5lu\n"
811                         "HugePages_Free:  %5lu\n"
812                         "HugePages_Rsvd:  %5lu\n"
813                         "HugePages_Surp:  %5lu\n"
814                         "Hugepagesize:    %5lu kB\n",
815                         nr_huge_pages,
816                         free_huge_pages,
817                         resv_huge_pages,
818                         surplus_huge_pages,
819                         HPAGE_SIZE/1024);
820 }
821
822 int hugetlb_report_node_meminfo(int nid, char *buf)
823 {
824         return sprintf(buf,
825                 "Node %d HugePages_Total: %5u\n"
826                 "Node %d HugePages_Free:  %5u\n"
827                 "Node %d HugePages_Surp:  %5u\n",
828                 nid, nr_huge_pages_node[nid],
829                 nid, free_huge_pages_node[nid],
830                 nid, surplus_huge_pages_node[nid]);
831 }
832
833 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
834 unsigned long hugetlb_total_pages(void)
835 {
836         return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
837 }
838
839 static int hugetlb_acct_memory(long delta)
840 {
841         int ret = -ENOMEM;
842
843         spin_lock(&hugetlb_lock);
844         /*
845          * When cpuset is configured, it breaks the strict hugetlb page
846          * reservation as the accounting is done on a global variable. Such
847          * reservation is completely rubbish in the presence of cpuset because
848          * the reservation is not checked against page availability for the
849          * current cpuset. An application can still potentially be OOM-killed
850          * by the kernel if the cpuset it runs in lacks free huge pages.
851          * Attempting to enforce strict accounting with cpuset is almost
852          * impossible (or too ugly) because cpusets are so fluid that
853          * tasks or memory nodes can be dynamically moved between them.
854          *
855          * The change of semantics for shared hugetlb mappings with cpuset is
856          * undesirable. However, in order to preserve some of the semantics,
857          * we fall back to checking against the current free page availability
858          * as a best-effort attempt, hopefully minimizing the impact of the
859          * semantic change that cpuset introduces.
860          */
861         if (delta > 0) {
862                 if (gather_surplus_pages(delta) < 0)
863                         goto out;
864
865                 if (delta > cpuset_mems_nr(free_huge_pages_node)) {
866                         return_unused_surplus_pages(delta);
867                         goto out;
868                 }
869         }
870
871         ret = 0;
872         if (delta < 0)
873                 return_unused_surplus_pages((unsigned long) -delta);
874
875 out:
876         spin_unlock(&hugetlb_lock);
877         return ret;
878 }
879
880 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
881 {
882         unsigned long reserve = vma_resv_huge_pages(vma);
883         if (reserve)
884                 hugetlb_acct_memory(-reserve);
885 }
886
887 /*
888  * We cannot handle pagefaults against hugetlb pages at all.  They cause
889  * handle_mm_fault() to try to instantiate regular-sized pages in the
890  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
891  * this far.
892  */
893 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
894 {
895         BUG();
896         return 0;
897 }
898
899 struct vm_operations_struct hugetlb_vm_ops = {
900         .fault = hugetlb_vm_op_fault,
901         .close = hugetlb_vm_op_close,
902 };
903
904 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
905                                 int writable)
906 {
907         pte_t entry;
908
909         if (writable) {
910                 entry =
911                     pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
912         } else {
913                 entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
914         }
915         entry = pte_mkyoung(entry);
916         entry = pte_mkhuge(entry);
917
918         return entry;
919 }
920
921 static void set_huge_ptep_writable(struct vm_area_struct *vma,
922                                    unsigned long address, pte_t *ptep)
923 {
924         pte_t entry;
925
926         entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
927         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
928                 update_mmu_cache(vma, address, entry);
929         }
930 }
931
932
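/*
 * Duplicate the parent's huge page table entries into the child at fork().
 * For private writable mappings the parent's PTEs are write-protected too,
 * so that a later write by either process triggers COW.
 */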
933 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
934                             struct vm_area_struct *vma)
935 {
936         pte_t *src_pte, *dst_pte, entry;
937         struct page *ptepage;
938         unsigned long addr;
939         int cow;
940
941         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
942
943         for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
944                 src_pte = huge_pte_offset(src, addr);
945                 if (!src_pte)
946                         continue;
947                 dst_pte = huge_pte_alloc(dst, addr);
948                 if (!dst_pte)
949                         goto nomem;
950
951                 /* If the pagetables are shared don't copy or take references */
952                 if (dst_pte == src_pte)
953                         continue;
954
955                 spin_lock(&dst->page_table_lock);
956                 spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
957                 if (!huge_pte_none(huge_ptep_get(src_pte))) {
958                         if (cow)
959                                 huge_ptep_set_wrprotect(src, addr, src_pte);
960                         entry = huge_ptep_get(src_pte);
961                         ptepage = pte_page(entry);
962                         get_page(ptepage);
963                         set_huge_pte_at(dst, addr, dst_pte, entry);
964                 }
965                 spin_unlock(&src->page_table_lock);
966                 spin_unlock(&dst->page_table_lock);
967         }
968         return 0;
969
970 nomem:
971         return -ENOMEM;
972 }
973
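/*
 * Unmap and release the huge pages mapped by this VMA in [start, end).
 * If ref_page is supplied only that page is unmapped and the VMA is marked
 * HPAGE_RESV_UNMAPPED so later faults on it fail instead of silently
 * substituting zeroed data. The caller must hold the file's i_mmap_lock.
 */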
974 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
975                             unsigned long end, struct page *ref_page)
976 {
977         struct mm_struct *mm = vma->vm_mm;
978         unsigned long address;
979         pte_t *ptep;
980         pte_t pte;
981         struct page *page;
982         struct page *tmp;
983         /*
984          * A page gathering list, protected by per file i_mmap_lock. The
985          * lock is used to avoid list corruption from multiple unmapping
986          * of the same page since we are using page->lru.
987          */
988         LIST_HEAD(page_list);
989
990         WARN_ON(!is_vm_hugetlb_page(vma));
991         BUG_ON(start & ~HPAGE_MASK);
992         BUG_ON(end & ~HPAGE_MASK);
993
994         spin_lock(&mm->page_table_lock);
995         for (address = start; address < end; address += HPAGE_SIZE) {
996                 ptep = huge_pte_offset(mm, address);
997                 if (!ptep)
998                         continue;
999
1000                 if (huge_pmd_unshare(mm, &address, ptep))
1001                         continue;
1002
1003                 /*
1004                  * If a reference page is supplied, it is because a specific
1005                  * page is being unmapped, not a range. Ensure the page we
1006                  * are about to unmap is the actual page of interest.
1007                  */
1008                 if (ref_page) {
1009                         pte = huge_ptep_get(ptep);
1010                         if (huge_pte_none(pte))
1011                                 continue;
1012                         page = pte_page(pte);
1013                         if (page != ref_page)
1014                                 continue;
1015
1016                         /*
1017                          * Mark the VMA as having unmapped its page so that
1018                          * future faults in this VMA will fail rather than
1019                          * looking like data was lost
1020                          */
1021                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
1022                 }
1023
1024                 pte = huge_ptep_get_and_clear(mm, address, ptep);
1025                 if (huge_pte_none(pte))
1026                         continue;
1027
1028                 page = pte_page(pte);
1029                 if (pte_dirty(pte))
1030                         set_page_dirty(page);
1031                 list_add(&page->lru, &page_list);
1032         }
1033         spin_unlock(&mm->page_table_lock);
1034         flush_tlb_range(vma, start, end);
1035         list_for_each_entry_safe(page, tmp, &page_list, lru) {
1036                 list_del(&page->lru);
1037                 put_page(page);
1038         }
1039 }
1040
1041 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
1042                           unsigned long end, struct page *ref_page)
1043 {
1044         /*
1045          * It is undesirable to test vma->vm_file as it should be non-null
1046          * for a valid hugetlb area. However, vm_file will be NULL in the error
1047          * cleanup path of do_mmap_pgoff. When the hugetlbfs ->mmap method fails,
1048          * do_mmap_pgoff() nullifies vma->vm_file before calling this function
1049          * to clean up. Since no pte has actually been set up, it is safe to
1050          * do nothing in this case.
1051          */
1052         if (vma->vm_file) {
1053                 spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
1054                 __unmap_hugepage_range(vma, start, end, ref_page);
1055                 spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
1056         }
1057 }
1058
1059 /*
1060  * This is called when the original mapper is failing to COW a MAP_PRIVATE
1061  * mapping it owns the reserve page for. The intention is to unmap the page
1062  * from other VMAs and let the children be SIGKILLed if they are faulting the
1063  * same region.
1064  */
1065 int unmap_ref_private(struct mm_struct *mm,
1066                                         struct vm_area_struct *vma,
1067                                         struct page *page,
1068                                         unsigned long address)
1069 {
1070         struct vm_area_struct *iter_vma;
1071         struct address_space *mapping;
1072         struct prio_tree_iter iter;
1073         pgoff_t pgoff;
1074
1075         /*
1076          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
1077          * from page cache lookup which is in HPAGE_SIZE units.
1078          */
1079         address = address & huge_page_mask(hstate_vma(vma));
1080         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
1081                 + (vma->vm_pgoff >> PAGE_SHIFT);
1082         mapping = (struct address_space *)page_private(page);
1083
1084         vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
1085                 /* Do not unmap the current VMA */
1086                 if (iter_vma == vma)
1087                         continue;
1088
1089                 /*
1090                  * Unmap the page from other VMAs without their own reserves.
1091                  * They get marked to be SIGKILLed if they fault in these
1092                  * areas. This is because a future no-page fault on this VMA
1093                  * could insert a zeroed page instead of the data existing
1094                  * from the time of fork. This would look like data corruption.
1095                  */
1096                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
1097                         unmap_hugepage_range(iter_vma,
1098                                 address, address + HPAGE_SIZE,
1099                                 page);
1100         }
1101
1102         return 1;
1103 }
1104
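/*
 * Break COW on a huge page: allocate a new huge page, copy the old
 * contents into it and install it in the page table. pagecache_page is the
 * page backing this file offset and is used to decide whether the private
 * reserve for this address has already been consumed.
 */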
1105 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
1106                         unsigned long address, pte_t *ptep, pte_t pte,
1107                         struct page *pagecache_page)
1108 {
1109         struct page *old_page, *new_page;
1110         int avoidcopy;
1111         int outside_reserve = 0;
1112
1113         old_page = pte_page(pte);
1114
1115 retry_avoidcopy:
1116         /* If no-one else is actually using this page, avoid the copy
1117          * and just make the page writable */
1118         avoidcopy = (page_count(old_page) == 1);
1119         if (avoidcopy) {
1120                 set_huge_ptep_writable(vma, address, ptep);
1121                 return 0;
1122         }
1123
1124         /*
1125          * If the process that created a MAP_PRIVATE mapping is about to
1126          * perform a COW due to a shared page count, attempt to satisfy
1127          * the allocation without using the existing reserves. The pagecache
1128          * page is used to determine if the reserve at this address was
1129          * consumed or not. If reserves were used, a partial faulted mapping
1130          * at the time of fork() could consume its reserves on COW instead
1131          * of the full address range.
1132          */
1133         if (!(vma->vm_flags & VM_SHARED) &&
1134                         is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
1135                         old_page != pagecache_page)
1136                 outside_reserve = 1;
1137
1138         page_cache_get(old_page);
1139         new_page = alloc_huge_page(vma, address, outside_reserve);
1140
1141         if (IS_ERR(new_page)) {
1142                 page_cache_release(old_page);
1143
1144                 /*
1145                  * If a process owning a MAP_PRIVATE mapping fails to COW,
1146                  * it is due to references held by a child and an insufficient
1147                  * huge page pool. To guarantee the original mapper's
1148                  * reliability, unmap the page from child processes. The child
1149                  * may get SIGKILLed if it later faults.
1150                  */
1151                 if (outside_reserve) {
1152                         BUG_ON(huge_pte_none(pte));
1153                         if (unmap_ref_private(mm, vma, old_page, address)) {
1154                                 BUG_ON(page_count(old_page) != 1);
1155                                 BUG_ON(huge_pte_none(pte));
1156                                 goto retry_avoidcopy;
1157                         }
1158                         WARN_ON_ONCE(1);
1159                 }
1160
1161                 return -PTR_ERR(new_page);
1162         }
1163
1164         spin_unlock(&mm->page_table_lock);
1165         copy_huge_page(new_page, old_page, address, vma);
1166         __SetPageUptodate(new_page);
1167         spin_lock(&mm->page_table_lock);
1168
1169         ptep = huge_pte_offset(mm, address & HPAGE_MASK);
1170         if (likely(pte_same(huge_ptep_get(ptep), pte))) {
1171                 /* Break COW */
1172                 huge_ptep_clear_flush(vma, address, ptep);
1173                 set_huge_pte_at(mm, address, ptep,
1174                                 make_huge_pte(vma, new_page, 1));
1175                 /* Make the old page be freed below */
1176                 new_page = old_page;
1177         }
1178         page_cache_release(new_page);
1179         page_cache_release(old_page);
1180         return 0;
1181 }
1182
1183 /* Return the pagecache page at a given address within a VMA */
1184 static struct page *hugetlbfs_pagecache_page(struct vm_area_struct *vma,
1185                         unsigned long address)
1186 {
1187         struct address_space *mapping;
1188         pgoff_t idx;
1189
1190         mapping = vma->vm_file->f_mapping;
1191         idx = vma_pagecache_offset(vma, address);
1192
1193         return find_lock_page(mapping, idx);
1194 }
1195
1196 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
1197                         unsigned long address, pte_t *ptep, int write_access)
1198 {
1199         int ret = VM_FAULT_SIGBUS;
1200         pgoff_t idx;
1201         unsigned long size;
1202         struct page *page;
1203         struct address_space *mapping;
1204         pte_t new_pte;
1205
1206         /*
1207          * Currently, we are forced to kill the process in the event the
1208          * original mapper has unmapped pages from the child due to a failed
1209          * COW. Warn that such a situation has occurred as it may not be obvious
1210          */
1211         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
1212                 printk(KERN_WARNING
1213                         "PID %d killed due to inadequate hugepage pool\n",
1214                         current->pid);
1215                 return ret;
1216         }
1217
1218         mapping = vma->vm_file->f_mapping;
1219         idx = vma_pagecache_offset(vma, address);
1220
1221         /*
1222          * Use page lock to guard against racing truncation
1223          * before we get page_table_lock.
1224          */
1225 retry:
1226         page = find_lock_page(mapping, idx);
1227         if (!page) {
1228                 size = i_size_read(mapping->host) >> HPAGE_SHIFT;
1229                 if (idx >= size)
1230                         goto out;
1231                 page = alloc_huge_page(vma, address, 0);
1232                 if (IS_ERR(page)) {
1233                         ret = -PTR_ERR(page);
1234                         goto out;
1235                 }
1236                 clear_huge_page(page, address);
1237                 __SetPageUptodate(page);
1238
1239                 if (vma->vm_flags & VM_SHARED) {
1240                         int err;
1241                         struct inode *inode = mapping->host;
1242
1243                         err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
1244                         if (err) {
1245                                 put_page(page);
1246                                 if (err == -EEXIST)
1247                                         goto retry;
1248                                 goto out;
1249                         }
1250
1251                         spin_lock(&inode->i_lock);
1252                         inode->i_blocks += BLOCKS_PER_HUGEPAGE;
1253                         spin_unlock(&inode->i_lock);
1254                 } else
1255                         lock_page(page);
1256         }
1257
1258         spin_lock(&mm->page_table_lock);
1259         size = i_size_read(mapping->host) >> HPAGE_SHIFT;
1260         if (idx >= size)
1261                 goto backout;
1262
1263         ret = 0;
1264         if (!huge_pte_none(huge_ptep_get(ptep)))
1265                 goto backout;
1266
1267         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
1268                                 && (vma->vm_flags & VM_SHARED)));
1269         set_huge_pte_at(mm, address, ptep, new_pte);
1270
1271         if (write_access && !(vma->vm_flags & VM_SHARED)) {
1272                 /* Optimization, do the COW without a second fault */
1273                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
1274         }
1275
1276         spin_unlock(&mm->page_table_lock);
1277         unlock_page(page);
1278 out:
1279         return ret;
1280
1281 backout:
1282         spin_unlock(&mm->page_table_lock);
1283         unlock_page(page);
1284         put_page(page);
1285         goto out;
1286 }
1287
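/*
 * Top-level huge page fault handler: instantiate a missing page via
 * hugetlb_no_page() or break COW on a write fault to a read-only entry.
 * Instantiation is serialised by hugetlb_instantiation_mutex.
 */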
1288 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1289                         unsigned long address, int write_access)
1290 {
1291         pte_t *ptep;
1292         pte_t entry;
1293         int ret;
1294         static DEFINE_MUTEX(hugetlb_instantiation_mutex);
1295
1296         ptep = huge_pte_alloc(mm, address);
1297         if (!ptep)
1298                 return VM_FAULT_OOM;
1299
1300         /*
1301          * Serialize hugepage allocation and instantiation, so that we don't
1302          * get spurious allocation failures if two CPUs race to instantiate
1303          * the same page in the page cache.
1304          */
1305         mutex_lock(&hugetlb_instantiation_mutex);
1306         entry = huge_ptep_get(ptep);
1307         if (huge_pte_none(entry)) {
1308                 ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
1309                 mutex_unlock(&hugetlb_instantiation_mutex);
1310                 return ret;
1311         }
1312
1313         ret = 0;
1314
1315         spin_lock(&mm->page_table_lock);
1316         /* Check for a racing update before calling hugetlb_cow */
1317         if (likely(pte_same(entry, huge_ptep_get(ptep))))
1318                 if (write_access && !pte_write(entry)) {
1319                         struct page *page;
1320                         page = hugetlbfs_pagecache_page(vma, address);
1321                         ret = hugetlb_cow(mm, vma, address, ptep, entry, page);
1322                         if (page) {
1323                                 unlock_page(page);
1324                                 put_page(page);
1325                         }
1326                 }
1327         spin_unlock(&mm->page_table_lock);
1328         mutex_unlock(&hugetlb_instantiation_mutex);
1329
1330         return ret;
1331 }
1332
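/*
 * get_user_pages() support for hugetlb VMAs: walk the requested range,
 * faulting in huge pages as needed, and fill *pages/*vmas with the base
 * pages backing each user address.
 */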
1333 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
1334                         struct page **pages, struct vm_area_struct **vmas,
1335                         unsigned long *position, int *length, int i,
1336                         int write)
1337 {
1338         unsigned long pfn_offset;
1339         unsigned long vaddr = *position;
1340         int remainder = *length;
1341
1342         spin_lock(&mm->page_table_lock);
1343         while (vaddr < vma->vm_end && remainder) {
1344                 pte_t *pte;
1345                 struct page *page;
1346
1347                 /*
1348                  * Some archs (sparc64, sh*) have multiple pte_ts to
1349                  * each hugepage.  We have to make sure we get the
1350                  * first, for the page indexing below to work.
1351                  */
1352                 pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
1353
1354                 if (!pte || huge_pte_none(huge_ptep_get(pte)) ||
1355                     (write && !pte_write(huge_ptep_get(pte)))) {
1356                         int ret;
1357
1358                         spin_unlock(&mm->page_table_lock);
1359                         ret = hugetlb_fault(mm, vma, vaddr, write);
1360                         spin_lock(&mm->page_table_lock);
1361                         if (!(ret & VM_FAULT_ERROR))
1362                                 continue;
1363
1364                         remainder = 0;
1365                         if (!i)
1366                                 i = -EFAULT;
1367                         break;
1368                 }
1369
1370                 pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
1371                 page = pte_page(huge_ptep_get(pte));
1372 same_page:
1373                 if (pages) {
1374                         get_page(page);
1375                         pages[i] = page + pfn_offset;
1376                 }
1377
1378                 if (vmas)
1379                         vmas[i] = vma;
1380
1381                 vaddr += PAGE_SIZE;
1382                 ++pfn_offset;
1383                 --remainder;
1384                 ++i;
1385                 if (vaddr < vma->vm_end && remainder &&
1386                                 pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
1387                         /*
1388                          * We use pfn_offset to avoid touching the pageframes
1389                          * of this compound page.
1390                          */
1391                         goto same_page;
1392                 }
1393         }
1394         spin_unlock(&mm->page_table_lock);
1395         *length = remainder;
1396         *position = vaddr;
1397
1398         return i;
1399 }
1400
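/*
 * mprotect() support for hugetlb VMAs: rebuild every huge PTE in the range
 * with the new protection bits and flush the TLB for the range.
 */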
1401 void hugetlb_change_protection(struct vm_area_struct *vma,
1402                 unsigned long address, unsigned long end, pgprot_t newprot)
1403 {
1404         struct mm_struct *mm = vma->vm_mm;
1405         unsigned long start = address;
1406         pte_t *ptep;
1407         pte_t pte;
1408
1409         BUG_ON(address >= end);
1410         flush_cache_range(vma, address, end);
1411
1412         spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
1413         spin_lock(&mm->page_table_lock);
1414         for (; address < end; address += HPAGE_SIZE) {
1415                 ptep = huge_pte_offset(mm, address);
1416                 if (!ptep)
1417                         continue;
1418                 if (huge_pmd_unshare(mm, &address, ptep))
1419                         continue;
1420                 if (!huge_pte_none(huge_ptep_get(ptep))) {
1421                         pte = huge_ptep_get_and_clear(mm, address, ptep);
1422                         pte = pte_mkhuge(pte_modify(pte, newprot));
1423                         set_huge_pte_at(mm, address, ptep, pte);
1424                 }
1425         }
1426         spin_unlock(&mm->page_table_lock);
1427         spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
1428
1429         flush_tlb_range(vma, start, end);
1430 }
1431
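/*
 * Reservations for shared mappings are tracked per inode as a sorted list
 * of non-overlapping [from, to) file_region ranges hanging off
 * inode->i_mapping->private_list. region_chg() reports how many pages a
 * new range would add (preallocating a record so the follow-up cannot
 * fail), region_add() commits the range, and region_truncate() drops
 * everything beyond a given offset.
 */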
1432 struct file_region {
1433         struct list_head link;
1434         long from;
1435         long to;
1436 };
1437
1438 static long region_add(struct list_head *head, long f, long t)
1439 {
1440         struct file_region *rg, *nrg, *trg;
1441
1442         /* Locate the region we are either in or before. */
1443         list_for_each_entry(rg, head, link)
1444                 if (f <= rg->to)
1445                         break;
1446
1447         /* Round our left edge to the current segment if it encloses us. */
1448         if (f > rg->from)
1449                 f = rg->from;
1450
1451         /* Check for and consume any regions we now overlap with. */
1452         nrg = rg;
1453         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
1454                 if (&rg->link == head)
1455                         break;
1456                 if (rg->from > t)
1457                         break;
1458
1459                 /* If this area reaches higher then extend our area to
1460                  * include it completely.  If this is not the first area
1461                  * which we intend to reuse, free it. */
1462                 if (rg->to > t)
1463                         t = rg->to;
1464                 if (rg != nrg) {
1465                         list_del(&rg->link);
1466                         kfree(rg);
1467                 }
1468         }
1469         nrg->from = f;
1470         nrg->to = t;
1471         return 0;
1472 }
1473
1474 static long region_chg(struct list_head *head, long f, long t)
1475 {
1476         struct file_region *rg, *nrg;
1477         long chg = 0;
1478
1479         /* Locate the region we are before or in. */
1480         list_for_each_entry(rg, head, link)
1481                 if (f <= rg->to)
1482                         break;
1483
1484         /* If we are below the current region then a new region is required.
1485          * Subtle, allocate a new region at the position but make it zero
1486          * size such that we can guarantee to record the reservation. */
1487         if (&rg->link == head || t < rg->from) {
1488                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
1489                 if (!nrg)
1490                         return -ENOMEM;
1491                 nrg->from = f;
1492                 nrg->to   = f;
1493                 INIT_LIST_HEAD(&nrg->link);
1494                 list_add(&nrg->link, rg->link.prev);
1495
1496                 return t - f;
1497         }
1498
1499         /* Round our left edge to the current segment if it encloses us. */
1500         if (f > rg->from)
1501                 f = rg->from;
1502         chg = t - f;
1503
1504         /* Check for and consume any regions we now overlap with. */
1505         list_for_each_entry(rg, rg->link.prev, link) {
1506                 if (&rg->link == head)
1507                         break;
1508                 if (rg->from > t)
1509                         return chg;
1510
1511                 /* We overlap with this area, if it extends further than
1512                  * us then we must extend ourselves.  Account for its
1513                  * existing reservation. */
1514                 if (rg->to > t) {
1515                         chg += rg->to - t;
1516                         t = rg->to;
1517                 }
1518                 chg -= rg->to - rg->from;
1519         }
1520         return chg;
1521 }
1522
1523 static long region_truncate(struct list_head *head, long end)
1524 {
1525         struct file_region *rg, *trg;
1526         long chg = 0;
1527
1528         /* Locate the region we are either in or before. */
1529         list_for_each_entry(rg, head, link)
1530                 if (end <= rg->to)
1531                         break;
1532         if (&rg->link == head)
1533                 return 0;
1534
1535         /* If we are in the middle of a region then adjust it. */
1536         if (end > rg->from) {
1537                 chg = rg->to - end;
1538                 rg->to = end;
1539                 rg = list_entry(rg->link.next, typeof(*rg), link);
1540         }
1541
1542         /* Drop any remaining regions. */
1543         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
1544                 if (&rg->link == head)
1545                         break;
1546                 chg += rg->to - rg->from;
1547                 list_del(&rg->link);
1548                 kfree(rg);
1549         }
1550         return chg;
1551 }
1552
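/*
 * Reserve huge pages for the range [from, to) of 'inode' so that later
 * faults are guaranteed a page. Called at mmap() time; a NULL vma is
 * treated as a shared (shm) mapping, as noted below.
 */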
1553 int hugetlb_reserve_pages(struct inode *inode,
1554                                         long from, long to,
1555                                         struct vm_area_struct *vma)
1556 {
1557         long ret, chg;
1558
1559         /*
1560          * Shared mappings base their reservation on the number of pages that
1561          * are already allocated on behalf of the file. Private mappings need
1562          * to reserve the full area even if read-only as mprotect() may be
1563          * called to make the mapping read-write. Assume !vma is a shm mapping.
1564          */
1565         if (!vma || vma->vm_flags & VM_SHARED)
1566                 chg = region_chg(&inode->i_mapping->private_list, from, to);
1567         else {
1568                 chg = to - from;
1569                 set_vma_resv_huge_pages(vma, chg);
1570                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
1571         }
1572
1573         if (chg < 0)
1574                 return chg;
1575
1576         if (hugetlb_get_quota(inode->i_mapping, chg))
1577                 return -ENOSPC;
1578         ret = hugetlb_acct_memory(chg);
1579         if (ret < 0) {
1580                 hugetlb_put_quota(inode->i_mapping, chg);
1581                 return ret;
1582         }
1583         if (!vma || vma->vm_flags & VM_SHARED)
1584                 region_add(&inode->i_mapping->private_list, from, to);
1585         return 0;
1586 }
1587
1588 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
1589 {
1590         long chg = region_truncate(&inode->i_mapping->private_list, offset);
1591
1592         spin_lock(&inode->i_lock);
1593         inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed;
1594         spin_unlock(&inode->i_lock);
1595
1596         hugetlb_put_quota(inode->i_mapping, (chg - freed));
1597         hugetlb_acct_memory(-(chg - freed));
1598 }