mm: fault feedback #1
mm/hugetlb.c
/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
        int i;

        might_sleep();
        for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
                cond_resched();
                clear_user_highpage(page + i, addr);
        }
}

static void copy_huge_page(struct page *dst, struct page *src,
                           unsigned long addr, struct vm_area_struct *vma)
{
        int i;

        might_sleep();
        for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
                cond_resched();
                copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
        }
}

static void enqueue_huge_page(struct page *page)
{
        int nid = page_to_nid(page);
        list_add(&page->lru, &hugepage_freelists[nid]);
        free_huge_pages++;
        free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page(struct vm_area_struct *vma,
                                unsigned long address)
{
        int nid;
        struct page *page = NULL;
        struct zonelist *zonelist = huge_zonelist(vma, address,
                                                htlb_alloc_mask);
        struct zone **z;

        for (z = zonelist->zones; *z; z++) {
                nid = zone_to_nid(*z);
                if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
                    !list_empty(&hugepage_freelists[nid]))
                        break;
        }

        if (*z) {
                page = list_entry(hugepage_freelists[nid].next,
                                  struct page, lru);
                list_del(&page->lru);
                free_huge_pages--;
                free_huge_pages_node[nid]--;
        }
        return page;
}

static void free_huge_page(struct page *page)
{
        BUG_ON(page_count(page));

        INIT_LIST_HEAD(&page->lru);

        spin_lock(&hugetlb_lock);
        enqueue_huge_page(page);
        spin_unlock(&hugetlb_lock);
}

static int alloc_fresh_huge_page(void)
{
        static int prev_nid;
        struct page *page;
        static DEFINE_SPINLOCK(nid_lock);
        int nid;

        spin_lock(&nid_lock);
        nid = next_node(prev_nid, node_online_map);
        if (nid == MAX_NUMNODES)
                nid = first_node(node_online_map);
        prev_nid = nid;
        spin_unlock(&nid_lock);

        page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
                                        HUGETLB_PAGE_ORDER);
        if (page) {
                set_compound_page_dtor(page, free_huge_page);
                spin_lock(&hugetlb_lock);
                nr_huge_pages++;
                nr_huge_pages_node[page_to_nid(page)]++;
                spin_unlock(&hugetlb_lock);
                put_page(page); /* free it into the hugepage allocator */
                return 1;
        }
        return 0;
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
                                    unsigned long addr)
{
        struct page *page;

        spin_lock(&hugetlb_lock);
        if (vma->vm_flags & VM_MAYSHARE)
                resv_huge_pages--;
        else if (free_huge_pages <= resv_huge_pages)
                goto fail;

        page = dequeue_huge_page(vma, addr);
        if (!page)
                goto fail;

        spin_unlock(&hugetlb_lock);
        set_page_refcounted(page);
        return page;

fail:
        if (vma->vm_flags & VM_MAYSHARE)
                resv_huge_pages++;
        spin_unlock(&hugetlb_lock);
        return NULL;
}

static int __init hugetlb_init(void)
{
        unsigned long i;

        if (HPAGE_SHIFT == 0)
                return 0;

        for (i = 0; i < MAX_NUMNODES; ++i)
                INIT_LIST_HEAD(&hugepage_freelists[i]);

        for (i = 0; i < max_huge_pages; ++i) {
                if (!alloc_fresh_huge_page())
                        break;
        }
        max_huge_pages = free_huge_pages = nr_huge_pages = i;
        printk("Total HugeTLB memory allocated, %lu\n", free_huge_pages);
        return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
        if (sscanf(s, "%lu", &max_huge_pages) <= 0)
                max_huge_pages = 0;
        return 1;
}
__setup("hugepages=", hugetlb_setup);
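
/*
 * Usage note (illustrative): the pool size parsed here is consumed by
 * hugetlb_init() above, so huge pages can be preallocated at boot,
 * e.g. booting with "hugepages=64" asks for 64 huge pages before
 * userspace starts.
 */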

static unsigned int cpuset_mems_nr(unsigned int *array)
{
        int node;
        unsigned int nr = 0;

        for_each_node_mask(node, cpuset_current_mems_allowed)
                nr += array[node];

        return nr;
}

#ifdef CONFIG_SYSCTL
static void update_and_free_page(struct page *page)
{
        int i;
        nr_huge_pages--;
        nr_huge_pages_node[page_to_nid(page)]--;
        for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
                page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
                                1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1 << PG_writeback);
        }
        page[1].lru.next = NULL;
        set_page_refcounted(page);
        __free_pages(page, HUGETLB_PAGE_ORDER);
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
        int i;

        for (i = 0; i < MAX_NUMNODES; ++i) {
                struct page *page, *next;
                list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
                        if (PageHighMem(page))
                                continue;
                        list_del(&page->lru);
                        update_and_free_page(page);
                        free_huge_pages--;
                        free_huge_pages_node[page_to_nid(page)]--;
                        if (count >= nr_huge_pages)
                                return;
                }
        }
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

static unsigned long set_max_huge_pages(unsigned long count)
{
        while (count > nr_huge_pages) {
                if (!alloc_fresh_huge_page())
                        return nr_huge_pages;
        }
        if (count >= nr_huge_pages)
                return nr_huge_pages;

        spin_lock(&hugetlb_lock);
        count = max(count, resv_huge_pages);
        try_to_free_low(count);
        while (count < nr_huge_pages) {
                struct page *page = dequeue_huge_page(NULL, 0);
                if (!page)
                        break;
                update_and_free_page(page);
        }
        spin_unlock(&hugetlb_lock);
        return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
                           struct file *file, void __user *buffer,
                           size_t *length, loff_t *ppos)
{
        proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
        max_huge_pages = set_max_huge_pages(max_huge_pages);
        return 0;
}
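
/*
 * Usage note (illustrative): this handler backs the vm.nr_hugepages
 * sysctl, so the pool can be resized at runtime, e.g.:
 *
 *   echo 20 > /proc/sys/vm/nr_hugepages
 *
 * Reading the file back shows how many huge pages set_max_huge_pages()
 * could actually allocate or free.
 */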

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
                        struct file *file, void __user *buffer,
                        size_t *length, loff_t *ppos)
{
        proc_dointvec(table, write, file, buffer, length, ppos);
        if (hugepages_treat_as_movable)
                htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
        else
                htlb_alloc_mask = GFP_HIGHUSER;
        return 0;
}
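
/*
 * Usage note (illustrative): the vm.hugepages_treat_as_movable sysctl
 * toggles the mask used for fresh huge page allocations, e.g.:
 *
 *   echo 1 > /proc/sys/vm/hugepages_treat_as_movable
 *
 * after which new huge pages may be taken from ZONE_MOVABLE via
 * GFP_HIGHUSER_MOVABLE.
 */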

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
        return sprintf(buf,
                        "HugePages_Total: %5lu\n"
                        "HugePages_Free:  %5lu\n"
                        "HugePages_Rsvd:  %5lu\n"
                        "Hugepagesize:    %5lu kB\n",
                        nr_huge_pages,
                        free_huge_pages,
                        resv_huge_pages,
                        HPAGE_SIZE/1024);
}
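
/*
 * Example of the resulting /proc/meminfo fragment (hypothetical
 * values, shown for a 2 MB huge page size):
 *
 *   HugePages_Total:    20
 *   HugePages_Free:     18
 *   HugePages_Rsvd:      2
 *   Hugepagesize:     2048 kB
 */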

int hugetlb_report_node_meminfo(int nid, char *buf)
{
        return sprintf(buf,
                "Node %d HugePages_Total: %5u\n"
                "Node %d HugePages_Free:  %5u\n",
                nid, nr_huge_pages_node[nid],
                nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
        return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}
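
/*
 * Worked example (hypothetical sizes): with 4 KB base pages and 2 MB
 * huge pages, HPAGE_SIZE / PAGE_SIZE is 512, so a pool of 20 huge
 * pages is reported as 20 * 512 = 10240 PAGE_SIZE units.
 */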

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        BUG();
        return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
        .fault = hugetlb_vm_op_fault,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
                                int writable)
{
        pte_t entry;

        if (writable) {
                entry =
                    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        } else {
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        }
        entry = pte_mkyoung(entry);
        entry = pte_mkhuge(entry);

        return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
                                   unsigned long address, pte_t *ptep)
{
        pte_t entry;

        entry = pte_mkwrite(pte_mkdirty(*ptep));
        if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
                update_mmu_cache(vma, address, entry);
                lazy_mmu_prot_update(entry);
        }
}

int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr;
        int cow;

        cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                src_pte = huge_pte_offset(src, addr);
                if (!src_pte)
                        continue;
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                spin_lock(&dst->page_table_lock);
                spin_lock(&src->page_table_lock);
                if (!pte_none(*src_pte)) {
                        if (cow)
                                ptep_set_wrprotect(src, addr, src_pte);
                        entry = *src_pte;
                        ptepage = pte_page(entry);
                        get_page(ptepage);
                        set_huge_pte_at(dst, addr, dst_pte, entry);
                }
                spin_unlock(&src->page_table_lock);
                spin_unlock(&dst->page_table_lock);
        }
        return 0;

nomem:
        return -ENOMEM;
}

void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *ptep;
        pte_t pte;
        struct page *page;
        struct page *tmp;
        /*
         * A page gathering list, protected by the per-file i_mmap_lock. The
         * lock is used to avoid list corruption from multiple unmappings
         * of the same page, since we are using page->lru.
         */
        LIST_HEAD(page_list);

        WARN_ON(!is_vm_hugetlb_page(vma));
        BUG_ON(start & ~HPAGE_MASK);
        BUG_ON(end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);
        for (address = start; address < end; address += HPAGE_SIZE) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;

                if (huge_pmd_unshare(mm, &address, ptep))
                        continue;

                pte = huge_ptep_get_and_clear(mm, address, ptep);
                if (pte_none(pte))
                        continue;

                page = pte_page(pte);
                if (pte_dirty(pte))
                        set_page_dirty(page);
                list_add(&page->lru, &page_list);
        }
        spin_unlock(&mm->page_table_lock);
        flush_tlb_range(vma, start, end);
        list_for_each_entry_safe(page, tmp, &page_list, lru) {
                list_del(&page->lru);
                put_page(page);
        }
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end)
{
        /*
         * It is undesirable to test vma->vm_file as it should be non-NULL
         * for a valid hugetlb area. However, vm_file will be NULL in the
         * error cleanup path of do_mmap_pgoff. When the hugetlbfs ->mmap
         * method fails, do_mmap_pgoff() nullifies vma->vm_file before
         * calling this function to clean up. Since no pte has actually
         * been set up, it is safe to do nothing in this case.
         */
        if (vma->vm_file) {
                spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
                __unmap_hugepage_range(vma, start, end);
                spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
        }
}

static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, pte_t pte)
{
        struct page *old_page, *new_page;
        int avoidcopy;

        old_page = pte_page(pte);

        /* If no-one else is actually using this page, avoid the copy
         * and just make the page writable */
        avoidcopy = (page_count(old_page) == 1);
        if (avoidcopy) {
                set_huge_ptep_writable(vma, address, ptep);
                return VM_FAULT_MINOR;
        }

        page_cache_get(old_page);
        new_page = alloc_huge_page(vma, address);

        if (!new_page) {
                page_cache_release(old_page);
                return VM_FAULT_OOM;
        }

        spin_unlock(&mm->page_table_lock);
        copy_huge_page(new_page, old_page, address, vma);
        spin_lock(&mm->page_table_lock);

        ptep = huge_pte_offset(mm, address & HPAGE_MASK);
        if (likely(pte_same(*ptep, pte))) {
                /* Break COW */
                set_huge_pte_at(mm, address, ptep,
                                make_huge_pte(vma, new_page, 1));
                /* Make the old page be freed below */
                new_page = old_page;
        }
        page_cache_release(new_page);
        page_cache_release(old_page);
        return VM_FAULT_MINOR;
}

static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, int write_access)
{
        int ret = VM_FAULT_SIGBUS;
        unsigned long idx;
        unsigned long size;
        struct page *page;
        struct address_space *mapping;
        pte_t new_pte;

        mapping = vma->vm_file->f_mapping;
        idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
                + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

        /*
         * Use page lock to guard against racing truncation
         * before we get page_table_lock.
         */
retry:
        page = find_lock_page(mapping, idx);
        if (!page) {
                size = i_size_read(mapping->host) >> HPAGE_SHIFT;
                if (idx >= size)
                        goto out;
                if (hugetlb_get_quota(mapping))
                        goto out;
                page = alloc_huge_page(vma, address);
                if (!page) {
                        hugetlb_put_quota(mapping);
                        ret = VM_FAULT_OOM;
                        goto out;
                }
                clear_huge_page(page, address);

                if (vma->vm_flags & VM_SHARED) {
                        int err;

                        err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
                        if (err) {
                                put_page(page);
                                hugetlb_put_quota(mapping);
                                if (err == -EEXIST)
                                        goto retry;
                                goto out;
                        }
                } else
                        lock_page(page);
        }

        spin_lock(&mm->page_table_lock);
        size = i_size_read(mapping->host) >> HPAGE_SHIFT;
        if (idx >= size)
                goto backout;

        ret = VM_FAULT_MINOR;
        if (!pte_none(*ptep))
                goto backout;

        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
                                && (vma->vm_flags & VM_SHARED)));
        set_huge_pte_at(mm, address, ptep, new_pte);

        if (write_access && !(vma->vm_flags & VM_SHARED)) {
                /* Optimization, do the COW without a second fault */
                ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
        }

        spin_unlock(&mm->page_table_lock);
        unlock_page(page);
out:
        return ret;

backout:
        spin_unlock(&mm->page_table_lock);
        hugetlb_put_quota(mapping);
        unlock_page(page);
        put_page(page);
        goto out;
}

int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, int write_access)
{
        pte_t *ptep;
        pte_t entry;
        int ret;
        static DEFINE_MUTEX(hugetlb_instantiation_mutex);

        ptep = huge_pte_alloc(mm, address);
        if (!ptep)
                return VM_FAULT_OOM;

        /*
         * Serialize hugepage allocation and instantiation, so that we don't
         * get spurious allocation failures if two CPUs race to instantiate
         * the same page in the page cache.
         */
        mutex_lock(&hugetlb_instantiation_mutex);
        entry = *ptep;
        if (pte_none(entry)) {
                ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
                mutex_unlock(&hugetlb_instantiation_mutex);
                return ret;
        }

        ret = VM_FAULT_MINOR;

        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
        if (likely(pte_same(entry, *ptep)))
                if (write_access && !pte_write(entry))
                        ret = hugetlb_cow(mm, vma, address, ptep, entry);
        spin_unlock(&mm->page_table_lock);
        mutex_unlock(&hugetlb_instantiation_mutex);

        return ret;
}

int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct page **pages, struct vm_area_struct **vmas,
                        unsigned long *position, int *length, int i)
{
        unsigned long pfn_offset;
        unsigned long vaddr = *position;
        int remainder = *length;

        spin_lock(&mm->page_table_lock);
        while (vaddr < vma->vm_end && remainder) {
                pte_t *pte;
                struct page *page;

                /*
                 * Some archs (sparc64, sh*) have multiple pte_ts for
                 * each hugepage.  We have to make sure we get the
                 * first, for the page indexing below to work.
                 */
                pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

                if (!pte || pte_none(*pte)) {
                        int ret;

                        spin_unlock(&mm->page_table_lock);
                        ret = hugetlb_fault(mm, vma, vaddr, 0);
                        spin_lock(&mm->page_table_lock);
                        if (ret == VM_FAULT_MINOR)
                                continue;

                        remainder = 0;
                        if (!i)
                                i = -EFAULT;
                        break;
                }

                pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
                page = pte_page(*pte);
same_page:
                if (pages) {
                        get_page(page);
                        pages[i] = page + pfn_offset;
                }

                if (vmas)
                        vmas[i] = vma;

                vaddr += PAGE_SIZE;
                ++pfn_offset;
                --remainder;
                ++i;
                if (vaddr < vma->vm_end && remainder &&
                                pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
                        /*
                         * We use pfn_offset to avoid touching the pageframes
                         * of this compound page.
                         */
                        goto same_page;
                }
        }
        spin_unlock(&mm->page_table_lock);
        *length = remainder;
        *position = vaddr;

        return i;
}

void hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long start = address;
        pte_t *ptep;
        pte_t pte;

        BUG_ON(address >= end);
        flush_cache_range(vma, address, end);

        spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
        spin_lock(&mm->page_table_lock);
        for (; address < end; address += HPAGE_SIZE) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;
                if (huge_pmd_unshare(mm, &address, ptep))
                        continue;
                if (!pte_none(*ptep)) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(pte_modify(pte, newprot));
                        set_huge_pte_at(mm, address, ptep, pte);
                        lazy_mmu_prot_update(pte);
                }
        }
        spin_unlock(&mm->page_table_lock);
        spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

        flush_tlb_range(vma, start, end);
}

struct file_region {
        struct list_head link;
        long from;
        long to;
};
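
/*
 * Illustrative layout: reservations are half-open [from, to) ranges
 * in huge-page units, kept sorted on inode->i_mapping->private_list.
 * A file with pages 0-1 and 4-6 reserved is represented by the two
 * entries {from = 0, to = 2} and {from = 4, to = 7}.
 */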

static long region_add(struct list_head *head, long f, long t)
{
        struct file_region *rg, *nrg, *trg;

        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;

        /* Check for and consume any regions we now overlap with. */
        nrg = rg;
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        break;

                /* If this area reaches higher, then extend our area to
                 * include it completely.  If this is not the first area
                 * which we intend to reuse, free it. */
                if (rg->to > t)
                        t = rg->to;
                if (rg != nrg) {
                        list_del(&rg->link);
                        kfree(rg);
                }
        }
        nrg->from = f;
        nrg->to = t;
        return 0;
}
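
/*
 * Worked example (hypothetical regions): starting from [0,2) and
 * [4,7), region_add(head, 1, 5) rounds f down to 0, absorbs [4,7) by
 * extending t to 7, frees the absorbed entry, and leaves the single
 * merged region [0,7).
 */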

static long region_chg(struct list_head *head, long f, long t)
{
        struct file_region *rg, *nrg;
        long chg = 0;

        /* Locate the region we are before or in. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /* If we are below the current region then a new region is required.
         * Subtle: allocate a new region at this position, but make it zero
         * size so that we are guaranteed to record the reservation. */
        if (&rg->link == head || t < rg->from) {
                nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
                if (!nrg)
                        return -ENOMEM;
                nrg->from = f;
                nrg->to   = f;
                INIT_LIST_HEAD(&nrg->link);
                list_add(&nrg->link, rg->link.prev);

                return t - f;
        }

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;
        chg = t - f;

        /* Check for and consume any regions we now overlap with. */
        list_for_each_entry(rg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        return chg;

                /* We overlap with this area; if it extends further than
                 * us then we must extend ourselves.  Account for its
                 * existing reservation. */
                if (rg->to > t) {
                        chg += rg->to - t;
                        t = rg->to;
                }
                chg -= rg->to - rg->from;
        }
        return chg;
}
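
/*
 * Worked example (hypothetical region): with a single existing region
 * [0,2), region_chg(head, 1, 5) rounds f down to 0, starts with
 * chg = 5, subtracts the 2 pages already reserved, and returns 3, the
 * number of additional huge pages this reservation will consume.
 */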

static long region_truncate(struct list_head *head, long end)
{
        struct file_region *rg, *trg;
        long chg = 0;

        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (end <= rg->to)
                        break;
        if (&rg->link == head)
                return 0;

        /* If we are in the middle of a region then adjust it. */
        if (end > rg->from) {
                chg = rg->to - end;
                rg->to = end;
                rg = list_entry(rg->link.next, typeof(*rg), link);
        }

        /* Drop any remaining regions. */
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                chg += rg->to - rg->from;
                list_del(&rg->link);
                kfree(rg);
        }
        return chg;
}
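
/*
 * Worked example (hypothetical region): truncating a list holding
 * [0,7) with end = 3 trims that entry to [0,3) and returns 4, the
 * number of reserved pages released beyond the new end of file.
 */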

static int hugetlb_acct_memory(long delta)
{
        int ret = -ENOMEM;

        spin_lock(&hugetlb_lock);
        if ((delta + resv_huge_pages) <= free_huge_pages) {
                resv_huge_pages += delta;
                ret = 0;
        }
        spin_unlock(&hugetlb_lock);
        return ret;
}

int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
        long ret, chg;

        chg = region_chg(&inode->i_mapping->private_list, from, to);
        if (chg < 0)
                return chg;
        /*
         * When cpusets are configured, they break the strict hugetlb page
         * reservation, as the accounting is done on a global variable. Such
         * a reservation is completely rubbish in the presence of cpusets
         * because the reservation is not checked against page availability
         * for the current cpuset. An application can still be OOM'ed by the
         * kernel for lack of free huge pages in the cpuset that the task
         * is in. Enforcing strict accounting with cpusets is almost
         * impossible (or too ugly), because cpusets are so fluid that
         * tasks or memory nodes can be moved between them dynamically.
         *
         * The change of semantics for shared hugetlb mappings with cpusets
         * is undesirable. However, in order to preserve some of the
         * semantics, we fall back to checking against the current free page
         * availability as a best attempt, hopefully minimizing the impact
         * of the semantic change that cpusets introduce.
         */
        if (chg > cpuset_mems_nr(free_huge_pages_node))
                return -ENOMEM;

        ret = hugetlb_acct_memory(chg);
        if (ret < 0)
                return ret;
        region_add(&inode->i_mapping->private_list, from, to);
        return 0;
}
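
/*
 * Illustrative flow (hypothetical numbers): on a mapping with no prior
 * reservations, hugetlb_reserve_pages(inode, 0, 4) gets chg = 4 from
 * region_chg(), charges 4 pages via hugetlb_acct_memory(), and commits
 * the region [0,4) with region_add().  If the file is removed before
 * any page is instantiated, hugetlb_unreserve_pages(inode, 0, 0)
 * truncates the region (chg = 4) and hugetlb_acct_memory(0 - 4)
 * gives the reservation back.
 */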

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
        long chg = region_truncate(&inode->i_mapping->private_list, offset);
        hugetlb_acct_memory(freed - chg);
}