/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);
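/*
 * Put a huge page back on its home node's free list and update the global
 * and per-node free counts; callers hold hugetlb_lock.
 */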
static void enqueue_huge_page(struct page *page)
{
        int nid = page_to_nid(page);
        list_add(&page->lru, &hugepage_freelists[nid]);
        free_huge_pages++;
        free_huge_pages_node[nid]++;
}
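/*
 * Take a huge page off a free list, walking the local node's zonelist so we
 * prefer pages close to the allocating CPU; hugetlb_lock must be held.
 */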
static struct page *dequeue_huge_page(void)
{
        int nid = numa_node_id();
        struct page *page = NULL;
        struct zonelist *zonelist = NODE_DATA(nid)->node_zonelists;
        struct zone **z;

        for (z = zonelist->zones; *z; z++) {
                nid = (*z)->zone_pgdat->node_id;
                if (!list_empty(&hugepage_freelists[nid]))
                        break;
        }
        if (*z) {
                page = list_entry(hugepage_freelists[nid].next,
                                  struct page, lru);
                list_del(&page->lru);
                free_huge_pages--;
                free_huge_pages_node[nid]--;
        }
        return page;
}
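/*
 * Allocate a brand-new huge page from the buddy allocator as a compound
 * page, round-robining across online nodes to spread the pool out.
 */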
static struct page *alloc_fresh_huge_page(void)
{
        static int nid = 0;
        struct page *page;

        page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
                                HUGETLB_PAGE_ORDER);
        nid = (nid + 1) % num_online_nodes();
        if (page) {
                spin_lock(&hugetlb_lock);
                nr_huge_pages++;
                nr_huge_pages_node[page_to_nid(page)]++;
                spin_unlock(&hugetlb_lock);
        }
        return page;
}
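/*
 * Compound page destructor: when the last reference to a huge page is
 * dropped, return it to the hugetlb free pool rather than the buddy
 * allocator.
 */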
void free_huge_page(struct page *page)
{
        BUG_ON(page_count(page));

        INIT_LIST_HEAD(&page->lru);
        page[1].mapping = NULL;

        spin_lock(&hugetlb_lock);
        enqueue_huge_page(page);
        spin_unlock(&hugetlb_lock);
}
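/*
 * Hand out a huge page from the free pool: take a reference, stash the
 * destructor in the first tail page, and clear the page before returning it.
 */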
struct page *alloc_huge_page(void)
{
        struct page *page;
        int i;

        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page();
        if (!page) {
                spin_unlock(&hugetlb_lock);
                return NULL;
        }
        spin_unlock(&hugetlb_lock);
        set_page_count(page, 1);
        page[1].mapping = (void *)free_huge_page;
        for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
                clear_highpage(&page[i]);
        return page;
}
static int __init hugetlb_init(void)
{
        unsigned long i;
        struct page *page;

        if (HPAGE_SHIFT == 0)
                return 0;

        for (i = 0; i < MAX_NUMNODES; ++i)
                INIT_LIST_HEAD(&hugepage_freelists[i]);

        for (i = 0; i < max_huge_pages; ++i) {
                page = alloc_fresh_huge_page();
                if (!page)
                        break;
                spin_lock(&hugetlb_lock);
                enqueue_huge_page(page);
                spin_unlock(&hugetlb_lock);
        }
        max_huge_pages = free_huge_pages = nr_huge_pages = i;
        printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
        return 0;
}
module_init(hugetlb_init);
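/*
 * The initial pool size comes from the "hugepages=" boot parameter, e.g.
 * booting with "hugepages=64" asks hugetlb_init() to reserve 64 huge pages.
 */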
static int __init hugetlb_setup(char *s)
{
        if (sscanf(s, "%lu", &max_huge_pages) <= 0)
                max_huge_pages = 0;
        return 1;
}
__setup("hugepages=", hugetlb_setup);
#ifdef CONFIG_SYSCTL
static void update_and_free_page(struct page *page)
{
        int i;

        nr_huge_pages--;
        nr_huge_pages_node[page_zone(page)->zone_pgdat->node_id]--;
        for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
                page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
                                1 << PG_referenced | 1 << PG_dirty |
                                1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1 << PG_writeback);
                set_page_count(&page[i], 0);
        }
        set_page_count(page, 1);
        __free_pages(page, HUGETLB_PAGE_ORDER);
}
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
        int i, nid;

        for (i = 0; i < MAX_NUMNODES; ++i) {
                struct page *page, *next;
                list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
                        if (PageHighMem(page))
                                continue;
                        list_del(&page->lru);
                        update_and_free_page(page);
                        nid = page_zone(page)->zone_pgdat->node_id;
                        free_huge_pages--;
                        free_huge_pages_node[nid]--;
                        if (count >= nr_huge_pages)
                                return;
                }
        }
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif
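/*
 * Resize the huge page pool to 'count' pages: grow it with fresh pages from
 * the buddy allocator, or shrink it by freeing surplus free pages (lowmem
 * first, where highmem is configured).  Returns the pool size achieved.
 */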
static unsigned long set_max_huge_pages(unsigned long count)
{
        while (count > nr_huge_pages) {
                struct page *page = alloc_fresh_huge_page();
                if (!page)
                        return nr_huge_pages;
                spin_lock(&hugetlb_lock);
                enqueue_huge_page(page);
                spin_unlock(&hugetlb_lock);
        }
        if (count >= nr_huge_pages)
                return nr_huge_pages;

        spin_lock(&hugetlb_lock);
        try_to_free_low(count);
        while (count < nr_huge_pages) {
                struct page *page = dequeue_huge_page();
                if (!page)
                        break;
                update_and_free_page(page);
        }
        spin_unlock(&hugetlb_lock);
        return nr_huge_pages;
}
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
                           struct file *file, void __user *buffer,
                           size_t *length, loff_t *ppos)
{
        proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
        max_huge_pages = set_max_huge_pages(max_huge_pages);
        return 0;
}
#endif /* CONFIG_SYSCTL */
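/*
 * Report the pool state: the global HugePages_Total/HugePages_Free/
 * Hugepagesize fields shown in /proc/meminfo, and the per-node variant.
 */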
int hugetlb_report_meminfo(char *buf)
{
        return sprintf(buf,
                        "HugePages_Total: %5lu\n"
                        "HugePages_Free:  %5lu\n"
                        "Hugepagesize:    %5lu kB\n",
                        nr_huge_pages,
                        free_huge_pages,
                        HPAGE_SIZE/1024);
}
int hugetlb_report_node_meminfo(int nid, char *buf)
{
        return sprintf(buf,
                "Node %d HugePages_Total: %5u\n"
                "Node %d HugePages_Free:  %5u\n",
                nid, nr_huge_pages_node[nid],
                nid, free_huge_pages_node[nid]);
}
/* Round size up to whole huge pages and check it against the free pool. */
int is_hugepage_mem_enough(size_t size)
{
        return (size + ~HPAGE_MASK)/HPAGE_SIZE <= free_huge_pages;
}
/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
        return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}
/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get here.
 */
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
                                unsigned long address, int *unused)
{
        BUG();
        return NULL;
}

struct vm_operations_struct hugetlb_vm_ops = {
        .nopage = hugetlb_nopage,
};
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
                                int writable)
{
        pte_t entry;

        if (writable) {
                entry =
                    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        } else {
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        }
        entry = pte_mkyoung(entry);
        entry = pte_mkhuge(entry);

        return entry;
}
static void set_huge_ptep_writable(struct vm_area_struct *vma,
                                unsigned long address, pte_t *ptep)
{
        pte_t entry;

        entry = pte_mkwrite(pte_mkdirty(*ptep));
        ptep_set_access_flags(vma, address, ptep, entry, 1);
        update_mmu_cache(vma, address, entry);
        lazy_mmu_prot_update(entry);
}
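/*
 * Copy the huge page table entries from parent to child at fork time.  For
 * private writable mappings the parent's PTE is write-protected before it is
 * copied, so the next write on either side triggers COW in hugetlb_cow().
 */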
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr;
        int cow;

        cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                src_pte = huge_pte_offset(src, addr);
                if (!src_pte)
                        continue;
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                spin_lock(&dst->page_table_lock);
                spin_lock(&src->page_table_lock);
                if (!pte_none(*src_pte)) {
                        if (cow)
                                ptep_set_wrprotect(src, addr, src_pte);
                        entry = *src_pte;
                        ptepage = pte_page(entry);
                        get_page(ptepage);
                        add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE);
                        set_huge_pte_at(dst, addr, dst_pte, entry);
                }
                spin_unlock(&src->page_table_lock);
                spin_unlock(&dst->page_table_lock);
        }
        return 0;

nomem:
        return -ENOMEM;
}
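/*
 * Tear down the huge page mappings in [start, end): clear each huge PTE,
 * drop the page reference, lower the rss accounting, and flush the TLB.
 */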
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *ptep;
        pte_t pte;
        struct page *page;

        WARN_ON(!is_vm_hugetlb_page(vma));
        BUG_ON(start & ~HPAGE_MASK);
        BUG_ON(end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);

        /* Update high watermark before we lower rss */
        update_hiwater_rss(mm);

        for (address = start; address < end; address += HPAGE_SIZE) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;

                pte = huge_ptep_get_and_clear(mm, address, ptep);
                if (pte_none(pte))
                        continue;

                page = pte_page(pte);
                put_page(page);
                add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
        }

        spin_unlock(&mm->page_table_lock);
        flush_tlb_range(vma, start, end);
}
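/*
 * Look the page up in the file's page cache, or allocate a new huge page
 * (inserting it into the page cache for shared mappings), charging it
 * against the hugetlbfs quota.  On success the page is returned locked.
 */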
static struct page *find_or_alloc_huge_page(struct address_space *mapping,
                                unsigned long idx, int shared)
{
        struct page *page;
        int err;

retry:
        page = find_lock_page(mapping, idx);
        if (page)
                goto out;

        if (hugetlb_get_quota(mapping))
                goto out;
        page = alloc_huge_page();
        if (!page) {
                hugetlb_put_quota(mapping);
                goto out;
        }

        if (shared) {
                err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
                if (err) {
                        put_page(page);
                        hugetlb_put_quota(mapping);
                        if (err == -EEXIST)
                                goto retry;
                        page = NULL;
                }
        } else {
                /* Caller expects a locked page */
                lock_page(page);
        }
out:
        return page;
}
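/*
 * Break copy-on-write for a private huge page: if the faulting task is the
 * only user, just make the PTE writable; otherwise allocate a new huge page,
 * copy the data, and swap it into the page table.
 */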
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, pte_t pte)
{
        struct page *old_page, *new_page;
        int i, avoidcopy;

        old_page = pte_page(pte);

        /* If no-one else is actually using this page, avoid the copy
         * and just make the page writable */
        avoidcopy = (page_count(old_page) == 1);
        if (avoidcopy) {
                set_huge_ptep_writable(vma, address, ptep);
                return VM_FAULT_MINOR;
        }

        page_cache_get(old_page);
        new_page = alloc_huge_page();

        if (!new_page) {
                page_cache_release(old_page);

                /* Logically this is OOM, not a SIGBUS, but an OOM
                 * could cause the kernel to go killing other
                 * processes which won't help the hugepage situation
                 * at all. */
                return VM_FAULT_SIGBUS;
        }

        spin_unlock(&mm->page_table_lock);
        for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++)
                copy_user_highpage(new_page + i, old_page + i,
                                   address + i*PAGE_SIZE);
        spin_lock(&mm->page_table_lock);

        ptep = huge_pte_offset(mm, address & HPAGE_MASK);
        if (likely(pte_same(*ptep, pte))) {
                /* Break COW */
                set_huge_pte_at(mm, address, ptep,
                                make_huge_pte(vma, new_page, 1));
                /* Make the old page be freed below */
                new_page = old_page;
        }
        page_cache_release(new_page);
        page_cache_release(old_page);
        return VM_FAULT_MINOR;
}
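/*
 * Handle a fault on a not-present huge PTE: find or allocate the backing
 * page, check against truncation under the page lock, install the new PTE,
 * and for private writable mappings do the COW up front.
 */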
int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, int write_access)
{
        int ret = VM_FAULT_SIGBUS;
        unsigned long idx;
        unsigned long size;
        struct page *page;
        struct address_space *mapping;
        pte_t new_pte;

        mapping = vma->vm_file->f_mapping;
        idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
                + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

        /*
         * Use page lock to guard against racing truncation
         * before we get page_table_lock.
         */
        page = find_or_alloc_huge_page(mapping, idx,
                        vma->vm_flags & VM_SHARED);
        if (!page)
                goto out;

        BUG_ON(!PageLocked(page));

        spin_lock(&mm->page_table_lock);
        size = i_size_read(mapping->host) >> HPAGE_SHIFT;
        if (idx >= size)
                goto backout;

        ret = VM_FAULT_MINOR;
        if (!pte_none(*ptep))
                goto backout;

        add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
                                && (vma->vm_flags & VM_SHARED)));
        set_huge_pte_at(mm, address, ptep, new_pte);

        if (write_access && !(vma->vm_flags & VM_SHARED)) {
                /* Optimization, do the COW without a second fault */
                ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
        }

        spin_unlock(&mm->page_table_lock);
        unlock_page(page);
out:
        return ret;

backout:
        spin_unlock(&mm->page_table_lock);
        hugetlb_put_quota(mapping);
        unlock_page(page);
        put_page(page);
        goto out;
}
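/*
 * Top-level hugetlb fault handler: allocate the huge PTE slot, dispatch
 * not-present faults to hugetlb_no_page(), and handle write faults on
 * read-only private mappings via hugetlb_cow().
 */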
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, int write_access)
{
        pte_t *ptep;
        pte_t entry;
        int ret;

        ptep = huge_pte_alloc(mm, address);
        if (!ptep)
                return VM_FAULT_OOM;

        entry = *ptep;
        if (pte_none(entry))
                return hugetlb_no_page(mm, vma, address, ptep, write_access);

        ret = VM_FAULT_MINOR;

        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
        if (likely(pte_same(entry, *ptep)))
                if (write_access && !pte_write(entry))
                        ret = hugetlb_cow(mm, vma, address, ptep, entry);
        spin_unlock(&mm->page_table_lock);

        return ret;
}
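/*
 * get_user_pages() back end for huge pages: walk the range, faulting pages
 * in as needed, and fill in the pages[] and vmas[] arrays for the caller.
 */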
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct page **pages, struct vm_area_struct **vmas,
                        unsigned long *position, int *length, int i)
{
        unsigned long vpfn, vaddr = *position;
        int remainder = *length;

        vpfn = vaddr/PAGE_SIZE;
        spin_lock(&mm->page_table_lock);
        while (vaddr < vma->vm_end && remainder) {
                pte_t *pte;
                struct page *page;

                /*
                 * Some archs (sparc64, sh*) have multiple pte_ts to
                 * each hugepage.  We have to make sure we get the
                 * first, for the page indexing below to work.
                 */
                pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

                if (!pte || pte_none(*pte)) {
                        int ret;

                        spin_unlock(&mm->page_table_lock);
                        ret = hugetlb_fault(mm, vma, vaddr, 0);
                        spin_lock(&mm->page_table_lock);
                        if (ret == VM_FAULT_MINOR)
                                continue;

                        remainder = 0;
                        if (!i)
                                i = -EFAULT;
                        break;
                }

                if (pages) {
                        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
                        get_page(page);
                        pages[i] = page;
                }

                if (vmas)
                        vmas[i] = vma;

                vaddr += PAGE_SIZE;
                ++vpfn;
                --remainder;
                ++i;
        }
        spin_unlock(&mm->page_table_lock);