X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=mm%2Fhugetlb.c;h=bbf953eeb58bafbad0016a6f73db4844e5514c22;hb=aed6abd662c2903733bea7fcd3856c306e650680;hp=efd78527ad1ee62159d8955069756fdaddf893a1;hpb=54f9f80d6543fb7b157d3b11e2e7911dc1379790;p=safe%2Fjmp%2Flinux-2.6 diff --git a/mm/hugetlb.c b/mm/hugetlb.c index efd7852..bbf953e 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -24,14 +24,16 @@ const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL; static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages; static unsigned long surplus_huge_pages; +static unsigned long nr_overcommit_huge_pages; unsigned long max_huge_pages; +unsigned long sysctl_overcommit_huge_pages; static struct list_head hugepage_freelists[MAX_NUMNODES]; static unsigned int nr_huge_pages_node[MAX_NUMNODES]; static unsigned int free_huge_pages_node[MAX_NUMNODES]; static unsigned int surplus_huge_pages_node[MAX_NUMNODES]; static gfp_t htlb_alloc_mask = GFP_HIGHUSER; unsigned long hugepages_treat_as_movable; -int hugetlb_dynamic_pool; +static int hugetlb_next_nid; /* * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages @@ -69,19 +71,40 @@ static void enqueue_huge_page(struct page *page) free_huge_pages_node[nid]++; } -static struct page *dequeue_huge_page(struct vm_area_struct *vma, +static struct page *dequeue_huge_page(void) +{ + int nid; + struct page *page = NULL; + + for (nid = 0; nid < MAX_NUMNODES; ++nid) { + if (!list_empty(&hugepage_freelists[nid])) { + page = list_entry(hugepage_freelists[nid].next, + struct page, lru); + list_del(&page->lru); + free_huge_pages--; + free_huge_pages_node[nid]--; + break; + } + } + return page; +} + +static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma, unsigned long address) { int nid; struct page *page = NULL; struct mempolicy *mpol; + nodemask_t *nodemask; struct zonelist *zonelist = huge_zonelist(vma, address, - htlb_alloc_mask, &mpol); - struct zone **z; - - for (z = zonelist->zones; *z; z++) { - nid = zone_to_nid(*z); - if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) && + htlb_alloc_mask, &mpol, &nodemask); + struct zone *zone; + struct zoneref *z; + + for_each_zone_zonelist_nodemask(zone, z, zonelist, + MAX_NR_ZONES - 1, nodemask) { + nid = zone_to_nid(zone); + if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) && !list_empty(&hugepage_freelists[nid])) { page = list_entry(hugepage_freelists[nid].next, struct page, lru); @@ -93,7 +116,7 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma, break; } } - mpol_free(mpol); /* unref if mpol !NULL */ + mpol_cond_put(mpol); return page; } @@ -109,13 +132,17 @@ static void update_and_free_page(struct page *page) } set_compound_page_dtor(page, NULL); set_page_refcounted(page); + arch_release_hugepage(page); __free_pages(page, HUGETLB_PAGE_ORDER); } static void free_huge_page(struct page *page) { int nid = page_to_nid(page); + struct address_space *mapping; + mapping = (struct address_space *) page_private(page); + set_page_private(page, 0); BUG_ON(page_count(page)); INIT_LIST_HEAD(&page->lru); @@ -128,6 +155,8 @@ static void free_huge_page(struct page *page) enqueue_huge_page(page); } spin_unlock(&hugetlb_lock); + if (mapping) + hugetlb_put_quota(mapping, 1); } /* @@ -165,58 +194,133 @@ static int adjust_pool_surplus(int delta) return ret; } -static int alloc_fresh_huge_page(void) +static struct page *alloc_fresh_huge_page_node(int nid) { - static int prev_nid; struct page *page; - int nid; - /* - * Copy static prev_nid to local nid, work on that, then 
copy it - * back to prev_nid afterwards: otherwise there's a window in which - * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node. - * But we don't need to use a spin_lock here: it really doesn't - * matter if occasionally a racer chooses the same nid as we do. - */ - nid = next_node(prev_nid, node_online_map); - if (nid == MAX_NUMNODES) - nid = first_node(node_online_map); - prev_nid = nid; - - page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN, - HUGETLB_PAGE_ORDER); + page = alloc_pages_node(nid, + htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE| + __GFP_REPEAT|__GFP_NOWARN, + HUGETLB_PAGE_ORDER); if (page) { + if (arch_prepare_hugepage(page)) { + __free_pages(page, HUGETLB_PAGE_ORDER); + return NULL; + } set_compound_page_dtor(page, free_huge_page); spin_lock(&hugetlb_lock); nr_huge_pages++; - nr_huge_pages_node[page_to_nid(page)]++; + nr_huge_pages_node[nid]++; spin_unlock(&hugetlb_lock); put_page(page); /* free it into the hugepage allocator */ - return 1; } - return 0; + + return page; +} + +static int alloc_fresh_huge_page(void) +{ + struct page *page; + int start_nid; + int next_nid; + int ret = 0; + + start_nid = hugetlb_next_nid; + + do { + page = alloc_fresh_huge_page_node(hugetlb_next_nid); + if (page) + ret = 1; + /* + * Use a helper variable to find the next node and then + * copy it back to hugetlb_next_nid afterwards: + * otherwise there's a window in which a racer might + * pass invalid nid MAX_NUMNODES to alloc_pages_node. + * But we don't need to use a spin_lock here: it really + * doesn't matter if occasionally a racer chooses the + * same nid as we do. Move nid forward in the mask even + * if we just successfully allocated a hugepage so that + * the next caller gets hugepages on the next node. + */ + next_nid = next_node(hugetlb_next_nid, node_online_map); + if (next_nid == MAX_NUMNODES) + next_nid = first_node(node_online_map); + hugetlb_next_nid = next_nid; + } while (!page && hugetlb_next_nid != start_nid); + + if (ret) + count_vm_event(HTLB_BUDDY_PGALLOC); + else + count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); + + return ret; } static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma, unsigned long address) { struct page *page; + unsigned int nid; - /* Check if the dynamic pool is enabled */ - if (!hugetlb_dynamic_pool) + /* + * Assume we will successfully allocate the surplus page to + * prevent racing processes from causing the surplus to exceed + * overcommit + * + * This however introduces a different race, where a process B + * tries to grow the static hugepage pool while alloc_pages() is + * called by process A. B will only examine the per-node + * counters in determining if surplus huge pages can be + * converted to normal huge pages in adjust_pool_surplus(). A + * won't be able to increment the per-node counter, until the + * lock is dropped by B, but B doesn't drop hugetlb_lock until + * no more huge pages can be converted from surplus to normal + * state (and doesn't try to convert again). Thus, we have a + * case where a surplus huge page exists, the pool is grown, and + * the surplus huge page still exists after, even though it + * should just have been converted to a normal huge page. This + * does not leak memory, though, as the hugepage will be freed + * once it is out of use. It also does not allow the counters to + * go out of whack in adjust_pool_surplus() as we don't modify + * the node values until we've gotten the hugepage and only the + * per-node value is checked there. 
+ */ + spin_lock(&hugetlb_lock); + if (surplus_huge_pages >= nr_overcommit_huge_pages) { + spin_unlock(&hugetlb_lock); return NULL; + } else { + nr_huge_pages++; + surplus_huge_pages++; + } + spin_unlock(&hugetlb_lock); - page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN, + page = alloc_pages(htlb_alloc_mask|__GFP_COMP| + __GFP_REPEAT|__GFP_NOWARN, HUGETLB_PAGE_ORDER); + + spin_lock(&hugetlb_lock); if (page) { + /* + * This page is now managed by the hugetlb allocator and has + * no users -- drop the buddy allocator's reference. + */ + put_page_testzero(page); + VM_BUG_ON(page_count(page)); + nid = page_to_nid(page); set_compound_page_dtor(page, free_huge_page); - spin_lock(&hugetlb_lock); - nr_huge_pages++; - nr_huge_pages_node[page_to_nid(page)]++; - surplus_huge_pages++; - surplus_huge_pages_node[page_to_nid(page)]++; - spin_unlock(&hugetlb_lock); + /* + * We incremented the global counters already + */ + nr_huge_pages_node[nid]++; + surplus_huge_pages_node[nid]++; + __count_vm_event(HTLB_BUDDY_PGALLOC); + } else { + nr_huge_pages--; + surplus_huge_pages--; + __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); } + spin_unlock(&hugetlb_lock); return page; } @@ -233,8 +337,10 @@ static int gather_surplus_pages(int delta) int needed, allocated; needed = (resv_huge_pages + delta) - free_huge_pages; - if (needed <= 0) + if (needed <= 0) { + resv_huge_pages += delta; return 0; + } allocated = 0; INIT_LIST_HEAD(&surplus_list); @@ -272,17 +378,37 @@ retry: * The surplus_list now contains _at_least_ the number of extra pages * needed to accomodate the reservation. Add the appropriate number * of pages to the hugetlb pool and free the extras back to the buddy - * allocator. + * allocator. Commit the entire reservation here to prevent another + * process from stealing the pages as they are added to the pool but + * before they are reserved. */ needed += allocated; + resv_huge_pages += delta; ret = 0; free: + /* Free the needed pages to the hugetlb pool */ list_for_each_entry_safe(page, tmp, &surplus_list, lru) { + if ((--needed) < 0) + break; list_del(&page->lru); - if ((--needed) >= 0) - enqueue_huge_page(page); - else - update_and_free_page(page); + enqueue_huge_page(page); + } + + /* Free unnecessary surplus pages to the buddy allocator */ + if (!list_empty(&surplus_list)) { + spin_unlock(&hugetlb_lock); + list_for_each_entry_safe(page, tmp, &surplus_list, lru) { + list_del(&page->lru); + /* + * The page has a reference count of zero already, so + * call free_huge_page directly instead of using + * put_page. This must be done with hugetlb_lock + * unlocked which is safe because free_huge_page takes + * hugetlb_lock before deciding how to free the page. + */ + free_huge_page(page); + } + spin_lock(&hugetlb_lock); } return ret; @@ -293,15 +419,26 @@ free: * allocated to satisfy the reservation must be explicitly freed if they were * never used. */ -void return_unused_surplus_pages(unsigned long unused_resv_pages) +static void return_unused_surplus_pages(unsigned long unused_resv_pages) { static int nid = -1; struct page *page; unsigned long nr_pages; + /* + * We want to release as many surplus pages as possible, spread + * evenly across all nodes. Iterate across all nodes until we + * can no longer free unreserved surplus pages. This occurs when + * the nodes with surplus pages have no free pages. 
+ */ + unsigned long remaining_iterations = num_online_nodes(); + + /* Uncommit the reservation */ + resv_huge_pages -= unused_resv_pages; + nr_pages = min(unused_resv_pages, surplus_huge_pages); - while (nr_pages) { + while (remaining_iterations-- && nr_pages) { nid = next_node(nid, node_online_map); if (nid == MAX_NUMNODES) nid = first_node(node_online_map); @@ -319,39 +456,60 @@ void return_unused_surplus_pages(unsigned long unused_resv_pages) surplus_huge_pages--; surplus_huge_pages_node[nid]--; nr_pages--; + remaining_iterations = num_online_nodes(); } } } -static struct page *alloc_huge_page(struct vm_area_struct *vma, - unsigned long addr) + +static struct page *alloc_huge_page_shared(struct vm_area_struct *vma, + unsigned long addr) { - struct page *page = NULL; - int use_reserved_page = vma->vm_flags & VM_MAYSHARE; + struct page *page; spin_lock(&hugetlb_lock); - if (!use_reserved_page && (free_huge_pages <= resv_huge_pages)) - goto fail; + page = dequeue_huge_page_vma(vma, addr); + spin_unlock(&hugetlb_lock); + return page ? page : ERR_PTR(-VM_FAULT_OOM); +} - page = dequeue_huge_page(vma, addr); - if (!page) - goto fail; +static struct page *alloc_huge_page_private(struct vm_area_struct *vma, + unsigned long addr) +{ + struct page *page = NULL; + if (hugetlb_get_quota(vma->vm_file->f_mapping, 1)) + return ERR_PTR(-VM_FAULT_SIGBUS); + + spin_lock(&hugetlb_lock); + if (free_huge_pages > resv_huge_pages) + page = dequeue_huge_page_vma(vma, addr); spin_unlock(&hugetlb_lock); - set_page_refcounted(page); + if (!page) { + page = alloc_buddy_huge_page(vma, addr); + if (!page) { + hugetlb_put_quota(vma->vm_file->f_mapping, 1); + return ERR_PTR(-VM_FAULT_OOM); + } + } return page; +} -fail: - spin_unlock(&hugetlb_lock); +static struct page *alloc_huge_page(struct vm_area_struct *vma, + unsigned long addr) +{ + struct page *page; + struct address_space *mapping = vma->vm_file->f_mapping; - /* - * Private mappings do not use reserved huge pages so the allocation - * may have failed due to an undersized hugetlb pool. Try to grab a - * surplus huge page from the buddy allocator. - */ - if (!use_reserved_page) - page = alloc_buddy_huge_page(vma, addr); + if (vma->vm_flags & VM_MAYSHARE) + page = alloc_huge_page_shared(vma, addr); + else + page = alloc_huge_page_private(vma, addr); + if (!IS_ERR(page)) { + set_page_refcounted(page); + set_page_private(page, (unsigned long) mapping); + } return page; } @@ -365,6 +523,8 @@ static int __init hugetlb_init(void) for (i = 0; i < MAX_NUMNODES; ++i) INIT_LIST_HEAD(&hugepage_freelists[i]); + hugetlb_next_nid = first_node(node_online_map); + for (i = 0; i < max_huge_pages; ++i) { if (!alloc_fresh_huge_page()) break; @@ -403,14 +563,14 @@ static void try_to_free_low(unsigned long count) for (i = 0; i < MAX_NUMNODES; ++i) { struct page *page, *next; list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) { + if (count >= nr_huge_pages) + return; if (PageHighMem(page)) continue; list_del(&page->lru); update_and_free_page(page); free_huge_pages--; free_huge_pages_node[page_to_nid(page)]--; - if (count >= nr_huge_pages) - return; } } } @@ -429,6 +589,12 @@ static unsigned long set_max_huge_pages(unsigned long count) * Increase the pool size * First take pages out of surplus state. Then make up the * remaining difference by allocating fresh huge pages. + * + * We might race with alloc_buddy_huge_page() here and be unable + * to convert a surplus huge page to a normal huge page. 
That is + * not critical, though, it just means the overall size of the + * pool might be one hugepage larger than it needs to be, but + * within all the constraints specified by the sysctls. */ spin_lock(&hugetlb_lock); while (surplus_huge_pages && count > persistent_huge_pages) { @@ -450,8 +616,6 @@ static unsigned long set_max_huge_pages(unsigned long count) goto out; } - if (count >= persistent_huge_pages) - goto out; /* * Decrease the pool size @@ -459,11 +623,20 @@ static unsigned long set_max_huge_pages(unsigned long count) * to keep enough around to satisfy reservations). Then place * pages into surplus state as needed so the pool will shrink * to the desired size as pages become free. + * + * By placing pages into the surplus state independent of the + * overcommit value, we are allowing the surplus pool size to + * exceed overcommit. There are few sane options here. Since + * alloc_buddy_huge_page() is checking the global counter, + * though, we'll note that we're not allowed to exceed surplus + * and won't grow the pool anywhere else. Not until one of the + * sysctls are changed, or the surplus pages go out of use. */ - min_count = max(count, resv_huge_pages); + min_count = resv_huge_pages + nr_huge_pages - free_huge_pages; + min_count = max(count, min_count); try_to_free_low(min_count); while (min_count < persistent_huge_pages) { - struct page *page = dequeue_huge_page(NULL, 0); + struct page *page = dequeue_huge_page(); if (!page) break; update_and_free_page(page); @@ -499,6 +672,17 @@ int hugetlb_treat_movable_handler(struct ctl_table *table, int write, return 0; } +int hugetlb_overcommit_handler(struct ctl_table *table, int write, + struct file *file, void __user *buffer, + size_t *length, loff_t *ppos) +{ + proc_doulongvec_minmax(table, write, file, buffer, length, ppos); + spin_lock(&hugetlb_lock); + nr_overcommit_huge_pages = sysctl_overcommit_huge_pages; + spin_unlock(&hugetlb_lock); + return 0; +} + #endif /* CONFIG_SYSCTL */ int hugetlb_report_meminfo(char *buf) @@ -520,9 +704,11 @@ int hugetlb_report_node_meminfo(int nid, char *buf) { return sprintf(buf, "Node %d HugePages_Total: %5u\n" - "Node %d HugePages_Free: %5u\n", + "Node %d HugePages_Free: %5u\n" + "Node %d HugePages_Surp: %5u\n", nid, nr_huge_pages_node[nid], - nid, free_huge_pages_node[nid]); + nid, free_huge_pages_node[nid], + nid, surplus_huge_pages_node[nid]); } /* Return the number pages of memory we physically have, in PAGE_SIZE units. 
*/ @@ -556,7 +742,7 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); } else { - entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot)); + entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot)); } entry = pte_mkyoung(entry); entry = pte_mkhuge(entry); @@ -569,8 +755,8 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma, { pte_t entry; - entry = pte_mkwrite(pte_mkdirty(*ptep)); - if (ptep_set_access_flags(vma, address, ptep, entry, 1)) { + entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep))); + if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) { update_mmu_cache(vma, address, entry); } } @@ -593,12 +779,17 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, dst_pte = huge_pte_alloc(dst, addr); if (!dst_pte) goto nomem; + + /* If the pagetables are shared don't copy or take references */ + if (dst_pte == src_pte) + continue; + spin_lock(&dst->page_table_lock); spin_lock(&src->page_table_lock); - if (!pte_none(*src_pte)) { + if (!huge_pte_none(huge_ptep_get(src_pte))) { if (cow) - ptep_set_wrprotect(src, addr, src_pte); - entry = *src_pte; + huge_ptep_set_wrprotect(src, addr, src_pte); + entry = huge_ptep_get(src_pte); ptepage = pte_page(entry); get_page(ptepage); set_huge_pte_at(dst, addr, dst_pte, entry); @@ -642,7 +833,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, continue; pte = huge_ptep_get_and_clear(mm, address, ptep); - if (pte_none(pte)) + if (huge_pte_none(pte)) continue; page = pte_page(pte); @@ -695,18 +886,20 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, page_cache_get(old_page); new_page = alloc_huge_page(vma, address); - if (!new_page) { + if (IS_ERR(new_page)) { page_cache_release(old_page); - return VM_FAULT_OOM; + return -PTR_ERR(new_page); } spin_unlock(&mm->page_table_lock); copy_huge_page(new_page, old_page, address, vma); + __SetPageUptodate(new_page); spin_lock(&mm->page_table_lock); ptep = huge_pte_offset(mm, address & HPAGE_MASK); - if (likely(pte_same(*ptep, pte))) { + if (likely(pte_same(huge_ptep_get(ptep), pte))) { /* Break COW */ + huge_ptep_clear_flush(vma, address, ptep); set_huge_pte_at(mm, address, ptep, make_huge_pte(vma, new_page, 1)); /* Make the old page be freed below */ @@ -741,27 +934,29 @@ retry: size = i_size_read(mapping->host) >> HPAGE_SHIFT; if (idx >= size) goto out; - if (hugetlb_get_quota(mapping)) - goto out; page = alloc_huge_page(vma, address); - if (!page) { - hugetlb_put_quota(mapping); - ret = VM_FAULT_OOM; + if (IS_ERR(page)) { + ret = -PTR_ERR(page); goto out; } clear_huge_page(page, address); + __SetPageUptodate(page); if (vma->vm_flags & VM_SHARED) { int err; + struct inode *inode = mapping->host; err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); if (err) { put_page(page); - hugetlb_put_quota(mapping); if (err == -EEXIST) goto retry; goto out; } + + spin_lock(&inode->i_lock); + inode->i_blocks += BLOCKS_PER_HUGEPAGE; + spin_unlock(&inode->i_lock); } else lock_page(page); } @@ -772,7 +967,7 @@ retry: goto backout; ret = 0; - if (!pte_none(*ptep)) + if (!huge_pte_none(huge_ptep_get(ptep))) goto backout; new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) @@ -791,7 +986,6 @@ out: backout: spin_unlock(&mm->page_table_lock); - hugetlb_put_quota(mapping); unlock_page(page); put_page(page); goto out; @@ -815,8 +1009,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, * the same page in the 
page cache. */ mutex_lock(&hugetlb_instantiation_mutex); - entry = *ptep; - if (pte_none(entry)) { + entry = huge_ptep_get(ptep); + if (huge_pte_none(entry)) { ret = hugetlb_no_page(mm, vma, address, ptep, write_access); mutex_unlock(&hugetlb_instantiation_mutex); return ret; @@ -826,7 +1020,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, spin_lock(&mm->page_table_lock); /* Check for a racing update before calling hugetlb_cow */ - if (likely(pte_same(entry, *ptep))) + if (likely(pte_same(entry, huge_ptep_get(ptep)))) if (write_access && !pte_write(entry)) ret = hugetlb_cow(mm, vma, address, ptep, entry); spin_unlock(&mm->page_table_lock); @@ -837,7 +1031,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page **pages, struct vm_area_struct **vmas, - unsigned long *position, int *length, int i) + unsigned long *position, int *length, int i, + int write) { unsigned long pfn_offset; unsigned long vaddr = *position; @@ -855,11 +1050,12 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, */ pte = huge_pte_offset(mm, vaddr & HPAGE_MASK); - if (!pte || pte_none(*pte)) { + if (!pte || huge_pte_none(huge_ptep_get(pte)) || + (write && !pte_write(huge_ptep_get(pte)))) { int ret; spin_unlock(&mm->page_table_lock); - ret = hugetlb_fault(mm, vma, vaddr, 0); + ret = hugetlb_fault(mm, vma, vaddr, write); spin_lock(&mm->page_table_lock); if (!(ret & VM_FAULT_ERROR)) continue; @@ -871,7 +1067,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, } pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT; - page = pte_page(*pte); + page = pte_page(huge_ptep_get(pte)); same_page: if (pages) { get_page(page); @@ -920,7 +1116,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma, continue; if (huge_pmd_unshare(mm, &address, ptep)) continue; - if (!pte_none(*ptep)) { + if (!huge_pte_none(huge_ptep_get(ptep))) { pte = huge_ptep_get_and_clear(mm, address, ptep); pte = pte_mkhuge(pte_modify(pte, newprot)); set_huge_pte_at(mm, address, ptep, pte); @@ -986,10 +1182,10 @@ static long region_chg(struct list_head *head, long f, long t) /* If we are below the current region then a new region is required. * Subtle, allocate a new region at the position but make it zero - * size such that we can guarentee to record the reservation. */ + * size such that we can guarantee to record the reservation. 
*/ if (&rg->link == head || t < rg->from) { nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); - if (nrg == 0) + if (!nrg) return -ENOMEM; nrg->from = f; nrg->to = f; @@ -1079,12 +1275,13 @@ static int hugetlb_acct_memory(long delta) if (gather_surplus_pages(delta) < 0) goto out; - if (delta > cpuset_mems_nr(free_huge_pages_node)) + if (delta > cpuset_mems_nr(free_huge_pages_node)) { + return_unused_surplus_pages(delta); goto out; + } } ret = 0; - resv_huge_pages += delta; if (delta < 0) return_unused_surplus_pages((unsigned long) -delta); @@ -1101,9 +1298,13 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to) if (chg < 0) return chg; + if (hugetlb_get_quota(inode->i_mapping, chg)) + return -ENOSPC; ret = hugetlb_acct_memory(chg); - if (ret < 0) + if (ret < 0) { + hugetlb_put_quota(inode->i_mapping, chg); return ret; + } region_add(&inode->i_mapping->private_list, from, to); return 0; } @@ -1111,5 +1312,11 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to) void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) { long chg = region_truncate(&inode->i_mapping->private_list, offset); - hugetlb_acct_memory(freed - chg); + + spin_lock(&inode->i_lock); + inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed; + spin_unlock(&inode->i_lock); + + hugetlb_put_quota(inode->i_mapping, (chg - freed)); + hugetlb_acct_memory(-(chg - freed)); }
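
The most subtle part of this patch is the optimistic surplus accounting in alloc_buddy_huge_page(): the allocator refuses to start if surplus_huge_pages has already reached nr_overcommit_huge_pages, bumps the global counters under hugetlb_lock *before* calling into the buddy allocator, and rolls the counters back if the allocation fails, so racing callers can never collectively overshoot the overcommit limit. The standalone C sketch below is a minimal userspace illustration of that check/reserve/rollback pattern, not kernel code: a pthread mutex stands in for hugetlb_lock, try_alloc_page() for the buddy allocator, and all names and values are hypothetical, chosen only for this example.

/*
 * Minimal userspace sketch (assumption-laden, illustration only) of the
 * optimistic overcommit accounting used by alloc_buddy_huge_page() in the
 * patch above.  pool_lock stands in for hugetlb_lock, try_alloc_page() for
 * the buddy allocator; none of these names are kernel API.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long nr_pages;              /* total pages in the pool            */
static unsigned long surplus_pages;         /* pages allocated beyond the static pool */
static unsigned long overcommit_limit = 4;  /* analogue of nr_overcommit_huge_pages   */

/* Stand-in for the buddy allocator; fails some of the time to exercise rollback. */
static void *try_alloc_page(void)
{
	return (rand() % 4) ? malloc(4096) : NULL;
}

static void *alloc_surplus_page(void)
{
	void *page;

	/*
	 * Reserve the surplus slot *before* allocating, as the patch does,
	 * so concurrent callers cannot all pass the limit check and then
	 * collectively exceed the overcommit value.
	 */
	pthread_mutex_lock(&pool_lock);
	if (surplus_pages >= overcommit_limit) {
		pthread_mutex_unlock(&pool_lock);
		return NULL;
	}
	nr_pages++;
	surplus_pages++;
	pthread_mutex_unlock(&pool_lock);

	page = try_alloc_page();

	pthread_mutex_lock(&pool_lock);
	if (!page) {
		/* Allocation failed: undo the optimistic accounting. */
		nr_pages--;
		surplus_pages--;
	}
	pthread_mutex_unlock(&pool_lock);
	return page;
}

int main(void)
{
	/* Single-threaded driver; the lock marks where the kernel serializes. */
	for (int i = 0; i < 8; i++) {
		void *p = alloc_surplus_page();
		printf("attempt %d: %s (surplus=%lu, limit=%lu)\n",
		       i, p ? "allocated" : "refused or failed",
		       surplus_pages, overcommit_limit);
	}
	return 0;
}

The same ordering is why the error path in the real function only decrements the global counters and defers the per-node counters until after a successful allocation: the global reservation is what other racing allocators check, so it must be held across the unlocked allocation, while the per-node bookkeeping can safely wait until the page's node is known.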