X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=mm%2Fhugetlb.c;h=51c9e2c0164068681b299b37f48840af59d5c79d;hb=c0d43990768b6ca83604ff4be80425b89d317e2f;hp=27fad5d9bcf648b27564465b35c3cd72969257fa;hpb=b45b5bd65f668a665db40d093e4e1fe563533608;p=safe%2Fjmp%2Flinux-2.6 diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 27fad5d..51c9e2c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -22,11 +22,19 @@ #include "internal.h" const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL; -static unsigned long nr_huge_pages, free_huge_pages, reserved_huge_pages; +static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages; +static unsigned long surplus_huge_pages; +static unsigned long nr_overcommit_huge_pages; unsigned long max_huge_pages; +unsigned long sysctl_overcommit_huge_pages; static struct list_head hugepage_freelists[MAX_NUMNODES]; static unsigned int nr_huge_pages_node[MAX_NUMNODES]; static unsigned int free_huge_pages_node[MAX_NUMNODES]; +static unsigned int surplus_huge_pages_node[MAX_NUMNODES]; +static gfp_t htlb_alloc_mask = GFP_HIGHUSER; +unsigned long hugepages_treat_as_movable; +static int hugetlb_next_nid; + /* * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages */ @@ -39,19 +47,19 @@ static void clear_huge_page(struct page *page, unsigned long addr) might_sleep(); for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) { cond_resched(); - clear_user_highpage(page + i, addr); + clear_user_highpage(page + i, addr + i * PAGE_SIZE); } } static void copy_huge_page(struct page *dst, struct page *src, - unsigned long addr) + unsigned long addr, struct vm_area_struct *vma) { int i; might_sleep(); for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) { cond_resched(); - copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE); + copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma); } } @@ -63,193 +71,422 @@ static void enqueue_huge_page(struct page *page) free_huge_pages_node[nid]++; } -static struct page *dequeue_huge_page(struct vm_area_struct *vma, +static struct page *dequeue_huge_page(void) +{ + int nid; + struct page *page = NULL; + + for (nid = 0; nid < MAX_NUMNODES; ++nid) { + if (!list_empty(&hugepage_freelists[nid])) { + page = list_entry(hugepage_freelists[nid].next, + struct page, lru); + list_del(&page->lru); + free_huge_pages--; + free_huge_pages_node[nid]--; + break; + } + } + return page; +} + +static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma, unsigned long address) { - int nid = numa_node_id(); + int nid; struct page *page = NULL; - struct zonelist *zonelist = huge_zonelist(vma, address); + struct mempolicy *mpol; + struct zonelist *zonelist = huge_zonelist(vma, address, + htlb_alloc_mask, &mpol); struct zone **z; for (z = zonelist->zones; *z; z++) { - nid = (*z)->zone_pgdat->node_id; - if (cpuset_zone_allowed(*z, GFP_HIGHUSER) && - !list_empty(&hugepage_freelists[nid])) + nid = zone_to_nid(*z); + if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) && + !list_empty(&hugepage_freelists[nid])) { + page = list_entry(hugepage_freelists[nid].next, + struct page, lru); + list_del(&page->lru); + free_huge_pages--; + free_huge_pages_node[nid]--; + if (vma && vma->vm_flags & VM_MAYSHARE) + resv_huge_pages--; break; + } } + mpol_free(mpol); /* unref if mpol !NULL */ + return page; +} - if (*z) { - page = list_entry(hugepage_freelists[nid].next, - struct page, lru); - list_del(&page->lru); - free_huge_pages--; - free_huge_pages_node[nid]--; +static void update_and_free_page(struct page *page) +{ + int i; + nr_huge_pages--; + 
nr_huge_pages_node[page_to_nid(page)]--; + for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) { + page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced | + 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved | + 1 << PG_private | 1<< PG_writeback); } - return page; + set_compound_page_dtor(page, NULL); + set_page_refcounted(page); + __free_pages(page, HUGETLB_PAGE_ORDER); } -static int alloc_fresh_huge_page(void) +static void free_huge_page(struct page *page) +{ + int nid = page_to_nid(page); + struct address_space *mapping; + + mapping = (struct address_space *) page_private(page); + set_page_private(page, 0); + BUG_ON(page_count(page)); + INIT_LIST_HEAD(&page->lru); + + spin_lock(&hugetlb_lock); + if (surplus_huge_pages_node[nid]) { + update_and_free_page(page); + surplus_huge_pages--; + surplus_huge_pages_node[nid]--; + } else { + enqueue_huge_page(page); + } + spin_unlock(&hugetlb_lock); + if (mapping) + hugetlb_put_quota(mapping, 1); +} + +/* + * Increment or decrement surplus_huge_pages. Keep node-specific counters + * balanced by operating on them in a round-robin fashion. + * Returns 1 if an adjustment was made. + */ +static int adjust_pool_surplus(int delta) +{ + static int prev_nid; + int nid = prev_nid; + int ret = 0; + + VM_BUG_ON(delta != -1 && delta != 1); + do { + nid = next_node(nid, node_online_map); + if (nid == MAX_NUMNODES) + nid = first_node(node_online_map); + + /* To shrink on this node, there must be a surplus page */ + if (delta < 0 && !surplus_huge_pages_node[nid]) + continue; + /* Surplus cannot exceed the total number of pages */ + if (delta > 0 && surplus_huge_pages_node[nid] >= + nr_huge_pages_node[nid]) + continue; + + surplus_huge_pages += delta; + surplus_huge_pages_node[nid] += delta; + ret = 1; + break; + } while (nid != prev_nid); + + prev_nid = nid; + return ret; +} + +static struct page *alloc_fresh_huge_page_node(int nid) { - static int nid = 0; struct page *page; - page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN, - HUGETLB_PAGE_ORDER); - nid = (nid + 1) % num_online_nodes(); + + page = alloc_pages_node(nid, + htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN, + HUGETLB_PAGE_ORDER); if (page) { - page[1].lru.next = (void *)free_huge_page; /* dtor */ + set_compound_page_dtor(page, free_huge_page); spin_lock(&hugetlb_lock); nr_huge_pages++; - nr_huge_pages_node[page_to_nid(page)]++; + nr_huge_pages_node[nid]++; spin_unlock(&hugetlb_lock); put_page(page); /* free it into the hugepage allocator */ - return 1; } - return 0; + + return page; } -void free_huge_page(struct page *page) +static int alloc_fresh_huge_page(void) { - BUG_ON(page_count(page)); + struct page *page; + int start_nid; + int next_nid; + int ret = 0; - INIT_LIST_HEAD(&page->lru); + start_nid = hugetlb_next_nid; - spin_lock(&hugetlb_lock); - enqueue_huge_page(page); - spin_unlock(&hugetlb_lock); + do { + page = alloc_fresh_huge_page_node(hugetlb_next_nid); + if (page) + ret = 1; + /* + * Use a helper variable to find the next node and then + * copy it back to hugetlb_next_nid afterwards: + * otherwise there's a window in which a racer might + * pass invalid nid MAX_NUMNODES to alloc_pages_node. + * But we don't need to use a spin_lock here: it really + * doesn't matter if occasionally a racer chooses the + * same nid as we do. Move nid forward in the mask even + * if we just successfully allocated a hugepage so that + * the next caller gets hugepages on the next node. 
+ */ + next_nid = next_node(hugetlb_next_nid, node_online_map); + if (next_nid == MAX_NUMNODES) + next_nid = first_node(node_online_map); + hugetlb_next_nid = next_nid; + } while (!page && hugetlb_next_nid != start_nid); + + return ret; } -struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr) +static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma, + unsigned long address) { - struct inode *inode = vma->vm_file->f_dentry->d_inode; struct page *page; - int use_reserve = 0; - unsigned long idx; + unsigned int nid; + /* + * Assume we will successfully allocate the surplus page to + * prevent racing processes from causing the surplus to exceed + * overcommit + * + * This however introduces a different race, where a process B + * tries to grow the static hugepage pool while alloc_pages() is + * called by process A. B will only examine the per-node + * counters in determining if surplus huge pages can be + * converted to normal huge pages in adjust_pool_surplus(). A + * won't be able to increment the per-node counter, until the + * lock is dropped by B, but B doesn't drop hugetlb_lock until + * no more huge pages can be converted from surplus to normal + * state (and doesn't try to convert again). Thus, we have a + * case where a surplus huge page exists, the pool is grown, and + * the surplus huge page still exists after, even though it + * should just have been converted to a normal huge page. This + * does not leak memory, though, as the hugepage will be freed + * once it is out of use. It also does not allow the counters to + * go out of whack in adjust_pool_surplus() as we don't modify + * the node values until we've gotten the hugepage and only the + * per-node value is checked there. + */ spin_lock(&hugetlb_lock); - - if (vma->vm_flags & VM_MAYSHARE) { - - /* idx = radix tree index, i.e. offset into file in - * HPAGE_SIZE units */ - idx = ((addr - vma->vm_start) >> HPAGE_SHIFT) - + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT)); - - /* The hugetlbfs specific inode info stores the number - * of "guaranteed available" (huge) pages. That is, - * the first 'prereserved_hpages' pages of the inode - * are either already instantiated, or have been - * pre-reserved (by hugetlb_reserve_for_inode()). Here - * we're in the process of instantiating the page, so - * we use this to determine whether to draw from the - * pre-reserved pool or the truly free pool. */ - if (idx < HUGETLBFS_I(inode)->prereserved_hpages) - use_reserve = 1; - } - - if (!use_reserve) { - if (free_huge_pages <= reserved_huge_pages) - goto fail; + if (surplus_huge_pages >= nr_overcommit_huge_pages) { + spin_unlock(&hugetlb_lock); + return NULL; } else { - BUG_ON(reserved_huge_pages == 0); - reserved_huge_pages--; + nr_huge_pages++; + surplus_huge_pages++; } + spin_unlock(&hugetlb_lock); - page = dequeue_huge_page(vma, addr); - if (!page) - goto fail; + page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN, + HUGETLB_PAGE_ORDER); + spin_lock(&hugetlb_lock); + if (page) { + /* + * This page is now managed by the hugetlb allocator and has + * no users -- drop the buddy allocator's reference. 
+ */ + put_page_testzero(page); + VM_BUG_ON(page_count(page)); + nid = page_to_nid(page); + set_compound_page_dtor(page, free_huge_page); + /* + * We incremented the global counters already + */ + nr_huge_pages_node[nid]++; + surplus_huge_pages_node[nid]++; + } else { + nr_huge_pages--; + surplus_huge_pages--; + } spin_unlock(&hugetlb_lock); - set_page_refcounted(page); - return page; - fail: - WARN_ON(use_reserve); /* reserved allocations shouldn't fail */ - spin_unlock(&hugetlb_lock); - return NULL; + return page; } -/* hugetlb_extend_reservation() - * - * Ensure that at least 'atleast' hugepages are, and will remain, - * available to instantiate the first 'atleast' pages of the given - * inode. If the inode doesn't already have this many pages reserved - * or instantiated, set aside some hugepages in the reserved pool to - * satisfy later faults (or fail now if there aren't enough, rather - * than getting the SIGBUS later). +/* + * Increase the hugetlb pool such that it can accomodate a reservation + * of size 'delta'. */ -int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info, - unsigned long atleast) +static int gather_surplus_pages(int delta) { - struct inode *inode = &info->vfs_inode; - unsigned long change_in_reserve = 0; - int ret = 0; - - spin_lock(&hugetlb_lock); - read_lock_irq(&inode->i_mapping->tree_lock); + struct list_head surplus_list; + struct page *page, *tmp; + int ret, i; + int needed, allocated; + + needed = (resv_huge_pages + delta) - free_huge_pages; + if (needed <= 0) { + resv_huge_pages += delta; + return 0; + } - if (info->prereserved_hpages >= atleast) - goto out; + allocated = 0; + INIT_LIST_HEAD(&surplus_list); - /* Because we always call this on shared mappings, none of the - * pages beyond info->prereserved_hpages can have been - * instantiated, so we need to reserve all of them now. */ - change_in_reserve = atleast - info->prereserved_hpages; + ret = -ENOMEM; +retry: + spin_unlock(&hugetlb_lock); + for (i = 0; i < needed; i++) { + page = alloc_buddy_huge_page(NULL, 0); + if (!page) { + /* + * We were not able to allocate enough pages to + * satisfy the entire reservation so we free what + * we've allocated so far. + */ + spin_lock(&hugetlb_lock); + needed = 0; + goto free; + } - if ((reserved_huge_pages + change_in_reserve) > free_huge_pages) { - ret = -ENOMEM; - goto out; + list_add(&page->lru, &surplus_list); } + allocated += needed; - reserved_huge_pages += change_in_reserve; - info->prereserved_hpages = atleast; + /* + * After retaking hugetlb_lock, we need to recalculate 'needed' + * because either resv_huge_pages or free_huge_pages may have changed. + */ + spin_lock(&hugetlb_lock); + needed = (resv_huge_pages + delta) - (free_huge_pages + allocated); + if (needed > 0) + goto retry; - out: - read_unlock_irq(&inode->i_mapping->tree_lock); - spin_unlock(&hugetlb_lock); + /* + * The surplus_list now contains _at_least_ the number of extra pages + * needed to accomodate the reservation. Add the appropriate number + * of pages to the hugetlb pool and free the extras back to the buddy + * allocator. Commit the entire reservation here to prevent another + * process from stealing the pages as they are added to the pool but + * before they are reserved. 
+ */ + needed += allocated; + resv_huge_pages += delta; + ret = 0; +free: + list_for_each_entry_safe(page, tmp, &surplus_list, lru) { + list_del(&page->lru); + if ((--needed) >= 0) + enqueue_huge_page(page); + else { + /* + * The page has a reference count of zero already, so + * call free_huge_page directly instead of using + * put_page. This must be done with hugetlb_lock + * unlocked which is safe because free_huge_page takes + * hugetlb_lock before deciding how to free the page. + */ + spin_unlock(&hugetlb_lock); + free_huge_page(page); + spin_lock(&hugetlb_lock); + } + } return ret; } -/* hugetlb_truncate_reservation() - * - * This returns pages reserved for the given inode to the general free - * hugepage pool. If the inode has any pages prereserved, but not - * instantiated, beyond offset (atmost << HPAGE_SIZE), then release - * them. +/* + * When releasing a hugetlb pool reservation, any surplus pages that were + * allocated to satisfy the reservation must be explicitly freed if they were + * never used. */ -void hugetlb_truncate_reservation(struct hugetlbfs_inode_info *info, - unsigned long atmost) +static void return_unused_surplus_pages(unsigned long unused_resv_pages) { - struct inode *inode = &info->vfs_inode; - struct address_space *mapping = inode->i_mapping; - unsigned long idx; - unsigned long change_in_reserve = 0; + static int nid = -1; struct page *page; + unsigned long nr_pages; - spin_lock(&hugetlb_lock); - read_lock_irq(&inode->i_mapping->tree_lock); + /* + * We want to release as many surplus pages as possible, spread + * evenly across all nodes. Iterate across all nodes until we + * can no longer free unreserved surplus pages. This occurs when + * the nodes with surplus pages have no free pages. + */ + unsigned long remaining_iterations = num_online_nodes(); - if (info->prereserved_hpages <= atmost) - goto out; + /* Uncommit the reservation */ + resv_huge_pages -= unused_resv_pages; - /* Count pages which were reserved, but not instantiated, and - * which we can now release. */ - for (idx = atmost; idx < info->prereserved_hpages; idx++) { - page = radix_tree_lookup(&mapping->page_tree, idx); - if (!page) - /* Pages which are already instantiated can't - * be unreserved (and in fact have already - * been removed from the reserved pool) */ - change_in_reserve++; + nr_pages = min(unused_resv_pages, surplus_huge_pages); + + while (remaining_iterations-- && nr_pages) { + nid = next_node(nid, node_online_map); + if (nid == MAX_NUMNODES) + nid = first_node(node_online_map); + + if (!surplus_huge_pages_node[nid]) + continue; + + if (!list_empty(&hugepage_freelists[nid])) { + page = list_entry(hugepage_freelists[nid].next, + struct page, lru); + list_del(&page->lru); + update_and_free_page(page); + free_huge_pages--; + free_huge_pages_node[nid]--; + surplus_huge_pages--; + surplus_huge_pages_node[nid]--; + nr_pages--; + remaining_iterations = num_online_nodes(); + } } +} - BUG_ON(reserved_huge_pages < change_in_reserve); - reserved_huge_pages -= change_in_reserve; - info->prereserved_hpages = atmost; - out: - read_unlock_irq(&inode->i_mapping->tree_lock); +static struct page *alloc_huge_page_shared(struct vm_area_struct *vma, + unsigned long addr) +{ + struct page *page; + + spin_lock(&hugetlb_lock); + page = dequeue_huge_page_vma(vma, addr); spin_unlock(&hugetlb_lock); + return page ? 
page : ERR_PTR(-VM_FAULT_OOM); +} + +static struct page *alloc_huge_page_private(struct vm_area_struct *vma, + unsigned long addr) +{ + struct page *page = NULL; + + if (hugetlb_get_quota(vma->vm_file->f_mapping, 1)) + return ERR_PTR(-VM_FAULT_SIGBUS); + + spin_lock(&hugetlb_lock); + if (free_huge_pages > resv_huge_pages) + page = dequeue_huge_page_vma(vma, addr); + spin_unlock(&hugetlb_lock); + if (!page) { + page = alloc_buddy_huge_page(vma, addr); + if (!page) { + hugetlb_put_quota(vma->vm_file->f_mapping, 1); + return ERR_PTR(-VM_FAULT_OOM); + } + } + return page; +} + +static struct page *alloc_huge_page(struct vm_area_struct *vma, + unsigned long addr) +{ + struct page *page; + struct address_space *mapping = vma->vm_file->f_mapping; + + if (vma->vm_flags & VM_MAYSHARE) + page = alloc_huge_page_shared(vma, addr); + else + page = alloc_huge_page_private(vma, addr); + + if (!IS_ERR(page)) { + set_page_refcounted(page); + set_page_private(page, (unsigned long) mapping); + } + return page; } static int __init hugetlb_init(void) @@ -262,6 +499,8 @@ static int __init hugetlb_init(void) for (i = 0; i < MAX_NUMNODES; ++i) INIT_LIST_HEAD(&hugepage_freelists[i]); + hugetlb_next_nid = first_node(node_online_map); + for (i = 0; i < max_huge_pages; ++i) { if (!alloc_fresh_huge_page()) break; @@ -280,38 +519,34 @@ static int __init hugetlb_setup(char *s) } __setup("hugepages=", hugetlb_setup); -#ifdef CONFIG_SYSCTL -static void update_and_free_page(struct page *page) +static unsigned int cpuset_mems_nr(unsigned int *array) { - int i; - nr_huge_pages--; - nr_huge_pages_node[page_zone(page)->zone_pgdat->node_id]--; - for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) { - page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced | - 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved | - 1 << PG_private | 1<< PG_writeback); - } - page[1].lru.next = NULL; - set_page_refcounted(page); - __free_pages(page, HUGETLB_PAGE_ORDER); + int node; + unsigned int nr = 0; + + for_each_node_mask(node, cpuset_current_mems_allowed) + nr += array[node]; + + return nr; } +#ifdef CONFIG_SYSCTL #ifdef CONFIG_HIGHMEM static void try_to_free_low(unsigned long count) { - int i, nid; + int i; + for (i = 0; i < MAX_NUMNODES; ++i) { struct page *page, *next; list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) { + if (count >= nr_huge_pages) + return; if (PageHighMem(page)) continue; list_del(&page->lru); update_and_free_page(page); - nid = page_zone(page)->zone_pgdat->node_id; free_huge_pages--; - free_huge_pages_node[nid]--; - if (count >= nr_huge_pages) - return; + free_huge_pages_node[page_to_nid(page)]--; } } } @@ -321,25 +556,75 @@ static inline void try_to_free_low(unsigned long count) } #endif +#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages) static unsigned long set_max_huge_pages(unsigned long count) { - while (count > nr_huge_pages) { - if (!alloc_fresh_huge_page()) - return nr_huge_pages; - } - if (count >= nr_huge_pages) - return nr_huge_pages; + unsigned long min_count, ret; + /* + * Increase the pool size + * First take pages out of surplus state. Then make up the + * remaining difference by allocating fresh huge pages. + * + * We might race with alloc_buddy_huge_page() here and be unable + * to convert a surplus huge page to a normal huge page. That is + * not critical, though, it just means the overall size of the + * pool might be one hugepage larger than it needs to be, but + * within all the constraints specified by the sysctls. 
+ */ spin_lock(&hugetlb_lock); - try_to_free_low(count); - while (count < nr_huge_pages) { - struct page *page = dequeue_huge_page(NULL, 0); + while (surplus_huge_pages && count > persistent_huge_pages) { + if (!adjust_pool_surplus(-1)) + break; + } + + while (count > persistent_huge_pages) { + int ret; + /* + * If this allocation races such that we no longer need the + * page, free_huge_page will handle it by freeing the page + * and reducing the surplus. + */ + spin_unlock(&hugetlb_lock); + ret = alloc_fresh_huge_page(); + spin_lock(&hugetlb_lock); + if (!ret) + goto out; + + } + + /* + * Decrease the pool size + * First return free pages to the buddy allocator (being careful + * to keep enough around to satisfy reservations). Then place + * pages into surplus state as needed so the pool will shrink + * to the desired size as pages become free. + * + * By placing pages into the surplus state independent of the + * overcommit value, we are allowing the surplus pool size to + * exceed overcommit. There are few sane options here. Since + * alloc_buddy_huge_page() is checking the global counter, + * though, we'll note that we're not allowed to exceed surplus + * and won't grow the pool anywhere else. Not until one of the + * sysctls are changed, or the surplus pages go out of use. + */ + min_count = resv_huge_pages + nr_huge_pages - free_huge_pages; + min_count = max(count, min_count); + try_to_free_low(min_count); + while (min_count < persistent_huge_pages) { + struct page *page = dequeue_huge_page(); if (!page) break; update_and_free_page(page); } + while (count < persistent_huge_pages) { + if (!adjust_pool_surplus(1)) + break; + } +out: + ret = persistent_huge_pages; spin_unlock(&hugetlb_lock); - return nr_huge_pages; + return ret; } int hugetlb_sysctl_handler(struct ctl_table *table, int write, @@ -350,6 +635,30 @@ int hugetlb_sysctl_handler(struct ctl_table *table, int write, max_huge_pages = set_max_huge_pages(max_huge_pages); return 0; } + +int hugetlb_treat_movable_handler(struct ctl_table *table, int write, + struct file *file, void __user *buffer, + size_t *length, loff_t *ppos) +{ + proc_dointvec(table, write, file, buffer, length, ppos); + if (hugepages_treat_as_movable) + htlb_alloc_mask = GFP_HIGHUSER_MOVABLE; + else + htlb_alloc_mask = GFP_HIGHUSER; + return 0; +} + +int hugetlb_overcommit_handler(struct ctl_table *table, int write, + struct file *file, void __user *buffer, + size_t *length, loff_t *ppos) +{ + proc_doulongvec_minmax(table, write, file, buffer, length, ppos); + spin_lock(&hugetlb_lock); + nr_overcommit_huge_pages = sysctl_overcommit_huge_pages; + spin_unlock(&hugetlb_lock); + return 0; +} + #endif /* CONFIG_SYSCTL */ int hugetlb_report_meminfo(char *buf) @@ -357,11 +666,13 @@ int hugetlb_report_meminfo(char *buf) return sprintf(buf, "HugePages_Total: %5lu\n" "HugePages_Free: %5lu\n" - "HugePages_Rsvd: %5lu\n" + "HugePages_Rsvd: %5lu\n" + "HugePages_Surp: %5lu\n" "Hugepagesize: %5lu kB\n", nr_huge_pages, free_huge_pages, - reserved_huge_pages, + resv_huge_pages, + surplus_huge_pages, HPAGE_SIZE/1024); } @@ -369,9 +680,11 @@ int hugetlb_report_node_meminfo(int nid, char *buf) { return sprintf(buf, "Node %d HugePages_Total: %5u\n" - "Node %d HugePages_Free: %5u\n", + "Node %d HugePages_Free: %5u\n" + "Node %d HugePages_Surp: %5u\n", nid, nr_huge_pages_node[nid], - nid, free_huge_pages_node[nid]); + nid, free_huge_pages_node[nid], + nid, surplus_huge_pages_node[nid]); } /* Return the number pages of memory we physically have, in PAGE_SIZE units. 
*/ @@ -386,15 +699,14 @@ unsigned long hugetlb_total_pages(void) * hugegpage VMA. do_page_fault() is supposed to trap this, so BUG is we get * this far. */ -static struct page *hugetlb_nopage(struct vm_area_struct *vma, - unsigned long address, int *unused) +static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { BUG(); - return NULL; + return 0; } struct vm_operations_struct hugetlb_vm_ops = { - .nopage = hugetlb_nopage, + .fault = hugetlb_vm_op_fault, }; static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, @@ -420,9 +732,9 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma, pte_t entry; entry = pte_mkwrite(pte_mkdirty(*ptep)); - ptep_set_access_flags(vma, address, ptep, entry, 1); - update_mmu_cache(vma, address, entry); - lazy_mmu_prot_update(entry); + if (ptep_set_access_flags(vma, address, ptep, entry, 1)) { + update_mmu_cache(vma, address, entry); + } } @@ -443,6 +755,11 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, dst_pte = huge_pte_alloc(dst, addr); if (!dst_pte) goto nomem; + + /* If the pagetables are shared don't copy or take references */ + if (dst_pte == src_pte) + continue; + spin_lock(&dst->page_table_lock); spin_lock(&src->page_table_lock); if (!pte_none(*src_pte)) { @@ -451,7 +768,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, entry = *src_pte; ptepage = pte_page(entry); get_page(ptepage); - add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE); set_huge_pte_at(dst, addr, dst_pte, entry); } spin_unlock(&src->page_table_lock); @@ -463,40 +779,68 @@ nomem: return -ENOMEM; } -void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, - unsigned long end) +void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) { struct mm_struct *mm = vma->vm_mm; unsigned long address; pte_t *ptep; pte_t pte; struct page *page; + struct page *tmp; + /* + * A page gathering list, protected by per file i_mmap_lock. The + * lock is used to avoid list corruption from multiple unmapping + * of the same page since we are using page->lru. + */ + LIST_HEAD(page_list); WARN_ON(!is_vm_hugetlb_page(vma)); BUG_ON(start & ~HPAGE_MASK); BUG_ON(end & ~HPAGE_MASK); spin_lock(&mm->page_table_lock); - - /* Update high watermark before we lower rss */ - update_hiwater_rss(mm); - for (address = start; address < end; address += HPAGE_SIZE) { ptep = huge_pte_offset(mm, address); if (!ptep) continue; + if (huge_pmd_unshare(mm, &address, ptep)) + continue; + pte = huge_ptep_get_and_clear(mm, address, ptep); if (pte_none(pte)) continue; page = pte_page(pte); - put_page(page); - add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE)); + if (pte_dirty(pte)) + set_page_dirty(page); + list_add(&page->lru, &page_list); } - spin_unlock(&mm->page_table_lock); flush_tlb_range(vma, start, end); + list_for_each_entry_safe(page, tmp, &page_list, lru) { + list_del(&page->lru); + put_page(page); + } +} + +void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + /* + * It is undesirable to test vma->vm_file as it should be non-null + * for valid hugetlb area. However, vm_file will be NULL in the error + * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails, + * do_mmap_pgoff() nullifies vma->vm_file before calling this function + * to clean up. Since no pte has actually been setup, it is safe to + * do nothing in this case. 
+ */ + if (vma->vm_file) { + spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); + __unmap_hugepage_range(vma, start, end); + spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); + } } static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, @@ -512,19 +856,20 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, avoidcopy = (page_count(old_page) == 1); if (avoidcopy) { set_huge_ptep_writable(vma, address, ptep); - return VM_FAULT_MINOR; + return 0; } page_cache_get(old_page); new_page = alloc_huge_page(vma, address); - if (!new_page) { + if (IS_ERR(new_page)) { page_cache_release(old_page); - return VM_FAULT_OOM; + return -PTR_ERR(new_page); } spin_unlock(&mm->page_table_lock); - copy_huge_page(new_page, old_page, address); + copy_huge_page(new_page, old_page, address, vma); + __SetPageUptodate(new_page); spin_lock(&mm->page_table_lock); ptep = huge_pte_offset(mm, address & HPAGE_MASK); @@ -537,10 +882,10 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, } page_cache_release(new_page); page_cache_release(old_page); - return VM_FAULT_MINOR; + return 0; } -int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, +static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, int write_access) { int ret = VM_FAULT_SIGBUS; @@ -561,27 +906,32 @@ int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, retry: page = find_lock_page(mapping, idx); if (!page) { - if (hugetlb_get_quota(mapping)) + size = i_size_read(mapping->host) >> HPAGE_SHIFT; + if (idx >= size) goto out; page = alloc_huge_page(vma, address); - if (!page) { - hugetlb_put_quota(mapping); - ret = VM_FAULT_OOM; + if (IS_ERR(page)) { + ret = -PTR_ERR(page); goto out; } clear_huge_page(page, address); + __SetPageUptodate(page); if (vma->vm_flags & VM_SHARED) { int err; + struct inode *inode = mapping->host; err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); if (err) { put_page(page); - hugetlb_put_quota(mapping); if (err == -EEXIST) goto retry; goto out; } + + spin_lock(&inode->i_lock); + inode->i_blocks += BLOCKS_PER_HUGEPAGE; + spin_unlock(&inode->i_lock); } else lock_page(page); } @@ -591,11 +941,10 @@ retry: if (idx >= size) goto backout; - ret = VM_FAULT_MINOR; + ret = 0; if (!pte_none(*ptep)) goto backout; - add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE); new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_SHARED))); set_huge_pte_at(mm, address, ptep, new_pte); @@ -612,7 +961,6 @@ out: backout: spin_unlock(&mm->page_table_lock); - hugetlb_put_quota(mapping); unlock_page(page); put_page(page); goto out; @@ -643,7 +991,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, return ret; } - ret = VM_FAULT_MINOR; + ret = 0; spin_lock(&mm->page_table_lock); /* Check for a racing update before calling hugetlb_cow */ @@ -658,12 +1006,13 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page **pages, struct vm_area_struct **vmas, - unsigned long *position, int *length, int i) + unsigned long *position, int *length, int i, + int write) { - unsigned long vpfn, vaddr = *position; + unsigned long pfn_offset; + unsigned long vaddr = *position; int remainder = *length; - vpfn = vaddr/PAGE_SIZE; spin_lock(&mm->page_table_lock); while (vaddr < vma->vm_end && remainder) { pte_t *pte; @@ -676,13 +1025,13 @@ int follow_hugetlb_page(struct mm_struct 
*mm, struct vm_area_struct *vma, */ pte = huge_pte_offset(mm, vaddr & HPAGE_MASK); - if (!pte || pte_none(*pte)) { + if (!pte || pte_none(*pte) || (write && !pte_write(*pte))) { int ret; spin_unlock(&mm->page_table_lock); - ret = hugetlb_fault(mm, vma, vaddr, 0); + ret = hugetlb_fault(mm, vma, vaddr, write); spin_lock(&mm->page_table_lock); - if (ret == VM_FAULT_MINOR) + if (!(ret & VM_FAULT_ERROR)) continue; remainder = 0; @@ -691,19 +1040,29 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, break; } + pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT; + page = pte_page(*pte); +same_page: if (pages) { - page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)]; get_page(page); - pages[i] = page; + pages[i] = page + pfn_offset; } if (vmas) vmas[i] = vma; vaddr += PAGE_SIZE; - ++vpfn; + ++pfn_offset; --remainder; ++i; + if (vaddr < vma->vm_end && remainder && + pfn_offset < HPAGE_SIZE/PAGE_SIZE) { + /* + * We use pfn_offset to avoid touching the pageframes + * of this compound page. + */ + goto same_page; + } } spin_unlock(&mm->page_table_lock); *length = remainder; @@ -723,20 +1082,215 @@ void hugetlb_change_protection(struct vm_area_struct *vma, BUG_ON(address >= end); flush_cache_range(vma, address, end); + spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); spin_lock(&mm->page_table_lock); for (; address < end; address += HPAGE_SIZE) { ptep = huge_pte_offset(mm, address); if (!ptep) continue; + if (huge_pmd_unshare(mm, &address, ptep)) + continue; if (!pte_none(*ptep)) { pte = huge_ptep_get_and_clear(mm, address, ptep); pte = pte_mkhuge(pte_modify(pte, newprot)); set_huge_pte_at(mm, address, ptep, pte); - lazy_mmu_prot_update(pte); } } spin_unlock(&mm->page_table_lock); + spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); flush_tlb_range(vma, start, end); } +struct file_region { + struct list_head link; + long from; + long to; +}; + +static long region_add(struct list_head *head, long f, long t) +{ + struct file_region *rg, *nrg, *trg; + + /* Locate the region we are either in or before. */ + list_for_each_entry(rg, head, link) + if (f <= rg->to) + break; + + /* Round our left edge to the current segment if it encloses us. */ + if (f > rg->from) + f = rg->from; + + /* Check for and consume any regions we now overlap with. */ + nrg = rg; + list_for_each_entry_safe(rg, trg, rg->link.prev, link) { + if (&rg->link == head) + break; + if (rg->from > t) + break; + + /* If this area reaches higher then extend our area to + * include it completely. If this is not the first area + * which we intend to reuse, free it. */ + if (rg->to > t) + t = rg->to; + if (rg != nrg) { + list_del(&rg->link); + kfree(rg); + } + } + nrg->from = f; + nrg->to = t; + return 0; +} + +static long region_chg(struct list_head *head, long f, long t) +{ + struct file_region *rg, *nrg; + long chg = 0; + + /* Locate the region we are before or in. */ + list_for_each_entry(rg, head, link) + if (f <= rg->to) + break; + + /* If we are below the current region then a new region is required. + * Subtle, allocate a new region at the position but make it zero + * size such that we can guarantee to record the reservation. */ + if (&rg->link == head || t < rg->from) { + nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); + if (!nrg) + return -ENOMEM; + nrg->from = f; + nrg->to = f; + INIT_LIST_HEAD(&nrg->link); + list_add(&nrg->link, rg->link.prev); + + return t - f; + } + + /* Round our left edge to the current segment if it encloses us. 
*/ + if (f > rg->from) + f = rg->from; + chg = t - f; + + /* Check for and consume any regions we now overlap with. */ + list_for_each_entry(rg, rg->link.prev, link) { + if (&rg->link == head) + break; + if (rg->from > t) + return chg; + + /* We overlap with this area, if it extends futher than + * us then we must extend ourselves. Account for its + * existing reservation. */ + if (rg->to > t) { + chg += rg->to - t; + t = rg->to; + } + chg -= rg->to - rg->from; + } + return chg; +} + +static long region_truncate(struct list_head *head, long end) +{ + struct file_region *rg, *trg; + long chg = 0; + + /* Locate the region we are either in or before. */ + list_for_each_entry(rg, head, link) + if (end <= rg->to) + break; + if (&rg->link == head) + return 0; + + /* If we are in the middle of a region then adjust it. */ + if (end > rg->from) { + chg = rg->to - end; + rg->to = end; + rg = list_entry(rg->link.next, typeof(*rg), link); + } + + /* Drop any remaining regions. */ + list_for_each_entry_safe(rg, trg, rg->link.prev, link) { + if (&rg->link == head) + break; + chg += rg->to - rg->from; + list_del(&rg->link); + kfree(rg); + } + return chg; +} + +static int hugetlb_acct_memory(long delta) +{ + int ret = -ENOMEM; + + spin_lock(&hugetlb_lock); + /* + * When cpuset is configured, it breaks the strict hugetlb page + * reservation as the accounting is done on a global variable. Such + * reservation is completely rubbish in the presence of cpuset because + * the reservation is not checked against page availability for the + * current cpuset. Application can still potentially OOM'ed by kernel + * with lack of free htlb page in cpuset that the task is in. + * Attempt to enforce strict accounting with cpuset is almost + * impossible (or too ugly) because cpuset is too fluid that + * task or memory node can be dynamically moved between cpusets. + * + * The change of semantics for shared hugetlb mapping with cpuset is + * undesirable. However, in order to preserve some of the semantics, + * we fall back to check against current free page availability as + * a best attempt and hopefully to minimize the impact of changing + * semantics that cpuset has. + */ + if (delta > 0) { + if (gather_surplus_pages(delta) < 0) + goto out; + + if (delta > cpuset_mems_nr(free_huge_pages_node)) { + return_unused_surplus_pages(delta); + goto out; + } + } + + ret = 0; + if (delta < 0) + return_unused_surplus_pages((unsigned long) -delta); + +out: + spin_unlock(&hugetlb_lock); + return ret; +} + +int hugetlb_reserve_pages(struct inode *inode, long from, long to) +{ + long ret, chg; + + chg = region_chg(&inode->i_mapping->private_list, from, to); + if (chg < 0) + return chg; + + if (hugetlb_get_quota(inode->i_mapping, chg)) + return -ENOSPC; + ret = hugetlb_acct_memory(chg); + if (ret < 0) { + hugetlb_put_quota(inode->i_mapping, chg); + return ret; + } + region_add(&inode->i_mapping->private_list, from, to); + return 0; +} + +void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) +{ + long chg = region_truncate(&inode->i_mapping->private_list, offset); + + spin_lock(&inode->i_lock); + inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed; + spin_unlock(&inode->i_lock); + + hugetlb_put_quota(inode->i_mapping, (chg - freed)); + hugetlb_acct_memory(-(chg - freed)); +}
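
The reservation interface introduced above (hugetlb_reserve_pages() called at mmap time, backed by gather_surplus_pages() and the region_* trackers) is what lets a MAP_SHARED hugetlbfs mapping fail cleanly at mmap() instead of hitting SIGBUS when the pages are first touched. Below is a minimal userspace sketch of that behaviour; it is not part of the patch, and the /mnt/huge mount point and the 2 MB huge page size are assumptions for illustration only.

/*
 * Illustrative sketch only -- not part of the patch above.
 * Assumes hugetlbfs is mounted at /mnt/huge and the huge page size is 2 MB.
 * With the reservation code in this patch, a shared mapping reserves its
 * huge pages up front, so a shortage shows up as an mmap() failure here
 * rather than a SIGBUS later when the pages are faulted in.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE	(2UL * 1024 * 1024)	/* assumed huge page size */
#define NR_HPAGES	4

int main(void)
{
	size_t len = NR_HPAGES * HPAGE_SIZE;
	char *p;
	int fd;

	fd = open("/mnt/huge/demo", O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* For a MAP_SHARED hugetlbfs mapping, the reservation is taken here. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		/* Not enough free plus overcommitable huge pages to reserve. */
		perror("mmap");
		close(fd);
		return 1;
	}

	/* Faulting the pages in cannot fail for lack of huge pages now. */
	memset(p, 0, len);

	munmap(p, len);
	close(fd);
	unlink("/mnt/huge/demo");
	return 0;
}

If the pool is too small, the reservation can still be satisfied from surplus pages as long as the new nr_overcommit_hugepages sysctl allows it; otherwise the mmap() in the sketch fails with ENOMEM instead of the application dying later.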