+	for (i = 0; i < needed; i++) {
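+		/*
+		 * hugetlb_lock is not held here (it is retaken below), so
+		 * the buddy allocator is called without the spinlock.
+		 */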
+		page = alloc_buddy_huge_page(NULL, 0);
+		if (!page) {
+			/*
+			 * We were not able to allocate enough pages to
+			 * satisfy the entire reservation so we free what
+			 * we've allocated so far.
+			 */
+			spin_lock(&hugetlb_lock);
+			needed = 0;
+			goto free;
+		}
+
+		list_add(&page->lru, &surplus_list);
+	}
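+	/* All 'needed' pages were allocated; add them to the running total. */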
+	allocated += needed;
+
+	/*
+	 * After retaking hugetlb_lock, we need to recalculate 'needed'
+	 * because either resv_huge_pages or free_huge_pages may have changed.
+	 */
+	spin_lock(&hugetlb_lock);
+	needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
+	if (needed > 0)
+		goto retry;
+
+	/*
+	 * The surplus_list now contains _at_least_ the number of extra pages
+	 * needed to accommodate the reservation. Add the appropriate number
+	 * of pages to the hugetlb pool and free the extras back to the buddy
+	 * allocator. Commit the entire reservation here to prevent another
+	 * process from stealing the pages as they are added to the pool but
+	 * before they are reserved.
+	 */
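+	/*
+	 * 'needed' becomes the total number of pages to move into the
+	 * pool; any surplus_list entries beyond that are freed below.
+	 */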
+	needed += allocated;
+	resv_huge_pages += delta;
+	ret = 0;
+free:
+	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+		list_del(&page->lru);
+		if ((--needed) >= 0)
+			enqueue_huge_page(page);
+		else {
+			/*
+			 * The page has a reference count of zero already, so
+			 * call free_huge_page directly instead of using
+			 * put_page. This must be done with hugetlb_lock
+			 * unlocked which is safe because free_huge_page takes
+			 * hugetlb_lock before deciding how to free the page.
+			 */
+			spin_unlock(&hugetlb_lock);
+			free_huge_page(page);
+			spin_lock(&hugetlb_lock);
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * When releasing a hugetlb pool reservation, any surplus pages that were
+ * allocated to satisfy the reservation must be explicitly freed if they were
+ * never used.
+ */
+static void return_unused_surplus_pages(unsigned long unused_resv_pages)
+{
+	static int nid = -1;
+	struct page *page;
+	unsigned long nr_pages;
+
+	/*
+	 * We want to release as many surplus pages as possible, spread
+	 * evenly across all nodes. Iterate across all nodes until we
+	 * can no longer free unreserved surplus pages. This occurs when
+	 * the nodes with surplus pages have no free pages.
+	 */
+	unsigned long remaining_iterations = num_online_nodes();
+
+	/* Uncommit the reservation */
+	resv_huge_pages -= unused_resv_pages;
+
+	nr_pages = min(unused_resv_pages, surplus_huge_pages);
+
+	while (remaining_iterations-- && nr_pages) {
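+		/* Advance round-robin to the next online node, wrapping. */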
+		nid = next_node(nid, node_online_map);
+		if (nid == MAX_NUMNODES)
+			nid = first_node(node_online_map);
+
+		if (!surplus_huge_pages_node[nid])
+			continue;
+
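+		/*
+		 * Free one unreserved surplus page from this node back to
+		 * the buddy allocator and fix up the pool counters; a
+		 * successful free resets the iteration budget.
+		 */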
+		if (!list_empty(&hugepage_freelists[nid])) {
+			page = list_entry(hugepage_freelists[nid].next,
+					  struct page, lru);
+			list_del(&page->lru);
+			update_and_free_page(page);
+			free_huge_pages--;
+			free_huge_pages_node[nid]--;
+			surplus_huge_pages--;
+			surplus_huge_pages_node[nid]--;
+			nr_pages--;
+			remaining_iterations = num_online_nodes();
+		}
+	}
+}
+
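+/*
+ * For shared mappings the reservation was taken when the mapping was
+ * created, so satisfy the fault straight from the hugetlb pool; an
+ * empty pool here means we are out of memory.
+ */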
+static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
+					   unsigned long addr)
+{
+	struct page *page;
+
+	spin_lock(&hugetlb_lock);
+	page = dequeue_huge_page_vma(vma, addr);
+	spin_unlock(&hugetlb_lock);
+	return page ? page : ERR_PTR(-VM_FAULT_OOM);
+}
+
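+/*
+ * Private mappings do not reserve pages up front: charge the quota, try
+ * an unreserved page from the pool, then fall back to the buddy allocator.
+ */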
+static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
+					    unsigned long addr)
+{
+	struct page *page = NULL;
+
+	if (hugetlb_get_quota(vma->vm_file->f_mapping, 1))
+		return ERR_PTR(-VM_FAULT_SIGBUS);
+
+	spin_lock(&hugetlb_lock);
+	if (free_huge_pages > resv_huge_pages)
+		page = dequeue_huge_page_vma(vma, addr);
+	spin_unlock(&hugetlb_lock);
+	if (!page) {
+		page = alloc_buddy_huge_page(vma, addr);
+		if (!page) {
+			hugetlb_put_quota(vma->vm_file->f_mapping, 1);
+			return ERR_PTR(-VM_FAULT_OOM);
+		}
+	}
+	return page;
+}
+
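+/*
+ * Dispatch based on mapping type; on success give the page a reference
+ * and stash the mapping in page private for free_huge_page() to use.
+ */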
+static struct page *alloc_huge_page(struct vm_area_struct *vma,
+				    unsigned long addr)
+{
+	struct page *page;
+	struct address_space *mapping = vma->vm_file->f_mapping;
+
+	if (vma->vm_flags & VM_MAYSHARE)
+		page = alloc_huge_page_shared(vma, addr);
+	else
+		page = alloc_huge_page_private(vma, addr);
+
+	if (!IS_ERR(page)) {
+		set_page_refcounted(page);
+		set_page_private(page, (unsigned long) mapping);
+	}
+	return page;