mempolicy: disallow static or relative flags for local preferred mode
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4bced0d..93ea46a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -95,12 +95,14 @@ static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
        int nid;
        struct page *page = NULL;
        struct mempolicy *mpol;
+       nodemask_t *nodemask;
        struct zonelist *zonelist = huge_zonelist(vma, address,
-                                       htlb_alloc_mask, &mpol);
+                                       htlb_alloc_mask, &mpol, &nodemask);
        struct zone *zone;
        struct zoneref *z;
 
-       for_each_zone_zonelist(zone, z, zonelist, MAX_NR_ZONES - 1) {
+       for_each_zone_zonelist_nodemask(zone, z, zonelist,
+                                               MAX_NR_ZONES - 1, nodemask) {
                nid = zone_to_nid(zone);
                if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
                    !list_empty(&hugepage_freelists[nid])) {
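
The new iterator threads a nodemask through the zone walk, so zones on nodes the policy excludes are skipped during the scan itself rather than being filtered out afterwards. A minimal userspace sketch of that filtering idea is below; it uses a plain bitmask and a flat zone array instead of the kernel's zonelist/nodemask_t machinery, and every name in it is illustrative rather than taken from the kernel.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's zone/node concepts. */
struct zone {
	int node;
	const char *name;
};

/* Walk the zone list, visiting only zones whose node bit is set in the mask. */
static void walk_zones(const struct zone *zones, int nzones, unsigned long nodemask)
{
	for (int i = 0; i < nzones; i++) {
		if (!(nodemask & (1UL << zones[i].node)))
			continue;	/* node filtered out, skip this zone */
		printf("visiting %s on node %d\n", zones[i].name, zones[i].node);
	}
}

int main(void)
{
	struct zone zones[] = {
		{ 0, "Normal-0" }, { 1, "Normal-1" }, { 2, "Normal-2" },
	};

	/* Allow nodes 0 and 2 only; the zone on node 1 is never visited. */
	walk_zones(zones, 3, (1UL << 0) | (1UL << 2));
	return 0;
}
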
@@ -370,11 +372,19 @@ retry:
        resv_huge_pages += delta;
        ret = 0;
 free:
+       /* Free the needed pages to the hugetlb pool */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+               if ((--needed) < 0)
+                       break;
                list_del(&page->lru);
-               if ((--needed) >= 0)
-                       enqueue_huge_page(page);
-               else {
+               enqueue_huge_page(page);
+       }
+
+       /* Free unnecessary surplus pages to the buddy allocator */
+       if (!list_empty(&surplus_list)) {
+               spin_unlock(&hugetlb_lock);
+               list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+                       list_del(&page->lru);
                        /*
                         * The page has a reference count of zero already, so
                         * call free_huge_page directly instead of using
@@ -382,10 +392,9 @@ free:
                         * unlocked which is safe because free_huge_page takes
                         * hugetlb_lock before deciding how to free the page.
                         */
-                       spin_unlock(&hugetlb_lock);
                        free_huge_page(page);
-                       spin_lock(&hugetlb_lock);
                }
+               spin_lock(&hugetlb_lock);
        }
 
        return ret;
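
The restructured free path first returns the pages it still needs to the hugetlb pool under hugetlb_lock, then drops the lock once around the whole loop that hands the leftover surplus pages back, instead of unlocking and relocking per page. A rough userspace sketch of that batching pattern with a pthread mutex follows; the list handling and free_item() below are illustrative stand-ins, not the kernel interfaces.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

struct item {
	struct item *next;
};

/* Stand-in for handing a page back to the buddy allocator. */
static void free_item(struct item *it)
{
	free(it);
}

/*
 * Batching pattern: do the bookkeeping under the lock, then drop the lock
 * once, free the whole surplus batch, and retake the lock afterwards,
 * rather than cycling the lock around every individual free.
 */
static void free_surplus(struct item **surplus)
{
	pthread_mutex_lock(&pool_lock);
	/* ... bookkeeping that must happen under the lock ... */
	if (*surplus) {
		pthread_mutex_unlock(&pool_lock);
		while (*surplus) {
			struct item *it = *surplus;

			*surplus = it->next;
			free_item(it);	/* safe without pool_lock held */
		}
		pthread_mutex_lock(&pool_lock);
	}
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	struct item *list = NULL;

	for (int i = 0; i < 3; i++) {
		struct item *it = malloc(sizeof(*it));

		if (!it)
			return 1;
		it->next = list;
		list = it;
	}
	free_surplus(&list);
	puts("surplus freed with a single lock cycle");
	return 0;
}
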