permit mempool_free(NULL)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index eb7180d..acc0fb3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -66,7 +66,7 @@ static void enqueue_huge_page(struct page *page)
 static struct page *dequeue_huge_page(struct vm_area_struct *vma,
                                unsigned long address)
 {
-       int nid = numa_node_id();
+       int nid;
        struct page *page = NULL;
        struct zonelist *zonelist = huge_zonelist(vma, address);
        struct zone **z;
@@ -101,13 +101,20 @@ static void free_huge_page(struct page *page)
 
 static int alloc_fresh_huge_page(void)
 {
-       static int nid = 0;
+       static int prev_nid;
        struct page *page;
-       page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
-                                       HUGETLB_PAGE_ORDER);
-       nid = next_node(nid, node_online_map);
+       static DEFINE_SPINLOCK(nid_lock);
+       int nid;
+
+       spin_lock(&nid_lock);
+       nid = next_node(prev_nid, node_online_map);
        if (nid == MAX_NUMNODES)
                nid = first_node(node_online_map);
+       prev_nid = nid;
+       spin_unlock(&nid_lock);
+
+       page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
+                                       HUGETLB_PAGE_ORDER);
        if (page) {
                set_compound_page_dtor(page, free_huge_page);
                spin_lock(&hugetlb_lock);
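
The hunk above is the substance of the change to alloc_fresh_huge_page(): the old code advanced the bare static nid after the allocation with no locking, so concurrent callers could both allocate from the same node and skew the round-robin interleave across nodes. The patched version picks the next online node under a dedicated spinlock first, then performs the (potentially slow) page allocation outside the lock. Below is a minimal userspace sketch of the same pattern, using a pthread mutex in place of the kernel spinlock and hypothetical node_online[] / allocate_on_node() stand-ins for node_online_map and alloc_pages_node(); it is an illustration of the locking structure, not kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_NODES 4

/* Hypothetical stand-ins for node_online_map and alloc_pages_node().
 * At least one node is assumed online, otherwise the scan below spins. */
static bool node_online[NR_NODES] = { true, true, false, true };

static void *allocate_on_node(int nid)
{
        printf("allocating on node %d\n", nid);
        return malloc(2 * 1024 * 1024);         /* pretend this is a huge page */
}

/* Shared round-robin cursor, analogous to prev_nid in the patch. */
static int prev_nid;
static pthread_mutex_t nid_lock = PTHREAD_MUTEX_INITIALIZER;

static void *alloc_fresh(void)
{
        int nid;

        /* Advance the cursor under the lock, as the patch does with nid_lock,
         * wrapping past the last node like next_node()/first_node(). */
        pthread_mutex_lock(&nid_lock);
        nid = prev_nid;
        do {
                nid = (nid + 1) % NR_NODES;
        } while (!node_online[nid]);
        prev_nid = nid;
        pthread_mutex_unlock(&nid_lock);

        /* The slow part happens outside the lock. */
        return allocate_on_node(nid);
}

int main(void)
{
        for (int i = 0; i < 6; i++)
                free(alloc_fresh());
        return 0;
}

Run with several threads calling alloc_fresh(), each allocation still lands on a distinct node in turn, which is exactly what the unlocked static nid could not guarantee.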
@@ -326,9 +333,10 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
        pte_t entry;
 
        entry = pte_mkwrite(pte_mkdirty(*ptep));
-       ptep_set_access_flags(vma, address, ptep, entry, 1);
-       update_mmu_cache(vma, address, entry);
-       lazy_mmu_prot_update(entry);
+       if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+               update_mmu_cache(vma, address, entry);
+               lazy_mmu_prot_update(entry);
+       }
 }
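
The final hunk tracks the reworked ptep_set_access_flags() interface: the helper now reports whether it actually changed the PTE, so the follow-up update_mmu_cache() and lazy_mmu_prot_update() calls can be skipped when nothing changed. A self-contained sketch of the same update-only-if-changed idiom follows, with hypothetical set_access_flags() and update_mmu_cache() stand-ins rather than the kernel's real definitions.

#include <stdbool.h>
#include <stdio.h>

struct pte { unsigned long val; };

/* Hypothetical helper: install the new entry and report whether it changed. */
static bool set_access_flags(struct pte *ptep, struct pte entry)
{
        bool changed = ptep->val != entry.val;

        if (changed)
                ptep->val = entry.val;
        return changed;
}

static void update_mmu_cache(struct pte entry)
{
        printf("refreshing MMU state for %#lx\n", entry.val);
}

int main(void)
{
        struct pte pte = { 0x5 };
        struct pte entry = { 0x7 };     /* e.g. dirty and writable bits set */

        /* Mirror of the patched set_huge_ptep_writable(): only do the
         * (possibly expensive) MMU-cache work when the PTE really changed. */
        if (set_access_flags(&pte, entry))
                update_mmu_cache(entry);
        return 0;
}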