[safe/jmp/linux-2.6] mm/hugetlb.c
index 41341c4..3863386 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -9,6 +9,7 @@
 #include <linux/mm.h>
 #include <linux/sysctl.h>
 #include <linux/highmem.h>
+#include <linux/mmu_notifier.h>
 #include <linux/nodemask.h>
 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
@@ -19,6 +20,7 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/io.h>
 
 #include <linux/hugetlb.h>
 #include "internal.h"
@@ -563,7 +565,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
                huge_page_order(h));
        if (page) {
                if (arch_prepare_hugepage(page)) {
-                       __free_pages(page, HUGETLB_PAGE_ORDER);
+                       __free_pages(page, huge_page_order(h));
                        return NULL;
                }
                prep_new_huge_page(h, page, nid);
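
With multiple huge page sizes, the order passed to __free_pages() must be the
order of the hstate the page was actually allocated from; HUGETLB_PAGE_ORDER
is only the order of the default size, so freeing with it would leak tail
pages or corrupt the buddy allocator's bookkeeping. A minimal sketch of the
invariant (the GFP flags and order value are illustrative, not quoted from
the patch):

    /* A page must be freed at the same order it was allocated at. */
    unsigned int order = huge_page_order(h);        /* hstate's own order */
    struct page *page = alloc_pages(GFP_KERNEL | __GFP_COMP, order);
    if (page)
            __free_pages(page, order);              /* never another order */
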
@@ -663,6 +665,11 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
                                        __GFP_REPEAT|__GFP_NOWARN,
                                        huge_page_order(h));
 
+       if (page && arch_prepare_hugepage(page)) {
+               __free_pages(page, huge_page_order(h));
+               return NULL;
+       }
+
        spin_lock(&hugetlb_lock);
        if (page) {
                /*
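
This hunk gives the direct buddy-allocation path the same
arch_prepare_hugepage() failure handling that the pool-grow path already has,
freeing the page before hugetlb_lock is taken. On most architectures the hook
cannot fail; a sketch of the generic fallback, assuming the usual stub
pattern (the guard macro name is an assumption):

    #ifndef ARCH_HAS_PREPARE_HUGEPAGE       /* assumed guard name */
    static inline int arch_prepare_hugepage(struct page *page)
    {
            return 0;       /* nothing to prepare; only s390 etc. can fail */
    }
    #endif
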
@@ -1026,7 +1033,6 @@ static void __init report_hugepages(void)
        }
 }
 
-#ifdef CONFIG_SYSCTL
 #ifdef CONFIG_HIGHMEM
 static void try_to_free_low(struct hstate *h, unsigned long count)
 {
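
Dropping #ifdef CONFIG_SYSCTL here, and re-adding it just before
hugetlb_sysctl_handler() in a later hunk, narrows the guard so that
try_to_free_low() and the pool-resizing code are always built; the boot-time
"hugepages=" parameter then still works on CONFIG_SYSCTL=n kernels. The
resulting layout, sketched with stand-in bodies:

    static void try_to_free_low_sketch(void)        /* always built */
    {
            /* shrink the pool, preferring free highmem pages */
    }

    #ifdef CONFIG_SYSCTL
    static int hugetlb_sysctl_sketch(void)          /* sysctl entry only */
    {
            return 0;
    }
    #endif
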
@@ -1282,7 +1288,12 @@ module_exit(hugetlb_exit);
 
 static int __init hugetlb_init(void)
 {
-       BUILD_BUG_ON(HPAGE_SHIFT == 0);
+       /* Some platforms decide whether they support huge pages at boot
+        * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
+        * there is no such support.
+        */
+       if (HPAGE_SHIFT == 0)
+               return 0;
 
        if (!size_to_hstate(default_hstate_size)) {
                default_hstate_size = HPAGE_SIZE;
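
BUILD_BUG_ON() needs a condition the compiler can fold to a constant, but on
powerpc HPAGE_SHIFT is a variable assigned during boot-time MMU setup, so
availability has to be probed at runtime instead. The idiom, with
hypothetical names:

    extern unsigned int boot_huge_shift;    /* set during early MMU setup */

    static int __init feature_init(void)
    {
            if (boot_huge_shift == 0)       /* platform lacks the feature */
                    return 0;               /* succeed, register nothing */
            /* ... normal initialisation ... */
            return 0;
    }
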
@@ -1386,6 +1397,7 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
        return nr;
 }
 
+#ifdef CONFIG_SYSCTL
 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
                           struct file *file, void __user *buffer,
                           size_t *length, loff_t *ppos)
@@ -1672,6 +1684,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        BUG_ON(start & ~huge_page_mask(h));
        BUG_ON(end & ~huge_page_mask(h));
 
+       mmu_notifier_invalidate_range_start(mm, start, end);
        spin_lock(&mm->page_table_lock);
        for (address = start; address < end; address += sz) {
                ptep = huge_pte_offset(mm, address);
@@ -1713,6 +1726,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        }
        spin_unlock(&mm->page_table_lock);
        flush_tlb_range(vma, start, end);
+       mmu_notifier_invalidate_range_end(mm, start, end);
        list_for_each_entry_safe(page, tmp, &page_list, lru) {
                list_del(&page->lru);
                put_page(page);
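
The two added calls bracket the entire teardown, TLB flush included, so
secondary MMUs (KVM shadow page tables, the GRU driver, and similar) are told
to drop their own mappings of the range. The ..._start call sits outside
mm->page_table_lock because notifier callbacks may sleep. Condensed, the
pattern this hunk establishes is:

    mmu_notifier_invalidate_range_start(mm, start, end);   /* may sleep */
    spin_lock(&mm->page_table_lock);
    /* ... clear huge PTEs, collect the backing pages ... */
    spin_unlock(&mm->page_table_lock);
    flush_tlb_range(vma, start, end);       /* primary MMU now clean */
    mmu_notifier_invalidate_range_end(mm, start, end);
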
@@ -1928,6 +1942,18 @@ retry:
                        lock_page(page);
        }
 
+       /*
+        * If we are going to COW a private mapping later, we examine the
+        * pending reservations for this page now. This will ensure that
+        * any allocations necessary to record that reservation occur outside
+        * the spinlock.
+        */
+       if (write_access && !(vma->vm_flags & VM_SHARED))
+               if (vma_needs_reservation(h, vma, address) < 0) {
+                       ret = VM_FAULT_OOM;
+                       goto backout_unlocked;
+               }
+
        spin_lock(&mm->page_table_lock);
        size = i_size_read(mapping->host) >> huge_page_shift(h);
        if (idx >= size)
@@ -1953,6 +1979,7 @@ out:
 
 backout:
        spin_unlock(&mm->page_table_lock);
+backout_unlocked:
        unlock_page(page);
        put_page(page);
        goto out;
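
These two hunks follow the usual "sleeping work before the spinlock" pattern:
vma_needs_reservation() may allocate and block, so it runs before
mm->page_table_lock is taken, and the new backout_unlocked label unwinds such
early failures without a spurious spin_unlock(). The shape of the pattern,
with a hypothetical helper:

    if (may_sleep_reserve() < 0) {          /* can allocate and block */
            ret = VM_FAULT_OOM;
            goto backout_unlocked;          /* lock not held yet */
    }

    spin_lock(&mm->page_table_lock);
    /* ... install the PTE using the reservation ... */
    spin_unlock(&mm->page_table_lock);
    return 0;

    backout_unlocked:
            /* drop page references; no spin_unlock() on this path */
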
@@ -1964,6 +1991,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        pte_t *ptep;
        pte_t entry;
        int ret;
+       struct page *pagecache_page = NULL;
        static DEFINE_MUTEX(hugetlb_instantiation_mutex);
        struct hstate *h = hstate_vma(vma);
 
@@ -1980,25 +2008,56 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        entry = huge_ptep_get(ptep);
        if (huge_pte_none(entry)) {
                ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
-               mutex_unlock(&hugetlb_instantiation_mutex);
-               return ret;
+               goto out_mutex;
        }
 
        ret = 0;
 
+       /*
+        * If we are going to COW the mapping later, we examine the pending
+        * reservations for this page now. This will ensure that any
+        * allocations necessary to record that reservation occur outside the
+        * spinlock. For private mappings, we also lookup the pagecache
+        * page now as it is used to determine if a reservation has been
+        * consumed.
+        */
+       if (write_access && !pte_write(entry)) {
+               if (vma_needs_reservation(h, vma, address) < 0) {
+                       ret = VM_FAULT_OOM;
+                       goto out_mutex;
+               }
+
+               if (!(vma->vm_flags & VM_SHARED))
+                       pagecache_page = hugetlbfs_pagecache_page(h,
+                                                               vma, address);
+       }
+
        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
-       if (likely(pte_same(entry, huge_ptep_get(ptep))))
-               if (write_access && !pte_write(entry)) {
-                       struct page *page;
-                       page = hugetlbfs_pagecache_page(h, vma, address);
-                       ret = hugetlb_cow(mm, vma, address, ptep, entry, page);
-                       if (page) {
-                               unlock_page(page);
-                               put_page(page);
-                       }
+       if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
+               goto out_page_table_lock;
+
+       if (write_access) {
+               if (!pte_write(entry)) {
+                       ret = hugetlb_cow(mm, vma, address, ptep, entry,
+                                                       pagecache_page);
+                       goto out_page_table_lock;
                }
+               entry = pte_mkdirty(entry);
+       }
+       entry = pte_mkyoung(entry);
+       if (huge_ptep_set_access_flags(vma, address, ptep, entry, write_access))
+               update_mmu_cache(vma, address, entry);
+
+out_page_table_lock:
        spin_unlock(&mm->page_table_lock);
+
+       if (pagecache_page) {
+               unlock_page(pagecache_page);
+               put_page(pagecache_page);
+       }
+
+out_mutex:
        mutex_unlock(&hugetlb_instantiation_mutex);
 
        return ret;
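
The rewritten tail of hugetlb_fault() does three things: it takes the
reservation (and, for private mappings, the pagecache page reference) before
the spinlock; it funnels every exit through out_page_table_lock and out_mutex
so each label releases exactly what is held at that point; and for write
faults on already-writable PTEs it now sets the dirty and young bits through
huge_ptep_set_access_flags(). A self-contained sketch of the layered-label
unwind (names are illustrative):

    static DEFINE_MUTEX(instantiation_mutex);
    static DEFINE_SPINLOCK(table_lock);

    static int fault_sketch(int fail_early)
    {
            int ret = 0;

            mutex_lock(&instantiation_mutex);
            if (fail_early) {
                    ret = -ENOMEM;
                    goto out_mutex;         /* only the mutex is held */
            }

            spin_lock(&table_lock);
            /* ... PTE work under the lock ... */
            spin_unlock(&table_lock);

    out_mutex:
            mutex_unlock(&instantiation_mutex);
            return ret;
    }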