x86 PAT: change track_pfn_vma_new to take pgprot_t pointer param
author venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>
Sat, 10 Jan 2009 00:13:11 +0000 (16:13 -0800)
committer Ingo Molnar <mingo@elte.hu>
Tue, 13 Jan 2009 18:13:01 +0000 (19:13 +0100)
Impact: cleanup

Change the protection parameter of track_pfn_vma_new() to a pgprot_t pointer.
A subsequent patch changes the x86 PAT handling to return a compatible
memtype through this pgprot_t, if what was requested cannot be allowed due to
conflicts.
No functionality change in this patch.
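
In practice the new contract is: the caller takes a local copy of the
protection, passes its address to the tracking hook, and creates the mapping
with that copy, which a later patch may update. The vm_insert_pfn() hunk in
mm/memory.c below follows exactly this pattern:

	pgprot_t pgprot = vma->vm_page_prot;

	if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
		return -EINVAL;

	/* pgprot may be rewritten by the tracking hook in a later patch */
	ret = insert_pfn(vma, addr, pfn, pgprot);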

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/mm/pat.c
include/asm-generic/pgtable.h
mm/memory.c

diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 85cbd3c..f88ac80 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -741,7 +741,7 @@ cleanup_ret:
  * Note that this function can be called with caller trying to map only a
  * subrange/page inside the vma.
  */
-int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
                        unsigned long pfn, unsigned long size)
 {
        int retval = 0;
@@ -758,14 +758,14 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
        if (is_linear_pfn_mapping(vma)) {
                /* reserve the whole chunk starting from vm_pgoff */
                paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
-               return reserve_pfn_range(paddr, vma_size, prot);
+               return reserve_pfn_range(paddr, vma_size, *prot);
        }
 
        /* reserve page by page using pfn and size */
        base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
        for (i = 0; i < size; i += PAGE_SIZE) {
                paddr = base_paddr + i;
-               retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+               retval = reserve_pfn_range(paddr, PAGE_SIZE, *prot);
                if (retval)
                        goto cleanup_ret;
        }
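
The pointer is what makes the follow-up possible: instead of rejecting a
conflicting request, the x86 PAT code can write a compatible memtype back
through *prot. A rough sketch of that idea (illustration only, not part of
this patch; 'flags' and 'want_flags' are assumed names for the memtype PAT
actually reserved vs. the one requested):

	/*
	 * Illustration only: on a memtype conflict, overwrite the
	 * cache-attribute bits of the caller's pgprot with the type
	 * that was actually reserved, instead of failing outright.
	 */
	if (flags != want_flags)
		*prot = __pgprot((pgprot_val(*prot) & ~_PAGE_CACHE_MASK) |
				 flags);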
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 72ebe91..8e6d0ca 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -301,7 +301,7 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
  * track_pfn_vma_new is called when a _new_ pfn mapping is being established
  * for physical range indicated by pfn and size.
  */
-static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
                                        unsigned long pfn, unsigned long size)
 {
        return 0;
@@ -332,7 +332,7 @@ static inline void untrack_pfn_vma(struct vm_area_struct *vma,
 {
 }
 #else
-extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
                                unsigned long pfn, unsigned long size);
 extern int track_pfn_vma_copy(struct vm_area_struct *vma);
 extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
diff --git a/mm/memory.c b/mm/memory.c
index d3ee2ea..22bfa7a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1511,6 +1511,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn)
 {
        int ret;
+       pgprot_t pgprot = vma->vm_page_prot;
        /*
         * Technically, architectures with pte_special can avoid all these
         * restrictions (same for remap_pfn_range).  However we would like
@@ -1525,10 +1526,10 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return -EFAULT;
-       if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+       if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
                return -EINVAL;
 
-       ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+       ret = insert_pfn(vma, addr, pfn, pgprot);
 
        if (ret)
                untrack_pfn_vma(vma, pfn, PAGE_SIZE);
@@ -1671,7 +1672,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 
        vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
-       err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
+       err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
        if (err) {
                /*
                 * To indicate that track_pfn related cleanup is not
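
Note that remap_pfn_range()'s own signature is unchanged; it still takes
prot by value and passes &prot internally, so driver call sites stay as they
are. A minimal sketch of such a caller (hypothetical driver; mydrv_phys_base
is an assumed symbol, not from this patch):

	/* Hypothetical ->mmap handler mapping a device region. */
	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long pfn = mydrv_phys_base >> PAGE_SHIFT;

		return remap_pfn_range(vma, vma->vm_start, pfn,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	}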