Merge branches 'x86/urgent' and 'x86/pat' into x86/core
author Ingo Molnar <mingo@elte.hu>
Thu, 26 Feb 2009 05:31:23 +0000 (06:31 +0100)
committer Ingo Molnar <mingo@elte.hu>
Thu, 26 Feb 2009 05:31:23 +0000 (06:31 +0100)
Conflicts:
arch/x86/include/asm/pat.h

arch/x86/include/asm/iomap.h
arch/x86/include/asm/pat.h
arch/x86/mm/iomap_32.c
arch/x86/mm/pat.c
include/linux/io-mapping.h

index 86af260..bd46495 100644 (file)
 #include <asm/tlbflush.h>
 
 int
-is_io_mapping_possible(resource_size_t base, unsigned long size);
+reserve_io_memtype_wc(u64 base, unsigned long size, pgprot_t *prot);
+
+void
+free_io_memtype(u64 base, unsigned long size);
 
 void *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
index 9709fdf..b0e7005 100644 (file)
@@ -15,4 +15,7 @@ extern int reserve_memtype(u64 start, u64 end,
                unsigned long req_type, unsigned long *ret_type);
 extern int free_memtype(u64 start, u64 end);
 
+extern int kernel_map_sync_memtype(u64 base, unsigned long size,
+               unsigned long flag);
+
 #endif /* _ASM_X86_PAT_H */
index 6c2b1af..d5e2842 100644 (file)
 #include <linux/module.h>
 
 #ifdef CONFIG_X86_PAE
-int
+static int
 is_io_mapping_possible(resource_size_t base, unsigned long size)
 {
        return 1;
 }
 #else
-int
+static int
 is_io_mapping_possible(resource_size_t base, unsigned long size)
 {
        /* There is no way to map greater than 1 << 32 address without PAE */
@@ -38,6 +38,46 @@ is_io_mapping_possible(resource_size_t base, unsigned long size)
 }
 #endif
 
+int
+reserve_io_memtype_wc(u64 base, unsigned long size, pgprot_t *prot)
+{
+       unsigned long ret_flag;
+
+       if (!is_io_mapping_possible(base, size))
+               goto out_err;
+
+       if (!pat_enabled) {
+               *prot = pgprot_noncached(PAGE_KERNEL);
+               return 0;
+       }
+
+       if (reserve_memtype(base, base + size, _PAGE_CACHE_WC, &ret_flag))
+               goto out_err;
+
+       if (ret_flag == _PAGE_CACHE_WB)
+               goto out_free;
+
+       if (kernel_map_sync_memtype(base, size, ret_flag))
+               goto out_free;
+
+       *prot = __pgprot(__PAGE_KERNEL | ret_flag);
+       return 0;
+
+out_free:
+       free_memtype(base, base + size);
+out_err:
+       return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(reserve_io_memtype_wc);
+
+void
+free_io_memtype(u64 base, unsigned long size)
+{
+       if (pat_enabled)
+               free_memtype(base, base + size);
+}
+EXPORT_SYMBOL_GPL(free_io_memtype);
+
 /* Map 'pfn' using fixed map 'type' and protections 'prot'
  */
 void *
index 05f9aef..fdfedb6 100644 (file)
@@ -634,6 +634,33 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
 }
 
 /*
+ * Change the memory type for the physical address range in kernel identity
+ * mapping space if that range is a part of identity map.
+ */
+int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
+{
+       unsigned long id_sz;
+
+       if (!pat_enabled || base >= __pa(high_memory))
+               return 0;
+
+       id_sz = (__pa(high_memory) < base + size) ?
+                               __pa(high_memory) - base :
+                               size;
+
+       if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
+               printk(KERN_INFO
+                       "%s:%d ioremap_change_attr failed %s "
+                       "for %Lx-%Lx\n",
+                       current->comm, current->pid,
+                       cattr_name(flags),
+                       base, (unsigned long long)(base + size));
+               return -EINVAL;
+       }
+       return 0;
+}
+
+/*
  * Internal interface to reserve a range of physical memory with prot.
  * Reserved non RAM regions only and after successful reserve_memtype,
  * this func also keeps identity mapping (if any) in sync with this new prot.
@@ -642,7 +669,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                                int strict_prot)
 {
        int is_ram = 0;
-       int id_sz, ret;
+       int ret;
        unsigned long flags;
        unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
 
@@ -679,23 +706,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                                     flags);
        }
 
-       /* Need to keep identity mapping in sync */
-       if (paddr >= __pa(high_memory))
-               return 0;
-
-       id_sz = (__pa(high_memory) < paddr + size) ?
-                               __pa(high_memory) - paddr :
-                               size;
-
-       if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
+       if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
                free_memtype(paddr, paddr + size);
-               printk(KERN_ERR
-                       "%s:%d reserve_pfn_range ioremap_change_attr failed %s "
-                       "for %Lx-%Lx\n",
-                       current->comm, current->pid,
-                       cattr_name(flags),
-                       (unsigned long long)paddr,
-                       (unsigned long long)(paddr + size));
                return -EINVAL;
        }
        return 0;
index cbc2f0c..f1ed66c 100644 (file)
@@ -49,8 +49,9 @@ static inline struct io_mapping *
 io_mapping_create_wc(resource_size_t base, unsigned long size)
 {
        struct io_mapping *iomap;
+       pgprot_t prot;
 
-       if (!is_io_mapping_possible(base, size))
+       if (reserve_io_memtype_wc(base, size, &prot))
                return NULL;
 
        iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
@@ -59,13 +60,14 @@ io_mapping_create_wc(resource_size_t base, unsigned long size)
 
        iomap->base = base;
        iomap->size = size;
-       iomap->prot = pgprot_writecombine(__pgprot(__PAGE_KERNEL));
+       iomap->prot = prot;
        return iomap;
 }
 
 static inline void
 io_mapping_free(struct io_mapping *mapping)
 {
+       free_io_memtype(mapping->base, mapping->size);
        kfree(mapping);
 }