drm/ttm: Fix ttm in-kernel copying of pages with non-standard caching attributes.
author     Thomas Hellstrom <thellstrom@vmware.com>
           Fri, 24 Jul 2009 07:57:34 +0000 (09:57 +0200)
committer  Dave Airlie <airlied@redhat.com>
           Wed, 29 Jul 2009 05:56:13 +0000 (15:56 +1000)
For x86 this affected highmem pages only: lowmem pages are accessed through the
linear kernel map, whose caching attributes TTM already adjusts, whereas highmem
pages were always kmapped cache-coherent. This is fixed using kmap_atomic_prot().

For other architectures, which may not modify the linear kernel map, we
resort to vmap() for now, since kmap_atomic_prot() generally uses the
linear kernel map for lowmem pages. This of course comes with a
performance impact and should be optimized when possible.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
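
As an illustration only (not part of the patch), the mapping strategy described
above boils down to a small helper along these lines; the name ttm_kmap_prot()
is hypothetical and exists purely for this sketch:

    static void *ttm_kmap_prot(struct page *p, pgprot_t prot)
    {
    #ifdef CONFIG_X86
            /*
             * Highmem pages get a temporary mapping with the requested
             * attributes; lowmem pages come back via the linear map, whose
             * caching attributes TTM already keeps consistent on x86.
             */
            return kmap_atomic_prot(p, KM_USER0, prot);
    #else
            /*
             * Where the linear kernel map cannot be relied on to carry the
             * right attributes, map through vmap() for anything other than
             * the default protection.
             */
            if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                    return vmap(&p, 1, 0, prot);
            return kmap(p);
    #endif
    }

The unmap side mirrors the same choice, pairing kunmap_atomic() with
kmap_atomic_prot(), vunmap() with vmap() and kunmap() with kmap(), which is
exactly what the hunks below do.
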
drivers/gpu/drm/ttm/ttm_bo_util.c

index 3e5d0c4..ce2e6f3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -136,7 +136,8 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
 }
 
 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
-                               unsigned long page)
+                               unsigned long page,
+                               pgprot_t prot)
 {
        struct page *d = ttm_tt_get_page(ttm, page);
        void *dst;
@@ -145,17 +146,35 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                return -ENOMEM;
 
        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
-       dst = kmap(d);
+
+#ifdef CONFIG_X86
+       dst = kmap_atomic_prot(d, KM_USER0, prot);
+#else
+       if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+               dst = vmap(&d, 1, 0, prot);
+       else
+               dst = kmap(d);
+#endif
        if (!dst)
                return -ENOMEM;
 
        memcpy_fromio(dst, src, PAGE_SIZE);
-       kunmap(d);
+
+#ifdef CONFIG_X86
+       kunmap_atomic(dst, KM_USER0);
+#else
+       if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+               vunmap(dst);
+       else
+               kunmap(d);
+#endif
+
        return 0;
 }
 
 static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
-                               unsigned long page)
+                               unsigned long page,
+                               pgprot_t prot)
 {
        struct page *s = ttm_tt_get_page(ttm, page);
        void *src;
@@ -164,12 +183,28 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                return -ENOMEM;
 
        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-       src = kmap(s);
+#ifdef CONFIG_X86
+       src = kmap_atomic_prot(s, KM_USER0, prot);
+#else
+       if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+               src = vmap(&s, 1, 0, prot);
+       else
+               src = kmap(s);
+#endif
        if (!src)
                return -ENOMEM;
 
        memcpy_toio(dst, src, PAGE_SIZE);
-       kunmap(s);
+
+#ifdef CONFIG_X86
+       kunmap_atomic(src, KM_USER0);
+#else
+       if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+               vunmap(src);
+       else
+               kunmap(s);
+#endif
+
        return 0;
 }
 
@@ -214,11 +249,17 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 
        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
-               if (old_iomap == NULL)
-                       ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
-               else if (new_iomap == NULL)
-                       ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
-               else
+               if (old_iomap == NULL) {
+                       pgprot_t prot = ttm_io_prot(old_mem->placement,
+                                                   PAGE_KERNEL);
+                       ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
+                                                  prot);
+               } else if (new_iomap == NULL) {
+                       pgprot_t prot = ttm_io_prot(new_mem->placement,
+                                                   PAGE_KERNEL);
+                       ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
+                                                  prot);
+               } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret)
                        goto out1;
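
For reference (illustration only, not taken from the patch), a caller copying
out of, say, an uncached source placement would derive the protection the same
way the loop above does and hand it to the copy helper; TTM_PL_FLAG_UNCACHED is
used here merely as an example caching flag:

    /* Illustration: one page copied out of an uncached source placement. */
    pgprot_t prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED, PAGE_KERNEL);

    ret = ttm_copy_io_ttm_page(ttm, old_iomap, page, prot);
    if (ret)
            goto out1;

A fully cached placement is expected to come back from ttm_io_prot() as plain
PAGE_KERNEL, which is what lets the non-x86 path above fall through to an
ordinary kmap().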