include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d9b239b..ffce2c9 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -30,6 +30,7 @@
  *    Dave Airlie
  */
 #include <linux/list.h>
+#include <linux/slab.h>
 #include <drm/drmP.h>
 #include "radeon_drm.h"
 #include "radeon.h"
@@ -56,23 +57,11 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
        kfree(bo);
 }
 
-static inline u32 radeon_ttm_flags_from_domain(u32 domain)
+bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
 {
-       u32 flags = 0;
-
-       if (domain & RADEON_GEM_DOMAIN_VRAM) {
-               flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
-       }
-       if (domain & RADEON_GEM_DOMAIN_GTT) {
-               flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-       }
-       if (domain & RADEON_GEM_DOMAIN_CPU) {
-               flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
-       }
-       if (!flags) {
-               flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
-       }
-       return flags;
+       if (bo->destroy == &radeon_ttm_bo_destroy)
+               return true;
+       return false;
 }
 
 void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
@@ -90,6 +79,8 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
                rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        if (domain & RADEON_GEM_DOMAIN_CPU)
                rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+       if (!c)
+               rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        rbo->placement.num_placement = c;
        rbo->placement.num_busy_placement = c;
 }
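
The deleted radeon_ttm_flags_from_domain() OR'ed all requested domains into one flags word and fell back to SYSTEM when no domain bit was set; the placement array keeps one entry per domain so TTM can try them in priority order, and the new `if (!c)` branch restores that fallback. An illustrative check of the restored behaviour, using only fields visible in this hunk:

```c
/* Not part of the patch: demonstrates the domain == 0 fallback. */
radeon_ttm_placement_from_domain(rbo, 0);	/* no domain bits set */
WARN_ON(rbo->placement.num_placement != 1);
WARN_ON(rbo->placements[0] !=
	(TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM));	/* old default */
```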
@@ -100,7 +91,6 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 {
        struct radeon_bo *bo;
        enum ttm_bo_type type;
-       u32 flags;
        int r;
 
        if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -120,17 +110,16 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
 
-       flags = radeon_ttm_flags_from_domain(domain);
-retry:
-       r = ttm_buffer_object_init(&rdev->mman.bdev, &bo->tbo, size, type,
-                                       flags, 0, 0, true, NULL, size,
-                                       &radeon_ttm_bo_destroy);
+       radeon_ttm_placement_from_domain(bo, domain);
+       /* Kernel allocations are uninterruptible */
+       r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
+                       &bo->placement, 0, 0, !kernel, NULL, size,
+                       &radeon_ttm_bo_destroy);
        if (unlikely(r != 0)) {
-               if (r == -ERESTART)
-                       goto retry;
-               /* ttm call radeon_ttm_object_object_destroy if error happen */
-               dev_err(rdev->dev, "object_init failed for (%ld, 0x%08X)\n",
-                       size, flags);
+               if (r != -ERESTARTSYS)
+                       dev_err(rdev->dev,
+                               "object_init failed for (%lu, 0x%08X)\n",
+                               size, domain);
                return r;
        }
        *bo_ptr = bo;
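
Two changes are folded into this hunk: ttm_bo_init() consumes the per-domain placement list instead of a single flags word, and its interruptible argument is now !kernel, so only userspace-triggered allocations can be interrupted by a signal. The old -ERESTART retry loop is gone because TTM now returns -ERESTARTSYS, which the syscall layer handles by restarting the call transparently; the driver only avoids logging that case as an error. A hedged caller sketch (the full radeon_bo_create() parameter list is abbreviated in this diff, so treat the arguments as an assumption):

```c
struct radeon_bo *bo;
int r;

/* kernel == true: uninterruptible path, never returns -ERESTARTSYS */
r = radeon_bo_create(rdev, NULL, PAGE_SIZE, true,
		     RADEON_GEM_DOMAIN_VRAM, &bo);
if (r)
	return r;	/* ttm_bo_init() already destroyed the BO on error */
```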
@@ -190,7 +179,6 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 {
        int r, i;
 
-       radeon_ttm_placement_from_domain(bo, domain);
        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
@@ -198,20 +186,18 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
                return 0;
        }
        radeon_ttm_placement_from_domain(bo, domain);
+       /* force pinning into visible video ram */
+       bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-retry:
-       r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, true, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
                        *gpu_addr = radeon_bo_gpu_offset(bo);
        }
-       if (unlikely(r != 0)) {
-               if (r == -ERESTART)
-                       goto retry;
+       if (unlikely(r != 0))
                dev_err(bo->rdev->dev, "%p pin failed\n", bo);
-       }
        return r;
 }
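
Besides dropping the duplicated radeon_ttm_placement_from_domain() call, pinning now clamps placement.lpfn to the CPU-visible slice of VRAM: pinned buffers are typically scanout surfaces or cursors that the CPU must reach through the PCI aperture, so TTM may only place them below that page limit. A hedged usage sketch of the pin/unpin pair (program_scanout() is a hypothetical consumer):

```c
u64 gpu_addr;
int r;

r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
if (r == 0) {
	/* bo is now NO_EVICT and placed below
	 * mc.visible_vram_size >> PAGE_SHIFT, i.e. CPU-reachable. */
	program_scanout(gpu_addr);
	radeon_bo_unpin(bo);	/* drops pin_count, clears NO_EVICT */
}
```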
 
@@ -228,22 +214,19 @@ int radeon_bo_unpin(struct radeon_bo *bo)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-retry:
-       r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, true, false);
-       if (unlikely(r != 0)) {
-               if (r == -ERESTART)
-                       goto retry;
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       if (unlikely(r != 0))
                dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
-               return r;
-       }
-       return 0;
+       return r;
 }
 
 int radeon_bo_evict_vram(struct radeon_device *rdev)
 {
-       if (rdev->flags & RADEON_IS_IGP) {
-               /* Useless to evict on IGP chips */
-               return 0;
+       /* late 2.6.33 fix for IGP hibernation - we need pm ops to do this correctly */
+       if (0 && (rdev->flags & RADEON_IS_IGP)) {
+               if (rdev->mc.igp_sideport_enabled == false)
+                       /* Useless to evict on IGP chips */
+                       return 0;
        }
        return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
 }
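
Note the IGP early-return is deliberately compiled out with `0 &&`: on IGPs the "VRAM" is usually stolen system memory (no sideport), so evicting it before suspend looks pointless, but skipping the eviction broke hibernation, hence the branch stays dead until proper pm ops exist. The eviction itself remains a single TTM call; a hedged sketch of the suspend-path caller (the call site is not part of this diff):

```c
/* cf. the driver's suspend path, e.g. radeon_suspend_kms() */
r = radeon_bo_evict_vram(rdev);
if (r)
	dev_err(rdev->dev, "failed to evict VRAM before suspend\n");
```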
@@ -325,11 +308,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
        }
 }
 
-int radeon_bo_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head)
 {
        struct radeon_bo_list *lobj;
        struct radeon_bo *bo;
-       struct radeon_fence *old_fence = NULL;
        int r;
 
        r = radeon_bo_list_reserve(head);
@@ -346,44 +328,34 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
                                radeon_ttm_placement_from_domain(bo,
                                                                lobj->rdomain);
                        }
-retry:
-                       r = ttm_buffer_object_validate(&bo->tbo,
-                                               &bo->placement,
+                       r = ttm_bo_validate(&bo->tbo, &bo->placement,
                                                true, false);
-                       if (unlikely(r)) {
-                               if (r == -ERESTART)
-                                       goto retry;
+                       if (unlikely(r))
                                return r;
-                       }
                }
                lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                lobj->tiling_flags = bo->tiling_flags;
-               if (fence) {
-                       old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
-                       bo->tbo.sync_obj = radeon_fence_ref(fence);
-                       bo->tbo.sync_obj_arg = NULL;
-               }
-               if (old_fence) {
-                       radeon_fence_unref(&old_fence);
-               }
        }
        return 0;
 }
 
-void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
+void radeon_bo_list_fence(struct list_head *head, void *fence)
 {
        struct radeon_bo_list *lobj;
-       struct radeon_fence *old_fence;
-
-       if (fence)
-               list_for_each_entry(lobj, head, list) {
-                       old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
-                       if (old_fence == fence) {
-                               lobj->bo->tbo.sync_obj = NULL;
-                               radeon_fence_unref(&old_fence);
-                       }
+       struct radeon_bo *bo;
+       struct radeon_fence *old_fence = NULL;
+
+       list_for_each_entry(lobj, head, list) {
+               bo = lobj->bo;
+               spin_lock(&bo->tbo.lock);
+               old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+               bo->tbo.sync_obj = radeon_fence_ref(fence);
+               bo->tbo.sync_obj_arg = NULL;
+               spin_unlock(&bo->tbo.lock);
+               if (old_fence) {
+                       radeon_fence_unref(&old_fence);
                }
-       radeon_bo_list_unreserve(head);
+       }
 }
 
 int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
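
Dropping the fence parameter from radeon_bo_list_validate() and introducing radeon_bo_list_fence() separates placement from fencing: buffers are fenced only after the command stream referencing them has actually been emitted, which is also why the old radeon_bo_list_unvalidate() error path (stripping a prematurely attached fence) can go away entirely. A hedged sketch of the resulting command-submission lifecycle (the CS parser names are assumptions, not part of this diff):

```c
/* reserve + place every BO; on failure nothing has been fenced yet */
r = radeon_bo_list_validate(&parser->validated);
if (r)
	goto out;

/* ... emit the command stream using each lobj->gpu_offset ... */

/* attach the fence only after emission; done under bo->tbo.lock */
radeon_bo_list_fence(&parser->validated, parser->ib->fence);
out:
	radeon_bo_list_unreserve(&parser->validated);
```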
@@ -392,7 +364,7 @@ int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
        return ttm_fbdev_mmap(vma, &bo->tbo);
 }
 
-static int radeon_bo_get_surface_reg(struct radeon_bo *bo)
+int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 {
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;
@@ -516,14 +488,20 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 }
 
 void radeon_bo_move_notify(struct ttm_buffer_object *bo,
-                               struct ttm_mem_reg *mem)
+                          struct ttm_mem_reg *mem)
 {
-       struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+       struct radeon_bo *rbo;
+       if (!radeon_ttm_bo_is_radeon_bo(bo))
+               return;
+       rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 1);
 }
 
 void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
-       struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+       struct radeon_bo *rbo;
+       if (!radeon_ttm_bo_is_radeon_bo(bo))
+               return;
+       rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
 }
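
Both notify callbacks now refuse buffers that radeon does not own: TTM creates transient "ghost" buffer objects of its own during moves, and those are not embedded in a struct radeon_bo, so an unconditional container_of() would compute a bogus pointer. radeon_ttm_bo_is_radeon_bo() turns the destroy callback into a cheap type tag. The idiom as a standalone sketch (the callback name is illustrative):

```c
/* Only BOs created by radeon with &radeon_ttm_bo_destroy as their
 * destructor are embedded in a struct radeon_bo; anything else must
 * be left untouched. */
static void example_ttm_notify(struct ttm_buffer_object *bo)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;		/* TTM-internal ghost object, not ours */
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
}
```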