include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 983e8df..ffce2c9 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
  *    Dave Airlie
  */
 #include <linux/list.h>
+#include <linux/slab.h>
 #include <drm/drmP.h>
 #include "radeon_drm.h"
 #include "radeon.h"
 
-struct radeon_object {
-       struct ttm_buffer_object        tobj;
-       struct list_head                list;
-       struct radeon_device            *rdev;
-       struct drm_gem_object           *gobj;
-       struct ttm_bo_kmap_obj          kmap;
-       unsigned                        pin_count;
-       uint64_t                        gpu_addr;
-       void                            *kptr;
-       bool                            is_iomem;
-};
 
 int radeon_ttm_init(struct radeon_device *rdev);
 void radeon_ttm_fini(struct radeon_device *rdev);
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
 
 /*
  * To exclude mutual BO access we rely on bo_reserve exclusion, as all
  * functions call it.
  */
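
A minimal sketch of the rule stated above, assuming the radeon_bo_reserve()/radeon_bo_unreserve() wrappers referenced later in this diff: callers take the reservation before touching BO state and drop it afterwards.

    r = radeon_bo_reserve(bo, false);       /* take the reservation */
    if (unlikely(r != 0))
            return r;
    r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
    radeon_bo_unreserve(bo);                /* drop the reservation */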
 
-static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
+static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
-       return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
+       struct radeon_bo *bo;
+
+       bo = container_of(tbo, struct radeon_bo, tbo);
+       mutex_lock(&bo->rdev->gem.mutex);
+       list_del_init(&bo->list);
+       mutex_unlock(&bo->rdev->gem.mutex);
+       radeon_bo_clear_surface_reg(bo);
+       kfree(bo);
 }
 
-static void radeon_object_unreserve(struct radeon_object *robj)
+bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
 {
-       ttm_bo_unreserve(&robj->tobj);
+       if (bo->destroy == &radeon_ttm_bo_destroy)
+               return true;
+       return false;
 }
 
-static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
+void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 {
-       struct radeon_object *robj;
-
-       robj = container_of(tobj, struct radeon_object, tobj);
-       list_del_init(&robj->list);
-       kfree(robj);
-}
-
-static inline void radeon_object_gpu_addr(struct radeon_object *robj)
-{
-       /* Default gpu address */
-       robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
-       if (robj->tobj.mem.mm_node == NULL) {
-               return;
-       }
-       robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
-       switch (robj->tobj.mem.mem_type) {
-       case TTM_PL_VRAM:
-               robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
-               break;
-       case TTM_PL_TT:
-               robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
-               break;
-       default:
-               DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
-               robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
-               return;
-       }
+       u32 c = 0;
+
+       rbo->placement.fpfn = 0;
+       rbo->placement.lpfn = 0;
+       rbo->placement.placement = rbo->placements;
+       rbo->placement.busy_placement = rbo->placements;
+       if (domain & RADEON_GEM_DOMAIN_VRAM)
+               rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+                                       TTM_PL_FLAG_VRAM;
+       if (domain & RADEON_GEM_DOMAIN_GTT)
+               rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+       if (domain & RADEON_GEM_DOMAIN_CPU)
+               rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+       if (!c)
+               rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+       rbo->placement.num_placement = c;
+       rbo->placement.num_busy_placement = c;
 }
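
For illustration, a hedged sketch of what the function above produces for a combined domain request; the flag values are the ones assigned above, and array order encodes preference:

    radeon_ttm_placement_from_domain(rbo,
            RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT);
    /* rbo->placements[0]: TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM
     * rbo->placements[1]: TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT
     * rbo->placement.num_placement == 2; TTM tries VRAM first, GTT as fallback */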
 
-static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
+int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
+                       unsigned long size, bool kernel, u32 domain,
+                       struct radeon_bo **bo_ptr)
 {
-       uint32_t flags = 0;
-       if (domain & RADEON_GEM_DOMAIN_VRAM) {
-               flags |= TTM_PL_FLAG_VRAM;
-       }
-       if (domain & RADEON_GEM_DOMAIN_GTT) {
-               flags |= TTM_PL_FLAG_TT;
-       }
-       if (domain & RADEON_GEM_DOMAIN_CPU) {
-               flags |= TTM_PL_FLAG_SYSTEM;
-       }
-       if (!flags) {
-               flags |= TTM_PL_FLAG_SYSTEM;
-       }
-       return flags;
-}
-
-int radeon_object_create(struct radeon_device *rdev,
-                        struct drm_gem_object *gobj,
-                        unsigned long size,
-                        bool kernel,
-                        uint32_t domain,
-                        bool interruptible,
-                        struct radeon_object **robj_ptr)
-{
-       struct radeon_object *robj;
+       struct radeon_bo *bo;
        enum ttm_bo_type type;
-       uint32_t flags;
        int r;
 
        if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -134,257 +101,181 @@ int radeon_object_create(struct radeon_device *rdev,
        } else {
                type = ttm_bo_type_device;
        }
-       *robj_ptr = NULL;
-       robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
-       if (robj == NULL) {
+       *bo_ptr = NULL;
+       bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
+       if (bo == NULL)
                return -ENOMEM;
-       }
-       robj->rdev = rdev;
-       robj->gobj = gobj;
-       INIT_LIST_HEAD(&robj->list);
-
-       flags = radeon_object_flags_from_domain(domain);
-       r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
-                                  0, 0, false, NULL, size,
-                                  &radeon_ttm_object_object_destroy);
+       bo->rdev = rdev;
+       bo->gobj = gobj;
+       bo->surface_reg = -1;
+       INIT_LIST_HEAD(&bo->list);
+
+       radeon_ttm_placement_from_domain(bo, domain);
+       /* Kernel allocations are uninterruptible */
+       r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
+                       &bo->placement, 0, 0, !kernel, NULL, size,
+                       &radeon_ttm_bo_destroy);
        if (unlikely(r != 0)) {
-               /* ttm call radeon_ttm_object_object_destroy if error happen */
-               DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
-                         size, flags, 0);
+               if (r != -ERESTARTSYS)
+                       dev_err(rdev->dev,
+                               "object_init failed for (%lu, 0x%08X)\n",
+                               size, domain);
                return r;
        }
-       *robj_ptr = robj;
+       *bo_ptr = bo;
        if (gobj) {
-               list_add_tail(&robj->list, &rdev->gem.objects);
+               mutex_lock(&bo->rdev->gem.mutex);
+               list_add_tail(&bo->list, &rdev->gem.objects);
+               mutex_unlock(&bo->rdev->gem.mutex);
        }
        return 0;
 }
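
A hedged usage sketch of the new constructor (names and sizes illustrative): a kernel-internal buffer passes kernel=true, which selects ttm_bo_type_kernel and an uninterruptible wait, and a NULL gobj keeps the BO off the GEM objects list.

    struct radeon_bo *bo;
    int r;

    r = radeon_bo_create(rdev, NULL, PAGE_SIZE, true,
                         RADEON_GEM_DOMAIN_GTT, &bo);
    if (r)
            return r;           /* *bo_ptr was never set on failure */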
 
-int radeon_object_kmap(struct radeon_object *robj, void **ptr)
+int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
 {
+       bool is_iomem;
        int r;
 
-       spin_lock(&robj->tobj.lock);
-       if (robj->kptr) {
+       if (bo->kptr) {
                if (ptr) {
-                       *ptr = robj->kptr;
+                       *ptr = bo->kptr;
                }
-               spin_unlock(&robj->tobj.lock);
                return 0;
        }
-       spin_unlock(&robj->tobj.lock);
-       r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
+       r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r) {
                return r;
        }
-       spin_lock(&robj->tobj.lock);
-       robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
-       spin_unlock(&robj->tobj.lock);
+       bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr) {
-               *ptr = robj->kptr;
+               *ptr = bo->kptr;
        }
+       radeon_bo_check_tiling(bo, 0, 0);
        return 0;
 }
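
A hedged sketch of the map/unmap pairing (radeon_bo_size() is assumed to be the inline helper that replaces the removed radeon_object_size()); note that bo->kptr is cached, so a second kmap call simply returns the existing mapping:

    void *ptr;

    r = radeon_bo_kmap(bo, &ptr);           /* caller holds the reservation */
    if (r == 0) {
            memset(ptr, 0, radeon_bo_size(bo));
            radeon_bo_kunmap(bo);
    }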
 
-void radeon_object_kunmap(struct radeon_object *robj)
+void radeon_bo_kunmap(struct radeon_bo *bo)
 {
-       spin_lock(&robj->tobj.lock);
-       if (robj->kptr == NULL) {
-               spin_unlock(&robj->tobj.lock);
+       if (bo->kptr == NULL)
                return;
-       }
-       robj->kptr = NULL;
-       spin_unlock(&robj->tobj.lock);
-       ttm_bo_kunmap(&robj->kmap);
+       bo->kptr = NULL;
+       radeon_bo_check_tiling(bo, 0, 0);
+       ttm_bo_kunmap(&bo->kmap);
 }
 
-void radeon_object_unref(struct radeon_object **robj)
+void radeon_bo_unref(struct radeon_bo **bo)
 {
-       struct ttm_buffer_object *tobj;
+       struct ttm_buffer_object *tbo;
 
-       if ((*robj) == NULL) {
+       if ((*bo) == NULL)
                return;
-       }
-       tobj = &((*robj)->tobj);
-       ttm_bo_unref(&tobj);
-       if (tobj == NULL) {
-               *robj = NULL;
-       }
-}
-
-int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
-{
-       *offset = robj->tobj.addr_space_offset;
-       return 0;
+       tbo = &((*bo)->tbo);
+       ttm_bo_unref(&tbo);
+       if (tbo == NULL)
+               *bo = NULL;
 }
 
-int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
-                     uint64_t *gpu_addr)
+int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 {
-       uint32_t flags;
-       uint32_t tmp;
-       void *fbptr;
-       int r;
+       int r, i;
 
-       flags = radeon_object_flags_from_domain(domain);
-       spin_lock(&robj->tobj.lock);
-       if (robj->pin_count) {
-               robj->pin_count++;
-               if (gpu_addr != NULL) {
-                       *gpu_addr = robj->gpu_addr;
-               }
-               spin_unlock(&robj->tobj.lock);
+       if (bo->pin_count) {
+               bo->pin_count++;
+               if (gpu_addr)
+                       *gpu_addr = radeon_bo_gpu_offset(bo);
                return 0;
        }
-       spin_unlock(&robj->tobj.lock);
-       r = radeon_object_reserve(robj, false);
-       if (unlikely(r != 0)) {
-               DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
-               return r;
-       }
-       if (robj->rdev->fbdev_robj == robj) {
-               mutex_lock(&robj->rdev->fbdev_info->lock);
-               radeon_object_kunmap(robj);
-       }
-       tmp = robj->tobj.mem.placement;
-       ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
-       robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
-       r = ttm_buffer_object_validate(&robj->tobj,
-                                      robj->tobj.proposed_placement,
-                                      false, false);
-       radeon_object_gpu_addr(robj);
-       if (gpu_addr != NULL) {
-               *gpu_addr = robj->gpu_addr;
-       }
-       robj->pin_count = 1;
-       if (unlikely(r != 0)) {
-               DRM_ERROR("radeon: failed to pin object.\n");
-       }
-       radeon_object_unreserve(robj);
-       if (robj->rdev->fbdev_robj == robj) {
-               if (!r) {
-                       r = radeon_object_kmap(robj, &fbptr);
-               }
-               if (!r) {
-                       robj->rdev->fbdev_info->screen_base = fbptr;
-                       robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
-               }
-               mutex_unlock(&robj->rdev->fbdev_info->lock);
-       }
+       radeon_ttm_placement_from_domain(bo, domain);
+       /* force pinning into visible VRAM */
+       bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+       for (i = 0; i < bo->placement.num_placement; i++)
+               bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       if (likely(r == 0)) {
+               bo->pin_count = 1;
+               if (gpu_addr != NULL)
+                       *gpu_addr = radeon_bo_gpu_offset(bo);
+       }
+       if (unlikely(r != 0))
+               dev_err(bo->rdev->dev, "%p pin failed\n", bo);
        return r;
 }
 
-void radeon_object_unpin(struct radeon_object *robj)
+int radeon_bo_unpin(struct radeon_bo *bo)
 {
-       uint32_t flags;
-       void *fbptr;
-       int r;
+       int r, i;
 
-       spin_lock(&robj->tobj.lock);
-       if (!robj->pin_count) {
-               spin_unlock(&robj->tobj.lock);
-               printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
-               return;
-       }
-       robj->pin_count--;
-       if (robj->pin_count) {
-               spin_unlock(&robj->tobj.lock);
-               return;
-       }
-       spin_unlock(&robj->tobj.lock);
-       r = radeon_object_reserve(robj, false);
-       if (unlikely(r != 0)) {
-               DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
-               return;
-       }
-       if (robj->rdev->fbdev_robj == robj) {
-               mutex_lock(&robj->rdev->fbdev_info->lock);
-               radeon_object_kunmap(robj);
-       }
-       flags = robj->tobj.mem.placement;
-       robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
-       r = ttm_buffer_object_validate(&robj->tobj,
-                                      robj->tobj.proposed_placement,
-                                      false, false);
-       if (unlikely(r != 0)) {
-               DRM_ERROR("radeon: failed to unpin buffer.\n");
-       }
-       radeon_object_unreserve(robj);
-       if (robj->rdev->fbdev_robj == robj) {
-               if (!r) {
-                       r = radeon_object_kmap(robj, &fbptr);
-               }
-               if (!r) {
-                       robj->rdev->fbdev_info->screen_base = fbptr;
-                       robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
-               }
-               mutex_unlock(&robj->rdev->fbdev_info->lock);
-       }
-}
-
-int radeon_object_wait(struct radeon_object *robj)
-{
-       int r = 0;
-
-       /* FIXME: should use block reservation instead */
-       r = radeon_object_reserve(robj, true);
-       if (unlikely(r != 0)) {
-               DRM_ERROR("radeon: failed to reserve object for waiting.\n");
-               return r;
-       }
-       spin_lock(&robj->tobj.lock);
-       if (robj->tobj.sync_obj) {
-               r = ttm_bo_wait(&robj->tobj, true, false, false);
+       if (!bo->pin_count) {
+               dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
+               return 0;
        }
-       spin_unlock(&robj->tobj.lock);
-       radeon_object_unreserve(robj);
+       bo->pin_count--;
+       if (bo->pin_count)
+               return 0;
+       for (i = 0; i < bo->placement.num_placement; i++)
+               bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       if (unlikely(r != 0))
+               dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
        return r;
 }
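
pin_count makes the pair above nest; a hedged sketch of the refcounting:

    radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr); /* 0 -> 1: validates with NO_EVICT set */
    radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, NULL);      /* 1 -> 2: just increments */
    radeon_bo_unpin(bo);                                  /* 2 -> 1: no-op */
    radeon_bo_unpin(bo);                                  /* 1 -> 0: clears NO_EVICT, revalidates */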
 
-int radeon_object_evict_vram(struct radeon_device *rdev)
+int radeon_bo_evict_vram(struct radeon_device *rdev)
 {
-       if (rdev->flags & RADEON_IS_IGP) {
-               /* Useless to evict on IGP chips */
-               return 0;
+       /* late 2.6.33 fix for IGP hibernate - we need pm ops to do this correctly */
+       if (0 && (rdev->flags & RADEON_IS_IGP)) {
+               if (rdev->mc.igp_sideport_enabled == false)
+                       /* Useless to evict on IGP chips */
+                       return 0;
        }
        return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
 }
 
-void radeon_object_force_delete(struct radeon_device *rdev)
+void radeon_bo_force_delete(struct radeon_device *rdev)
 {
-       struct radeon_object *robj, *n;
+       struct radeon_bo *bo, *n;
        struct drm_gem_object *gobj;
 
        if (list_empty(&rdev->gem.objects)) {
                return;
        }
-       DRM_ERROR("Userspace still has active objects !\n");
-       list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
+       dev_err(rdev->dev, "Userspace still has active objects!\n");
+       list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
                mutex_lock(&rdev->ddev->struct_mutex);
-               gobj = robj->gobj;
-               DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
-                         gobj, robj, (unsigned long)gobj->size,
-                         *((unsigned long *)&gobj->refcount));
-               list_del_init(&robj->list);
-               radeon_object_unref(&robj);
+               gobj = bo->gobj;
+               dev_err(rdev->dev, "%p %p %lu %lu force free\n",
+                       gobj, bo, (unsigned long)gobj->size,
+                       *((unsigned long *)&gobj->refcount));
+               mutex_lock(&bo->rdev->gem.mutex);
+               list_del_init(&bo->list);
+               mutex_unlock(&bo->rdev->gem.mutex);
+               radeon_bo_unref(&bo);
                gobj->driver_private = NULL;
                drm_gem_object_unreference(gobj);
                mutex_unlock(&rdev->ddev->struct_mutex);
        }
 }
 
-int radeon_object_init(struct radeon_device *rdev)
+int radeon_bo_init(struct radeon_device *rdev)
 {
+       /* Add an MTRR for the VRAM */
+       rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
+                       MTRR_TYPE_WRCOMB, 1);
+       DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
+               rdev->mc.mc_vram_size >> 20,
+               (unsigned long long)rdev->mc.aper_size >> 20);
+       DRM_INFO("RAM width %dbits %cDR\n",
+                       rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
        return radeon_ttm_init(rdev);
 }
 
-void radeon_object_fini(struct radeon_device *rdev)
+void radeon_bo_fini(struct radeon_device *rdev)
 {
        radeon_ttm_fini(rdev);
 }
 
-void radeon_object_list_add_object(struct radeon_object_list *lobj,
-                                  struct list_head *head)
+void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+                               struct list_head *head)
 {
        if (lobj->wdomain) {
                list_add(&lobj->list, head);
@@ -393,119 +284,224 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj,
        }
 }
 
-int radeon_object_list_reserve(struct list_head *head)
+int radeon_bo_list_reserve(struct list_head *head)
 {
-       struct radeon_object_list *lobj;
-       struct list_head *i;
+       struct radeon_bo_list *lobj;
        int r;
 
-       list_for_each(i, head) {
-               lobj = list_entry(i, struct radeon_object_list, list);
-               if (!lobj->robj->pin_count) {
-                       r = radeon_object_reserve(lobj->robj, true);
-                       if (unlikely(r != 0)) {
-                               DRM_ERROR("radeon: failed to reserve object.\n");
-                               return r;
-                       }
-               } else {
-               }
+       list_for_each_entry(lobj, head, list) {
+               r = radeon_bo_reserve(lobj->bo, false);
+               if (unlikely(r != 0))
+                       return r;
        }
        return 0;
 }
 
-void radeon_object_list_unreserve(struct list_head *head)
+void radeon_bo_list_unreserve(struct list_head *head)
 {
-       struct radeon_object_list *lobj;
-       struct list_head *i;
-
-       list_for_each(i, head) {
-               lobj = list_entry(i, struct radeon_object_list, list);
-               if (!lobj->robj->pin_count) {
-                       radeon_object_unreserve(lobj->robj);
-               } else {
-               }
+       struct radeon_bo_list *lobj;
+
+       list_for_each_entry(lobj, head, list) {
+               /* only unreserve objects we successfully reserved */
+               if (radeon_bo_is_reserved(lobj->bo))
+                       radeon_bo_unreserve(lobj->bo);
        }
 }
 
-int radeon_object_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head)
 {
-       struct radeon_object_list *lobj;
-       struct radeon_object *robj;
-       struct radeon_fence *old_fence = NULL;
-       struct list_head *i;
-       uint32_t flags;
+       struct radeon_bo_list *lobj;
+       struct radeon_bo *bo;
        int r;
 
-       r = radeon_object_list_reserve(head);
+       r = radeon_bo_list_reserve(head);
        if (unlikely(r != 0)) {
-               radeon_object_list_unreserve(head);
                return r;
        }
-       list_for_each(i, head) {
-               lobj = list_entry(i, struct radeon_object_list, list);
-               robj = lobj->robj;
-               if (lobj->wdomain) {
-                       flags = radeon_object_flags_from_domain(lobj->wdomain);
-                       flags |= TTM_PL_FLAG_TT;
-               } else {
-                       flags = radeon_object_flags_from_domain(lobj->rdomain);
-                       flags |= TTM_PL_FLAG_TT;
-                       flags |= TTM_PL_FLAG_VRAM;
-               }
-               if (!robj->pin_count) {
-                       robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING;
-                       r = ttm_buffer_object_validate(&robj->tobj,
-                                                      robj->tobj.proposed_placement,
-                                                      true, false);
-                       if (unlikely(r)) {
-                               radeon_object_list_unreserve(head);
-                               DRM_ERROR("radeon: failed to validate.\n");
-                               return r;
+       list_for_each_entry(lobj, head, list) {
+               bo = lobj->bo;
+               if (!bo->pin_count) {
+                       if (lobj->wdomain) {
+                               radeon_ttm_placement_from_domain(bo,
+                                                               lobj->wdomain);
+                       } else {
+                               radeon_ttm_placement_from_domain(bo,
+                                                               lobj->rdomain);
                        }
-                       radeon_object_gpu_addr(robj);
-               }
-               lobj->gpu_offset = robj->gpu_addr;
-               if (fence) {
-                       old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
-                       robj->tobj.sync_obj = radeon_fence_ref(fence);
-                       robj->tobj.sync_obj_arg = NULL;
-               }
-               if (old_fence) {
-                       radeon_fence_unref(&old_fence);
+                       r = ttm_bo_validate(&bo->tbo, &bo->placement,
+                                               true, false);
+                       if (unlikely(r))
+                               return r;
                }
+               lobj->gpu_offset = radeon_bo_gpu_offset(bo);
+               lobj->tiling_flags = bo->tiling_flags;
        }
        return 0;
 }
 
-void radeon_object_list_unvalidate(struct list_head *head)
+void radeon_bo_list_fence(struct list_head *head, void *fence)
 {
-       struct radeon_object_list *lobj;
+       struct radeon_bo_list *lobj;
+       struct radeon_bo *bo;
        struct radeon_fence *old_fence = NULL;
-       struct list_head *i;
 
-       list_for_each(i, head) {
-               lobj = list_entry(i, struct radeon_object_list, list);
-               old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
-               lobj->robj->tobj.sync_obj = NULL;
+       list_for_each_entry(lobj, head, list) {
+               bo = lobj->bo;
+               spin_lock(&bo->tbo.lock);
+               old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+               bo->tbo.sync_obj = radeon_fence_ref(fence);
+               bo->tbo.sync_obj_arg = NULL;
+               spin_unlock(&bo->tbo.lock);
                if (old_fence) {
                        radeon_fence_unref(&old_fence);
                }
        }
-       radeon_object_list_unreserve(head);
 }
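
Taken together, the list helpers implement the command-submission flow; a hedged sketch of the expected call order (the fence would come from the CS path, e.g. radeon_fence_create()):

    r = radeon_bo_list_validate(&cs_list);      /* reserves and places every BO */
    if (r) {
            radeon_bo_list_unreserve(&cs_list); /* safe: skips BOs never reserved */
            return r;
    }
    /* ... emit commands using lobj->gpu_offset ... */
    radeon_bo_list_fence(&cs_list, fence);      /* attach the fence to each BO */
    radeon_bo_list_unreserve(&cs_list);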
 
-void radeon_object_list_clean(struct list_head *head)
+int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
+                            struct vm_area_struct *vma)
 {
-       radeon_object_list_unreserve(head);
+       return ttm_fbdev_mmap(vma, &bo->tbo);
 }
 
-int radeon_object_fbdev_mmap(struct radeon_object *robj,
-                            struct vm_area_struct *vma)
+int radeon_bo_get_surface_reg(struct radeon_bo *bo)
+{
+       struct radeon_device *rdev = bo->rdev;
+       struct radeon_surface_reg *reg;
+       struct radeon_bo *old_object;
+       int steal;
+       int i;
+
+       BUG_ON(!atomic_read(&bo->tbo.reserved));
+
+       if (!bo->tiling_flags)
+               return 0;
+
+       if (bo->surface_reg >= 0) {
+               reg = &rdev->surface_regs[bo->surface_reg];
+               i = bo->surface_reg;
+               goto out;
+       }
+
+       steal = -1;
+       for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+
+               reg = &rdev->surface_regs[i];
+               if (!reg->bo)
+                       break;
+
+               old_object = reg->bo;
+               if (old_object->pin_count == 0)
+                       steal = i;
+       }
+
+       /* if we are all out */
+       if (i == RADEON_GEM_MAX_SURFACES) {
+               if (steal == -1)
+                       return -ENOMEM;
+               /* find someone with a surface reg and nuke their BO */
+               reg = &rdev->surface_regs[steal];
+               old_object = reg->bo;
+               /* blow away the mapping */
+               DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
+               ttm_bo_unmap_virtual(&old_object->tbo);
+               old_object->surface_reg = -1;
+               i = steal;
+       }
+
+       bo->surface_reg = i;
+       reg->bo = bo;
+
+out:
+       radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
+                              bo->tbo.mem.mm_node->start << PAGE_SHIFT,
+                              bo->tbo.num_pages << PAGE_SHIFT);
+       return 0;
+}
+
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
+{
+       struct radeon_device *rdev = bo->rdev;
+       struct radeon_surface_reg *reg;
+
+       if (bo->surface_reg == -1)
+               return;
+
+       reg = &rdev->surface_regs[bo->surface_reg];
+       radeon_clear_surface_reg(rdev, bo->surface_reg);
+
+       reg->bo = NULL;
+       bo->surface_reg = -1;
+}
+
+int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+                               uint32_t tiling_flags, uint32_t pitch)
+{
+       int r;
+
+       r = radeon_bo_reserve(bo, false);
+       if (unlikely(r != 0))
+               return r;
+       bo->tiling_flags = tiling_flags;
+       bo->pitch = pitch;
+       radeon_bo_unreserve(bo);
+       return 0;
+}
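
A hedged sketch tying this to the surface-register helpers in this file: setting RADEON_TILING_SURFACE only records the state; a register is actually claimed when radeon_bo_check_tiling() later sees the BO mapped or resident in VRAM.

    r = radeon_bo_set_tiling_flags(bo, RADEON_TILING_SURFACE | RADEON_TILING_MACRO,
                                   pitch);
    /* later, on kmap or on a move into VRAM:
     *   radeon_bo_check_tiling() -> radeon_bo_get_surface_reg() */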
+
+void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+                               uint32_t *tiling_flags,
+                               uint32_t *pitch)
 {
-       return ttm_fbdev_mmap(vma, &robj->tobj);
+       BUG_ON(!atomic_read(&bo->tbo.reserved));
+       if (tiling_flags)
+               *tiling_flags = bo->tiling_flags;
+       if (pitch)
+               *pitch = bo->pitch;
 }
 
-unsigned long radeon_object_size(struct radeon_object *robj)
+int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+                               bool force_drop)
 {
-       return robj->tobj.num_pages << PAGE_SHIFT;
+       BUG_ON(!atomic_read(&bo->tbo.reserved));
+
+       if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
+               return 0;
+
+       if (force_drop) {
+               radeon_bo_clear_surface_reg(bo);
+               return 0;
+       }
+
+       if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
+               if (!has_moved)
+                       return 0;
+
+               if (bo->surface_reg >= 0)
+                       radeon_bo_clear_surface_reg(bo);
+               return 0;
+       }
+
+       if ((bo->surface_reg >= 0) && !has_moved)
+               return 0;
+
+       return radeon_bo_get_surface_reg(bo);
+}
+
+void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+                          struct ttm_mem_reg *mem)
+{
+       struct radeon_bo *rbo;
+       if (!radeon_ttm_bo_is_radeon_bo(bo))
+               return;
+       rbo = container_of(bo, struct radeon_bo, tbo);
+       radeon_bo_check_tiling(rbo, 0, 1);
+}
+
+void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+       struct radeon_bo *rbo;
+       if (!radeon_ttm_bo_is_radeon_bo(bo))
+               return;
+       rbo = container_of(bo, struct radeon_bo, tbo);
+       radeon_bo_check_tiling(rbo, 0, 0);
 }