drm/vmwgfx: Use bo_driver::move_notify to unbind GMRs.
author Thomas Hellstrom <thellstrom@vmware.com>
Wed, 13 Jan 2010 21:28:39 +0000 (22:28 +0100)
committer Dave Airlie <airlied@redhat.com>
Thu, 14 Jan 2010 02:18:43 +0000 (12:18 +1000)
GMR unbinding was previously done explicitly for overlay and
framebuffer (fb) buffers. With the move_notify hook it is now done for
any buffer leaving the SYSTEM memory region.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
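
For context, and not part of this patch: TTM invokes the
bo_driver::move_notify hook from its buffer-move path
(ttm_bo_handle_move_mem() in drivers/gpu/drm/ttm/ttm_bo.c) and passes
the placement the buffer is moving to. Roughly, as a hedged sketch of
the pre-existing TTM core rather than of code in this series:

	/*
	 * Paraphrased TTM core call site (context only, not part of
	 * this patch). "mem" is the destination placement, which is
	 * why the driver hook added below can treat a mem_type other
	 * than TTM_PL_SYSTEM as "this buffer is leaving the SYSTEM
	 * domain" and drop its GMR binding there.
	 */
	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);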

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index d6f2d2b..4be47d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -172,6 +172,13 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
        return 0;
 }
 
+static void vmw_move_notify(struct ttm_buffer_object *bo,
+                    struct ttm_mem_reg *new_mem)
+{
+       if (new_mem->mem_type != TTM_PL_SYSTEM)
+               vmw_dmabuf_gmr_unbind(bo);
+}
+
 /**
  * FIXME: We're using the old vmware polling method to sync.
  * Do this with fences instead.
@@ -225,5 +232,6 @@ struct ttm_bo_driver vmw_bo_driver = {
        .sync_obj_wait = vmw_sync_obj_wait,
        .sync_obj_flush = vmw_sync_obj_flush,
        .sync_obj_unref = vmw_sync_obj_unref,
-       .sync_obj_ref = vmw_sync_obj_ref
+       .sync_obj_ref = vmw_sync_obj_ref,
+       .move_notify = vmw_move_notify
 };
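
With .move_notify wired into vmw_bo_driver above, any validation that
moves a buffer out of TTM_PL_SYSTEM (for instance pinning it into VRAM)
now unbinds its GMR as a side effect. That is what lets the explicit
unbind blocks in the fb and overlay paths below be dropped. A minimal
caller-side sketch, reusing placement and helper names that appear
elsewhere in this patch and assuming the ttm_bo_reserve() signature of
this kernel series:

	/*
	 * Sketch only: pinning a buffer into VRAM after this change.
	 * ttm_bo_validate() performs the move; TTM then calls
	 * vmw_move_notify() with the VRAM placement, which unbinds the
	 * buffer's GMR and releases its GMR id, so no explicit
	 * vmw_gmr_unbind()/ida_remove() is needed in the caller.
	 */
	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_validate(bo, &vmw_vram_ne_placement, false, false);
	ttm_bo_unreserve(bo);
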
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e61bd85..4c9d6b2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -353,6 +353,7 @@ extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
                                       struct vmw_dma_buffer *bo);
 extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
                                struct vmw_dma_buffer *bo);
+extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 641dde7..4f4f643 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -649,14 +649,6 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
        if (unlikely(ret != 0))
                goto err_unlock;
 
-       if (vmw_bo->gmr_bound) {
-               vmw_gmr_unbind(vmw_priv, vmw_bo->gmr_id);
-               spin_lock(&bo->glob->lru_lock);
-               ida_remove(&vmw_priv->gmr_ida, vmw_bo->gmr_id);
-               spin_unlock(&bo->glob->lru_lock);
-               vmw_bo->gmr_bound = NULL;
-       }
-
        ret = ttm_bo_validate(bo, &ne_placement, false, false);
        ttm_bo_unreserve(bo);
 err_unlock:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index bb6e6a0..5b6eabe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -104,7 +104,6 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
                                  bool pin, bool interruptible)
 {
        struct ttm_buffer_object *bo = &buf->base;
-       struct ttm_bo_global *glob = bo->glob;
        struct ttm_placement *overlay_placement = &vmw_vram_placement;
        int ret;
 
@@ -116,14 +115,6 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                goto err;
 
-       if (buf->gmr_bound) {
-               vmw_gmr_unbind(dev_priv, buf->gmr_id);
-               spin_lock(&glob->lru_lock);
-               ida_remove(&dev_priv->gmr_ida, buf->gmr_id);
-               spin_unlock(&glob->lru_lock);
-               buf->gmr_bound = NULL;
-       }
-
        if (pin)
                overlay_placement = &vmw_vram_ne_placement;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 125c2f4..e087807 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -757,20 +757,29 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
        return bo_user_size + page_array_size;
 }
 
-void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
+void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
 {
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
        struct ttm_bo_global *glob = bo->glob;
        struct vmw_private *dev_priv =
                container_of(bo->bdev, struct vmw_private, bdev);
 
-       ttm_mem_global_free(glob->mem_glob, bo->acc_size);
        if (vmw_bo->gmr_bound) {
                vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
                spin_lock(&glob->lru_lock);
                ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
                spin_unlock(&glob->lru_lock);
+               vmw_bo->gmr_bound = false;
        }
+}
+
+void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
+{
+       struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+       struct ttm_bo_global *glob = bo->glob;
+
+       vmw_dmabuf_gmr_unbind(bo);
+       ttm_mem_global_free(glob->mem_glob, bo->acc_size);
        kfree(vmw_bo);
 }
 
@@ -816,18 +825,10 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 {
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-       struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma;
        struct ttm_bo_global *glob = bo->glob;
-       struct vmw_private *dev_priv =
-               container_of(bo->bdev, struct vmw_private, bdev);
 
+       vmw_dmabuf_gmr_unbind(bo);
        ttm_mem_global_free(glob->mem_glob, bo->acc_size);
-       if (vmw_bo->gmr_bound) {
-               vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
-               spin_lock(&glob->lru_lock);
-               ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
-               spin_unlock(&glob->lru_lock);
-       }
        kfree(vmw_user_bo);
 }