1 /**************************************************************************
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #include "vmwgfx_drv.h"
29 #include "vmwgfx_drm.h"
30 #include "ttm/ttm_object.h"
31 #include "ttm/ttm_placement.h"
/* TTM base-object types used to tag vmwgfx resources when they are
 * exposed to user space through the TTM object machinery. */
34 #define VMW_RES_CONTEXT ttm_driver_type0
35 #define VMW_RES_SURFACE ttm_driver_type1
36 #define VMW_RES_STREAM ttm_driver_type2
38 /* XXX: This isn't a real hardware flag, but just a hack for kernel to
39 * know about primary surfaces. Find a better way to accomplish this.
 */
41 #define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9)
/* User-space visible wrappers: each pairs a ttm_base_object (the handle
 * and user-space refcount) with the embedded core resource object.
 *
 * NOTE(review): this chunk looks like a sampled extraction - struct
 * terminators ("};") and some members are elided throughout; verify
 * against the complete file. */
43 struct vmw_user_context {
44 struct ttm_base_object base;
45 struct vmw_resource res;
48 struct vmw_user_surface {
49 struct ttm_base_object base;
50 struct vmw_surface srf;
53 struct vmw_user_dma_buffer {
54 struct ttm_base_object base;
55 struct vmw_dma_buffer dma;
58 struct vmw_bo_user_rep {
/* NOTE(review): the members of vmw_bo_user_rep appear elided; the
 * "vmw_resource res" line below may belong to a different struct. */
64 struct vmw_resource res;
68 struct vmw_user_stream {
69 struct ttm_base_object base;
70 struct vmw_stream stream;
/* Downcast helpers: recover the vmwgfx wrapper structure from an
 * embedded ttm_buffer_object via container_of(). */
73 static inline struct vmw_dma_buffer *
74 vmw_dma_buffer(struct ttm_buffer_object *bo)
76 return container_of(bo, struct vmw_dma_buffer, base);
79 static inline struct vmw_user_dma_buffer *
80 vmw_user_dma_buffer(struct ttm_buffer_object *bo)
82 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
83 return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
/* Take an additional reference on @res and return it. Body elided in
 * this chunk - presumably a kref_get() on res->kref; confirm against
 * the full file. */
86 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
/* kref release callback: unpublish the resource from its idr, run the
 * hardware destructor if one was set by vmw_resource_activate(), then
 * free the object.
 *
 * Entered with dev_priv->resource_lock write-held (taken by
 * vmw_resource_unreference() around kref_put()); the lock is dropped
 * around the destroy/free work and retaken before returning. */
92 static void vmw_resource_release(struct kref *kref)
94 struct vmw_resource *res =
95 container_of(kref, struct vmw_resource, kref);
96 struct vmw_private *dev_priv = res->dev_priv;
98 idr_remove(res->idr, res->id);
99 write_unlock(&dev_priv->resource_lock);
101 if (likely(res->hw_destroy != NULL))
102 res->hw_destroy(res);
/* res_free() call (and the default-free branch) appear elided here. */
104 if (res->res_free != NULL)
109 write_lock(&dev_priv->resource_lock);
/* Drop one reference on *p_res. The kref_put() is bracketed by the
 * resource_lock write lock so that the final-release path in
 * vmw_resource_release() runs with the lock held.
 * NOTE(review): the usual "*p_res = NULL" appears elided here. */
112 void vmw_resource_unreference(struct vmw_resource **p_res)
114 struct vmw_resource *res = *p_res;
115 struct vmw_private *dev_priv = res->dev_priv;
118 write_lock(&dev_priv->resource_lock);
119 kref_put(&res->kref, vmw_resource_release);
120 write_unlock(&dev_priv->resource_lock);
/* Common resource initialization: set up the refcount, type and
 * destructor hooks, then allocate an idr id (>= 1) using the
 * pre-idr_alloc() idiom: idr_pre_get() preallocates, and -EAGAIN from
 * idr_get_new_above() retries the whole loop.
 * The resource is NOT yet visible to lookup - callers must call
 * vmw_resource_activate() once the hardware knows about it. */
123 static int vmw_resource_init(struct vmw_private *dev_priv,
124 struct vmw_resource *res,
126 enum ttm_object_type obj_type,
127 void (*res_free) (struct vmw_resource *res))
131 kref_init(&res->kref);
132 res->hw_destroy = NULL;
133 res->res_free = res_free;
134 res->res_type = obj_type;
137 res->dev_priv = dev_priv;
/* idr_pre_get() returning 0 means preallocation failed; the -ENOMEM
 * return appears elided in this chunk. */
140 if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
143 write_lock(&dev_priv->resource_lock);
144 ret = idr_get_new_above(idr, res, 1, &res->id);
145 write_unlock(&dev_priv->resource_lock);
147 } while (ret == -EAGAIN);
153 * vmw_resource_activate
155 * @res: Pointer to the newly created resource
156 * @hw_destroy: Destroy function. NULL if none.
158 * Activate a resource after the hardware has been made aware of it.
159 * Set the destroy function to @destroy. Typically this frees the
160 * resource and destroys the hardware resources associated with it.
161 * Activate basically means that the function vmw_resource_lookup will
165 static void vmw_resource_activate(struct vmw_resource *res,
166 void (*hw_destroy) (struct vmw_resource *))
168 struct vmw_private *dev_priv = res->dev_priv;
170 write_lock(&dev_priv->resource_lock);
/* NOTE(review): the "res->avail = true" store appears elided here;
 * vmw_resource_lookup() only returns resources with avail set. */
172 res->hw_destroy = hw_destroy;
173 write_unlock(&dev_priv->resource_lock);
/* Look up a resource by @id in @idr. Only "available" (activated)
 * resources are returned; a reference is taken under the read lock so
 * the caller owns one on success. Returns NULL when not found (the
 * not-available branch appears elided in this chunk). */
176 struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
177 struct idr *idr, int id)
179 struct vmw_resource *res;
181 read_lock(&dev_priv->resource_lock);
182 res = idr_find(idr, id);
183 if (res && res->avail)
184 kref_get(&res->kref);
187 read_unlock(&dev_priv->resource_lock);
189 if (unlikely(res == NULL))
196 * Context management:
/* hw_destroy hook for contexts: emit SVGA_3D_CMD_CONTEXT_DESTROY for
 * this context id through the device FIFO. Best-effort: a failed FIFO
 * reservation only logs an error.
 * NOTE(review): the error message says "surface" - looks like a
 * copy-paste from vmw_hw_surface_destroy(); consider fixing. */
199 static void vmw_hw_context_destroy(struct vmw_resource *res)
202 struct vmw_private *dev_priv = res->dev_priv;
204 SVGA3dCmdHeader header;
205 SVGA3dCmdDestroyContext body;
206 } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
208 if (unlikely(cmd == NULL)) {
209 DRM_ERROR("Failed reserving FIFO space for surface "
214 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
215 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
216 cmd->body.cid = cpu_to_le32(res->id);
218 vmw_fifo_commit(dev_priv, sizeof(*cmd));
/* Initialize a context resource, emit SVGA_3D_CMD_CONTEXT_DEFINE to the
 * device, and activate it with vmw_hw_context_destroy as destructor.
 * On vmw_resource_init() failure the object is freed directly (via
 * kfree when res_free is NULL, judging by the elided branch); on FIFO
 * failure ownership passes to vmw_resource_unreference(). */
221 static int vmw_context_init(struct vmw_private *dev_priv,
222 struct vmw_resource *res,
223 void (*res_free) (struct vmw_resource *res))
228 SVGA3dCmdHeader header;
229 SVGA3dCmdDefineContext body;
232 ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
233 VMW_RES_CONTEXT, res_free);
235 if (unlikely(ret != 0)) {
236 if (res_free == NULL)
243 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
244 if (unlikely(cmd == NULL)) {
245 DRM_ERROR("Fifo reserve failed.\n");
246 vmw_resource_unreference(&res);
250 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
251 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
252 cmd->body.cid = cpu_to_le32(res->id);
254 vmw_fifo_commit(dev_priv, sizeof(*cmd));
255 vmw_resource_activate(res, vmw_hw_context_destroy);
/* Allocate and initialize a kernel-side (non-user-visible) context.
 * res_free == NULL lets vmw_context_init() free the allocation itself
 * on failure; returns NULL on any error. */
259 struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
261 struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
264 if (unlikely(res == NULL))
267 ret = vmw_context_init(dev_priv, res, NULL);
268 return (ret == 0) ? res : NULL;
272 * User-space context management:
/* res_free hook for user contexts: recover the wrapper and free it
 * (the kfree appears elided in this chunk). */
275 static void vmw_user_context_free(struct vmw_resource *res)
277 struct vmw_user_context *ctx =
278 container_of(res, struct vmw_user_context, res);
284 * This function is called when user space has no more references on the
285 * base object. It releases the base-object's reference on the resource object.
288 static void vmw_user_context_base_release(struct ttm_base_object **p_base)
290 struct ttm_base_object *base = *p_base;
291 struct vmw_user_context *ctx =
292 container_of(base, struct vmw_user_context, base);
293 struct vmw_resource *res = &ctx->res;
296 vmw_resource_unreference(&res);
/* DRM_VMW_UNREF_CONTEXT ioctl: look up the context by cid, verify it is
 * a user context owned by (or shared with) this file, then drop the
 * user-space TTM reference. Error returns appear elided in this chunk. */
299 int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
300 struct drm_file *file_priv)
302 struct vmw_private *dev_priv = vmw_priv(dev);
303 struct vmw_resource *res;
304 struct vmw_user_context *ctx;
305 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
306 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
309 res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
310 if (unlikely(res == NULL))
/* Reject idr entries that are not user contexts. */
313 if (res->res_free != &vmw_user_context_free) {
318 ctx = container_of(res, struct vmw_user_context, res);
/* Access check: owner file or a shareable object. */
319 if (ctx->base.tfile != tfile && !ctx->base.shareable) {
324 ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
326 vmw_resource_unreference(&res);
/* DRM_VMW_CREATE_CONTEXT ioctl: allocate a user context, initialize the
 * hardware context, and publish it as a non-shareable TTM base object.
 * After vmw_context_init() succeeds, failure cleanup goes through
 * vmw_resource_unreference(); on base-object init failure both the
 * extra "tmp" reference and the original are dropped. The cid copy-out
 * to arg appears elided in this chunk. */
330 int vmw_context_define_ioctl(struct drm_device *dev, void *data,
331 struct drm_file *file_priv)
333 struct vmw_private *dev_priv = vmw_priv(dev);
334 struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
335 struct vmw_resource *res;
336 struct vmw_resource *tmp;
337 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
338 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
341 if (unlikely(ctx == NULL))
345 ctx->base.shareable = false;
346 ctx->base.tfile = NULL;
348 ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
349 if (unlikely(ret != 0))
/* Extra reference held by the base object until its release hook. */
352 tmp = vmw_resource_reference(&ctx->res);
353 ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
354 &vmw_user_context_base_release, NULL);
356 if (unlikely(ret != 0)) {
357 vmw_resource_unreference(&tmp);
363 vmw_resource_unreference(&res);
/* Fast permission check used by the command submission path: verify
 * that context @id exists, is available, and is accessible from @tfile.
 * No reference is taken - everything happens under the read lock. */
368 int vmw_context_check(struct vmw_private *dev_priv,
369 struct ttm_object_file *tfile,
372 struct vmw_resource *res;
375 read_lock(&dev_priv->resource_lock);
376 res = idr_find(&dev_priv->context_idr, id);
377 if (res && res->avail) {
378 struct vmw_user_context *ctx =
379 container_of(res, struct vmw_user_context, res);
380 if (ctx->base.tfile != tfile && !ctx->base.shareable)
384 read_unlock(&dev_priv->resource_lock);
391 * Surface management.
/* hw_destroy hook for surfaces: emit SVGA_3D_CMD_SURFACE_DESTROY for
 * this surface id through the FIFO; best-effort on reservation failure. */
394 static void vmw_hw_surface_destroy(struct vmw_resource *res)
397 struct vmw_private *dev_priv = res->dev_priv;
399 SVGA3dCmdHeader header;
400 SVGA3dCmdDestroySurface body;
401 } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
403 if (unlikely(cmd == NULL)) {
404 DRM_ERROR("Failed reserving FIFO space for surface "
409 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
410 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
411 cmd->body.sid = cpu_to_le32(res->id);
413 vmw_fifo_commit(dev_priv, sizeof(*cmd));
/* res_free hook for kernel surfaces: frees the cursor-snooper image
 * (and presumably srf->sizes / the surface itself - lines elided). */
416 void vmw_surface_res_free(struct vmw_resource *res)
418 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
421 kfree(srf->snooper.image);
/* Initialize a surface resource and emit SVGA_3D_CMD_SURFACE_DEFINE:
 * fixed header/body followed by one SVGA3dSize per mip level, copied
 * from srf->sizes with explicit little-endian conversion. On success
 * the surface is activated with vmw_hw_surface_destroy. */
425 int vmw_surface_init(struct vmw_private *dev_priv,
426 struct vmw_surface *srf,
427 void (*res_free) (struct vmw_resource *res))
431 SVGA3dCmdHeader header;
432 SVGA3dCmdDefineSurface body;
434 SVGA3dSize *cmd_size;
435 struct vmw_resource *res = &srf->res;
436 struct drm_vmw_size *src_size;
/* A surface always has a dedicated res_free (vmw_surface_res_free or
 * vmw_user_surface_free) - never the default kfree path. */
441 BUG_ON(res_free == NULL);
442 ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
443 VMW_RES_SURFACE, res_free);
445 if (unlikely(ret != 0)) {
450 submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
451 cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
453 cmd = vmw_fifo_reserve(dev_priv, submit_size);
454 if (unlikely(cmd == NULL)) {
455 DRM_ERROR("Fifo reserve failed for create surface.\n");
456 vmw_resource_unreference(&res);
460 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
461 cmd->header.size = cpu_to_le32(cmd_len);
462 cmd->body.sid = cpu_to_le32(res->id);
463 cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
464 cmd->body.format = cpu_to_le32(srf->format);
465 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
466 cmd->body.face[i].numMipLevels =
467 cpu_to_le32(srf->mip_levels[i]);
/* The size array starts immediately after the fixed command
 * (presumably "cmd += 1" elided before this cast). */
471 cmd_size = (SVGA3dSize *) cmd;
472 src_size = srf->sizes;
474 for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
475 cmd_size->width = cpu_to_le32(src_size->width);
476 cmd_size->height = cpu_to_le32(src_size->height);
477 cmd_size->depth = cpu_to_le32(src_size->depth);
480 vmw_fifo_commit(dev_priv, submit_size);
481 vmw_resource_activate(res, vmw_hw_surface_destroy);
/* res_free hook for user surfaces: free the snooper image (and
 * presumably srf->sizes plus the wrapper - lines elided). */
485 static void vmw_user_surface_free(struct vmw_resource *res)
487 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
488 struct vmw_user_surface *user_srf =
489 container_of(srf, struct vmw_user_surface, srf);
492 kfree(srf->snooper.image);
/* Resolve a user handle to a vmw_surface: look up the TTM base object,
 * verify its type, then confirm under the read lock that the resource
 * is available and really a user surface before taking a reference.
 * The base-object reference is dropped before returning. */
496 int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
497 struct ttm_object_file *tfile,
498 uint32_t handle, struct vmw_surface **out)
500 struct vmw_resource *res;
501 struct vmw_surface *srf;
502 struct vmw_user_surface *user_srf;
503 struct ttm_base_object *base;
506 base = ttm_base_object_lookup(tfile, handle);
507 if (unlikely(base == NULL))
510 if (unlikely(base->object_type != VMW_RES_SURFACE))
511 goto out_bad_resource;
513 user_srf = container_of(base, struct vmw_user_surface, base);
514 srf = &user_srf->srf;
/* "res = &srf->res" assignment appears elided here. */
517 read_lock(&dev_priv->resource_lock);
519 if (!res->avail || res->res_free != &vmw_user_surface_free) {
520 read_unlock(&dev_priv->resource_lock);
521 goto out_bad_resource;
524 kref_get(&res->kref);
525 read_unlock(&dev_priv->resource_lock);
531 ttm_base_object_unref(&base);
534 * This function is called when user space has no more references on the
535 * base object. It releases the base-object's reference on the resource
536 */
536 static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
538 struct ttm_base_object *base = *p_base;
539 struct vmw_user_surface *user_srf =
540 container_of(base, struct vmw_user_surface, base);
541 struct vmw_resource *res = &user_srf->srf.res;
544 vmw_resource_unreference(&res);
/* DRM_VMW_UNREF_SURFACE ioctl: simply drop the caller's TTM usage
 * reference on the surface handle; the base-object release hook takes
 * care of the resource reference. */
547 int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
548 struct drm_file *file_priv)
550 struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
551 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
553 return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
/* DRM_VMW_CREATE_SURFACE ioctl: validate the user-supplied description,
 * copy in the per-mip-level size array, allocate a 64x64 ARGB cursor
 * snooper image for scanout-hinted cursor-sized surfaces, create the
 * hardware surface, and publish it as a TTM base object. */
556 int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
557 struct drm_file *file_priv)
559 struct vmw_private *dev_priv = vmw_priv(dev);
560 struct vmw_user_surface *user_srf =
561 kmalloc(sizeof(*user_srf), GFP_KERNEL);
562 struct vmw_surface *srf;
563 struct vmw_resource *res;
564 struct vmw_resource *tmp;
565 union drm_vmw_surface_create_arg *arg =
566 (union drm_vmw_surface_create_arg *)data;
567 struct drm_vmw_surface_create_req *req = &arg->req;
568 struct drm_vmw_surface_arg *rep = &arg->rep;
569 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
570 struct drm_vmw_size __user *user_sizes;
574 if (unlikely(user_srf == NULL))
577 srf = &user_srf->srf;
580 srf->flags = req->flags;
581 srf->format = req->format;
582 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
/* Total mip count across all faces ("srf->num_sizes = 0" presumably
 * elided just above this loop); bounded below to cap the copy_from_user
 * size - untrusted input. */
584 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
585 srf->num_sizes += srf->mip_levels[i];
587 if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
588 DRM_VMW_MAX_MIP_LEVELS) {
593 srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
594 if (unlikely(srf->sizes == NULL)) {
599 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
/* NOTE(review): copy_from_user returns bytes-not-copied, not -errno;
 * presumably converted to -EFAULT on the (elided) error path. */
602 ret = copy_from_user(srf->sizes, user_sizes,
603 srf->num_sizes * sizeof(*srf->sizes));
604 if (unlikely(ret != 0))
607 if (srf->flags & SVGA3D_SURFACE_HINT_SCANOUT) {
608 /* we should not send this flag down to hardware since
609 * it's not an official one
611 srf->flags &= ~SVGA3D_SURFACE_HINT_SCANOUT;
614 srf->scanout = false;
/* Cursor snooping: a single 64x64 A8R8G8B8 surface is assumed to be a
 * cursor; allocate a shadow image for it (condition partially elided). */
618 srf->num_sizes == 1 &&
619 srf->sizes[0].width == 64 &&
620 srf->sizes[0].height == 64 &&
621 srf->format == SVGA3D_A8R8G8B8) {
623 srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
624 /* clear the image */
625 if (srf->snooper.image) {
626 memset(srf->snooper.image, 0x00, 64 * 64 * 4);
628 DRM_ERROR("Failed to allocate cursor_image\n");
633 srf->snooper.image = NULL;
635 srf->snooper.crtc = NULL;
637 user_srf->base.shareable = false;
638 user_srf->base.tfile = NULL;
641 * From this point, the generic resource management functions
642 * destroy the object on failure.
645 ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
646 if (unlikely(ret != 0))
/* Extra reference held by the base object until its release hook. */
649 tmp = vmw_resource_reference(&srf->res);
650 ret = ttm_base_object_init(tfile, &user_srf->base,
651 req->shareable, VMW_RES_SURFACE,
652 &vmw_user_surface_base_release, NULL);
654 if (unlikely(ret != 0)) {
655 vmw_resource_unreference(&tmp);
656 vmw_resource_unreference(&res);
660 rep->sid = user_srf->base.hash.key;
661 if (rep->sid == SVGA3D_INVALID_ID)
662 DRM_ERROR("Created bad Surface ID.\n");
664 vmw_resource_unreference(&res);
/* DRM_VMW_REF_SURFACE ioctl: add a usage reference to an existing
 * surface handle and copy its description (flags, format, mip levels,
 * size array) back to user space. copy_to_user failure is only logged
 * (return-value handling appears elided in this chunk). */
673 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
674 struct drm_file *file_priv)
676 union drm_vmw_surface_reference_arg *arg =
677 (union drm_vmw_surface_reference_arg *)data;
678 struct drm_vmw_surface_arg *req = &arg->req;
679 struct drm_vmw_surface_create_req *rep = &arg->rep;
680 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
681 struct vmw_surface *srf;
682 struct vmw_user_surface *user_srf;
683 struct drm_vmw_size __user *user_sizes;
684 struct ttm_base_object *base;
687 base = ttm_base_object_lookup(tfile, req->sid);
688 if (unlikely(base == NULL)) {
689 DRM_ERROR("Could not find surface to reference.\n");
693 if (unlikely(base->object_type != VMW_RES_SURFACE))
694 goto out_bad_resource;
696 user_srf = container_of(base, struct vmw_user_surface, base);
697 srf = &user_srf->srf;
699 ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
700 if (unlikely(ret != 0)) {
701 DRM_ERROR("Could not add a reference to a surface.\n");
702 goto out_no_reference;
705 rep->flags = srf->flags;
706 rep->format = srf->format;
707 memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
708 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
712 ret = copy_to_user(user_sizes, srf->sizes,
713 srf->num_sizes * sizeof(*srf->sizes));
714 if (unlikely(ret != 0))
715 DRM_ERROR("copy_to_user failed %p %u\n",
716 user_sizes, srf->num_sizes);
719 ttm_base_object_unref(&base);
/* Resolve a surface handle to its resource id for the command parser.
 * Uses the TTM base-object lookup (which takes a reference) rather than
 * the idr, then drops the reference before returning. */
724 int vmw_surface_check(struct vmw_private *dev_priv,
725 struct ttm_object_file *tfile,
726 uint32_t handle, int *id)
728 struct ttm_base_object *base;
729 struct vmw_user_surface *user_srf;
733 base = ttm_base_object_lookup(tfile, handle);
734 if (unlikely(base == NULL))
737 if (unlikely(base->object_type != VMW_RES_SURFACE))
738 goto out_bad_surface;
740 user_srf = container_of(base, struct vmw_user_surface, base);
741 *id = user_srf->srf.res.id;
746 * FIXME: May deadlock here when called from the
747 * command parsing code.
750 ttm_base_object_unref(&base);
/* Accounting size for a vmw_dma_buffer with @num_pages pages: the TTM
 * per-bo overhead plus the wrapper struct (computed once and cached in
 * a function-local static - note this is not thread-safe, though the
 * computation is idempotent) plus the page-aligned page-pointer array. */
758 static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
759 unsigned long num_pages)
761 static size_t bo_user_size = ~0;
763 size_t page_array_size =
764 (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
766 if (unlikely(bo_user_size == ~0)) {
767 bo_user_size = glob->ttm_bo_extra_size +
768 ttm_round_pot(sizeof(struct vmw_dma_buffer));
771 return bo_user_size + page_array_size;
/* If the buffer is bound to a GMR (guest memory region), unbind it on
 * the device and release its GMR id. The ida is protected by the TTM
 * global lru_lock, matching vmw_gmr_id_alloc() below. */
774 void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
776 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
777 struct ttm_bo_global *glob = bo->glob;
778 struct vmw_private *dev_priv =
779 container_of(bo->bdev, struct vmw_private, bdev);
781 if (vmw_bo->gmr_bound) {
782 vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
783 spin_lock(&glob->lru_lock);
784 ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
785 spin_unlock(&glob->lru_lock);
786 vmw_bo->gmr_bound = false;
/* ttm_bo destroy callback for kernel-owned buffers: release the GMR
 * binding, return the accounted memory, and free the wrapper (kfree
 * appears elided in this chunk). */
790 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
792 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
793 struct ttm_bo_global *glob = bo->glob;
795 vmw_dmabuf_gmr_unbind(bo);
796 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
/* Initialize a vmw_dma_buffer: account the memory against the TTM
 * global, zero the wrapper, and hand the embedded bo to ttm_bo_init(),
 * which owns destruction (via @bo_free) from then on - including on
 * its own failure, hence bo_free() is also called on the accounting
 * failure path here. */
800 int vmw_dmabuf_init(struct vmw_private *dev_priv,
801 struct vmw_dma_buffer *vmw_bo,
802 size_t size, struct ttm_placement *placement,
804 void (*bo_free) (struct ttm_buffer_object *bo))
806 struct ttm_bo_device *bdev = &dev_priv->bdev;
807 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
814 vmw_dmabuf_acc_size(bdev->glob,
815 (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
817 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
818 if (unlikely(ret != 0)) {
819 /* we must free the bo here as
820 * ttm_buffer_object_init does so as well */
821 bo_free(&vmw_bo->base);
825 memset(vmw_bo, 0, sizeof(*vmw_bo));
827 INIT_LIST_HEAD(&vmw_bo->gmr_lru);
828 INIT_LIST_HEAD(&vmw_bo->validate_list);
830 vmw_bo->gmr_bound = false;
832 ret = ttm_bo_init(bdev, &vmw_bo->base, size,
833 ttm_bo_type_device, placement,
835 NULL, acc_size, bo_free);
/* ttm_bo destroy callback for user-owned buffers: release the GMR
 * binding, return accounted memory, and free the user wrapper (the
 * final free appears elided in this chunk). */
839 static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
841 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
842 struct ttm_bo_global *glob = bo->glob;
844 vmw_dmabuf_gmr_unbind(bo);
845 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
/* Base-object release hook: drop the bo reference that was taken on
 * behalf of user space when the handle was created. */
849 static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
851 struct vmw_user_dma_buffer *vmw_user_bo;
852 struct ttm_base_object *base = *p_base;
853 struct ttm_buffer_object *bo;
857 if (unlikely(base == NULL))
860 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
861 bo = &vmw_user_bo->dma.base;
/* DRM_VMW_ALLOC_DMABUF ioctl: under the master read lock, allocate a
 * user dma buffer of req->size, publish it as a TTM base object, and
 * return its handle, mmap offset and (handle-equal) GMR id. */
865 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
866 struct drm_file *file_priv)
868 struct vmw_private *dev_priv = vmw_priv(dev);
869 union drm_vmw_alloc_dmabuf_arg *arg =
870 (union drm_vmw_alloc_dmabuf_arg *)data;
871 struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
872 struct drm_vmw_dmabuf_rep *rep = &arg->rep;
873 struct vmw_user_dma_buffer *vmw_user_bo;
874 struct ttm_buffer_object *tmp;
875 struct vmw_master *vmaster = vmw_master(file_priv->master);
878 vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
879 if (unlikely(vmw_user_bo == NULL))
882 ret = ttm_read_lock(&vmaster->lock, true);
883 if (unlikely(ret != 0)) {
/* After this call ttm_bo owns destruction via vmw_user_dmabuf_destroy. */
888 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
889 &vmw_vram_sys_placement, true,
890 &vmw_user_dmabuf_destroy);
891 if (unlikely(ret != 0))
/* Extra bo reference held on behalf of the base object; several
 * argument lines of the base-object init appear elided below. */
894 tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
895 ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
899 &vmw_user_dmabuf_release, NULL);
900 if (unlikely(ret != 0)) {
903 rep->handle = vmw_user_bo->base.hash.key;
904 rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
905 rep->cur_gmr_id = vmw_user_bo->base.hash.key;
906 rep->cur_gmr_offset = 0;
910 ttm_read_unlock(&vmaster->lock);
/* DRM_VMW_UNREF_DMABUF ioctl: drop the caller's usage reference on the
 * buffer handle (the handle argument appears elided in this chunk). */
915 int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
916 struct drm_file *file_priv)
918 struct drm_vmw_unref_dmabuf_arg *arg =
919 (struct drm_vmw_unref_dmabuf_arg *)data;
921 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
/* Assign @bo a slot in the current validation list, or return its
 * existing slot if it is already on the list - used to deduplicate
 * buffers during command-stream validation. */
926 uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
927 uint32_t cur_validate_node)
929 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
931 if (likely(vmw_bo->on_validate_list))
932 return vmw_bo->cur_validate_node;
934 vmw_bo->cur_validate_node = cur_validate_node;
935 vmw_bo->on_validate_list = true;
937 return cur_validate_node;
/* Remove @bo from the validation list (clears the membership flag). */
940 void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
942 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
944 vmw_bo->on_validate_list = false;
/* Return the GMR id to use for @bo: the fixed framebuffer GMR when it
 * lives in VRAM, its bound GMR id otherwise, or SVGA_GMR_NULL. */
947 uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
949 struct vmw_dma_buffer *vmw_bo;
951 if (bo->mem.mem_type == TTM_PL_VRAM)
952 return SVGA_GMR_FRAMEBUFFER;
954 vmw_bo = vmw_dma_buffer(bo);
956 return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
/* Record that @bo is now bound to GMR @id (the gmr_id store appears
 * elided in this chunk). */
959 void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
961 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
962 vmw_bo->gmr_bound = true;
/* Resolve a user handle to a vmw_dma_buffer: look up the base object,
 * check it is a buffer, take a bo reference for the caller, and drop
 * the base-object reference. */
966 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
967 uint32_t handle, struct vmw_dma_buffer **out)
969 struct vmw_user_dma_buffer *vmw_user_bo;
970 struct ttm_base_object *base;
972 base = ttm_base_object_lookup(tfile, handle);
973 if (unlikely(base == NULL)) {
974 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
975 (unsigned long)handle);
979 if (unlikely(base->object_type != ttm_buffer_type)) {
980 ttm_base_object_unref(&base);
981 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
982 (unsigned long)handle);
986 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
987 (void)ttm_bo_reference(&vmw_user_bo->dma.base);
988 ttm_base_object_unref(&base);
989 *out = &vmw_user_bo->dma;
995 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
996 * when we're out of ids, causing GMR space to be allocated
/* Allocate a GMR id from the device's ida, retrying the
 * ida_pre_get/ida_get_new sequence on -EAGAIN. Ids at or above
 * max_gmr_ids are rejected and returned to the ida (the error return
 * there appears elided). The ida is protected by the TTM lru_lock. */
1000 int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
1002 struct ttm_bo_global *glob = dev_priv->bdev.glob;
1007 if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
1010 spin_lock(&glob->lru_lock);
1011 ret = ida_get_new(&dev_priv->gmr_ida, &id);
1012 spin_unlock(&glob->lru_lock);
1013 } while (ret == -EAGAIN);
1015 if (unlikely(ret != 0))
1018 if (unlikely(id >= dev_priv->max_gmr_ids)) {
1019 spin_lock(&glob->lru_lock);
1020 ida_remove(&dev_priv->gmr_ida, id);
1021 spin_unlock(&glob->lru_lock);
1025 *p_id = (uint32_t) id;
/* hw_destroy hook for overlay streams: release the claimed overlay
 * stream id back to the overlay subsystem. */
1033 static void vmw_stream_destroy(struct vmw_resource *res)
1035 struct vmw_private *dev_priv = res->dev_priv;
1036 struct vmw_stream *stream;
1039 DRM_INFO("%s: unref\n", __func__);
1040 stream = container_of(res, struct vmw_stream, res);
1042 ret = vmw_overlay_unref(dev_priv, stream->stream_id);
/* Initialize a stream resource: common init, claim an overlay stream
 * id, then activate with vmw_stream_destroy as destructor. Failure
 * handling mirrors vmw_context_init(). */
1046 static int vmw_stream_init(struct vmw_private *dev_priv,
1047 struct vmw_stream *stream,
1048 void (*res_free) (struct vmw_resource *res))
1050 struct vmw_resource *res = &stream->res;
1053 ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
1054 VMW_RES_STREAM, res_free);
1056 if (unlikely(ret != 0)) {
1057 if (res_free == NULL)
1060 res_free(&stream->res);
1064 ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
/* Claim-failure branch (condition appears elided). */
1066 vmw_resource_unreference(&res);
1070 DRM_INFO("%s: claimed\n", __func__);
1072 vmw_resource_activate(&stream->res, vmw_stream_destroy);
1077 * User-space context management:
/* res_free hook for user streams: free the wrapper (kfree appears
 * elided in this chunk). */
1080 static void vmw_user_stream_free(struct vmw_resource *res)
1082 struct vmw_user_stream *stream =
1083 container_of(res, struct vmw_user_stream, stream.res);
1089 * This function is called when user space has no more references on the
1090 * base object. It releases the base-object's reference on the resource object.
1093 static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
1095 struct ttm_base_object *base = *p_base;
1096 struct vmw_user_stream *stream =
1097 container_of(base, struct vmw_user_stream, base);
1098 struct vmw_resource *res = &stream->stream.res;
1101 vmw_resource_unreference(&res);
/* DRM_VMW_UNREF_STREAM ioctl: look up the stream, verify it is a user
 * stream owned by this file (streams are never shareable - no
 * "shareable" check, unlike contexts), then drop the user reference. */
1104 int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
1105 struct drm_file *file_priv)
1107 struct vmw_private *dev_priv = vmw_priv(dev);
1108 struct vmw_resource *res;
1109 struct vmw_user_stream *stream;
1110 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1111 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1114 res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
1115 if (unlikely(res == NULL))
1118 if (res->res_free != &vmw_user_stream_free) {
1123 stream = container_of(res, struct vmw_user_stream, stream.res);
1124 if (stream->base.tfile != tfile) {
1129 ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
1131 vmw_resource_unreference(&res);
/* DRM_VMW_CLAIM_STREAM ioctl: allocate a user stream, claim an overlay
 * stream id, publish it as a non-shareable TTM base object, and return
 * the resource id to user space. */
1135 int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
1136 struct drm_file *file_priv)
1138 struct vmw_private *dev_priv = vmw_priv(dev);
1139 struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
1140 struct vmw_resource *res;
1141 struct vmw_resource *tmp;
1142 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1143 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1146 if (unlikely(stream == NULL))
1149 res = &stream->stream.res;
1150 stream->base.shareable = false;
1151 stream->base.tfile = NULL;
1153 ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
1154 if (unlikely(ret != 0))
/* Extra reference held by the base object until its release hook. */
1157 tmp = vmw_resource_reference(res);
1158 ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
1159 &vmw_user_stream_base_release, NULL);
1161 if (unlikely(ret != 0)) {
1162 vmw_resource_unreference(&tmp);
1166 arg->stream_id = res->id;
1168 vmw_resource_unreference(&res);
/* Resolve a user stream resource id: look it up in the stream idr,
 * verify type and ownership, and on success rewrite *inout_id to the
 * underlying overlay stream id while handing back the referenced
 * resource in *out (the success return appears elided). */
1172 int vmw_user_stream_lookup(struct vmw_private *dev_priv,
1173 struct ttm_object_file *tfile,
1174 uint32_t *inout_id, struct vmw_resource **out)
1176 struct vmw_user_stream *stream;
1177 struct vmw_resource *res;
1180 res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
1181 if (unlikely(res == NULL))
1184 if (res->res_free != &vmw_user_stream_free) {
1189 stream = container_of(res, struct vmw_user_stream, stream.res);
1190 if (stream->base.tfile != tfile) {
1195 *inout_id = stream->stream.stream_id;
1199 vmw_resource_unreference(&res);