/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

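/**
 * vmw_resource_release - Final kref release callback.
 *
 * @kref: Embedded kref of the resource whose last reference was dropped.
 *
 * Called with the resource lock held. The lock is dropped across the
 * hw_destroy and res_free callbacks, since both may block, for example
 * on FIFO space, and is re-acquired before returning to kref_put().
 */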
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;

	idr_remove(res->idr, res->id);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
}

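/**
 * vmw_resource_unreference - Drop a reference and clear the pointer.
 *
 * @p_res: Double pointer to the resource. Set to NULL on return so that
 * the caller cannot accidentally reuse the stale reference.
 */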
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

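/**
 * vmw_resource_init - One-time init and id allocation for a resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res:      The resource to initialize.
 * @idr:      The idr in which to allocate the resource id.
 * @obj_type: The ttm object type of the resource.
 * @res_free: Free function, or NULL to use kfree().
 *
 * The idr_pre_get()/idr_get_new_above() pair is retried in a loop:
 * idr_get_new_above() returns -EAGAIN when the preallocated nodes have
 * been consumed by a concurrent allocation, in which case we preallocate
 * again and retry. Ids start at 1, so a zero id can be treated as invalid.
 */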
static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     void (*res_free) (struct vmw_resource *res))
{
	int ret;

	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;

	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

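/**
 * vmw_resource_lookup - Look up and reference an activated resource.
 *
 * Returns the resource with an extra reference held, or NULL if the id
 * is unknown or the resource has not yet been activated. The kref_get()
 * is done under the read lock, so it cannot race with the final
 * kref_put() in vmw_resource_unreference(), which takes the write lock.
 */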
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}

/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv);
}

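/**
 * vmw_context_init - Initialize a context resource and define it on the
 * device.
 *
 * Allocates an id via vmw_resource_init(), then submits an
 * SVGA_3D_CMD_CONTEXT_DEFINE command through the FIFO. The resource is
 * only activated (made visible to vmw_resource_lookup()) once the define
 * command has been committed. On failure the resource is freed here, so
 * callers must not free it again.
 */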
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(res);
		else
			res_free(res);
		return ret;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);

	return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

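/**
 * vmw_context_define_ioctl - Create a context and a user-space handle
 * for it.
 *
 * The extra reference taken with vmw_resource_reference() is handed over
 * to the ttm base object, and is dropped again in
 * vmw_user_context_base_release() when the last user-space reference
 * goes away.
 */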
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
		    container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}

/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv);
}

void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}

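/**
 * vmw_surface_init - Define a surface on the device.
 *
 * The SVGA_3D_CMD_SURFACE_DEFINE command has a variable length: the
 * fixed-size body is followed by one SVGA3dSize per mip level of each
 * face, which is why the FIFO reservation and the command size are
 * computed from srf->num_sizes rather than from sizeof(*cmd) alone.
 */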
int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineSurface body;
	} *cmd;
	SVGA3dSize *cmd_size;
	struct vmw_resource *res = &srf->res;
	struct drm_vmw_size *src_size;
	size_t submit_size;
	uint32_t cmd_len;
	int i;

	BUG_ON(res_free == NULL);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, res_free);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed for create surface.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
	cmd->header.size = cpu_to_le32(cmd_len);
	cmd->body.sid = cpu_to_le32(res->id);
	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		cmd->body.face[i].numMipLevels =
		    cpu_to_le32(srf->mip_levels[i]);
	}

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = cpu_to_le32(src_size->width);
		cmd_size->height = cpu_to_le32(src_size->height);
		cmd_size->depth = cpu_to_le32(src_size->depth);
	}

	vmw_fifo_commit(dev_priv, submit_size);
	(void) vmw_3d_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
}

static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}

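/**
 * vmw_user_surface_lookup_handle - Look up a surface by user-space handle.
 *
 * Verifies both the base-object type and, under the resource read lock,
 * that the resource is activated and really is a user surface before
 * taking a reference on it. The temporary base-object reference from
 * ttm_base_object_lookup() is always dropped before returning.
 */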
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

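/**
 * vmw_surface_define_ioctl - Create a surface from a user-space request.
 *
 * A 64x64 A8R8G8B8 scanout surface additionally gets a snooper image
 * allocated with it, used to track cursor contents on the host side.
 * Until vmw_surface_init() succeeds, errors are unwound manually; after
 * that point the generic resource destructors take over.
 */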
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
	    kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_err1;
	}

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image) {
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		} else {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_err1;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;

	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */

	ttm_base_object_unref(&base);
	return ret;
}

/**
 * Buffer management.
 */

static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}

void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;
	struct vmw_private *dev_priv =
		container_of(bo->bdev, struct vmw_private, bdev);

	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&glob->lru_lock);
		vmw_bo->gmr_bound = false;
	}
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_bo);
}

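/**
 * vmw_dmabuf_init - Initialize a vmw_dma_buffer around a ttm bo.
 *
 * The accounting size is charged against the ttm memory global before
 * ttm_bo_init() is called. Note that on failure @bo_free is invoked
 * directly here, mirroring ttm_bo_init(), which also calls the destroy
 * callback on error, so the caller must never free the buffer itself.
 */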
int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/* we must free the bo here as
		 * ttm_buffer_object_init does so as well */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->gmr_lru);
	INIT_LIST_HEAD(&vmw_bo->validate_list);
	vmw_bo->gmr_id = 0;
	vmw_bo->gmr_bound = false;

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	rep->handle = vmw_user_bo->base.hash.key;
	rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
	rep->cur_gmr_id = vmw_user_bo->base.hash.key;
	rep->cur_gmr_offset = 0;

out_no_base_object:
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

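/**
 * vmw_dmabuf_validate_node - Assign a validate-list slot to a buffer.
 *
 * Returns the buffer's previously assigned slot if it is already on the
 * validate list; otherwise records and returns @cur_validate_node, so
 * that a buffer referenced more than once occupies only a single slot.
 */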
uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}

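/**
 * vmw_dmabuf_gmr - Return the GMR id backing a buffer object.
 *
 * Buffers placed in VRAM are addressed through the special
 * SVGA_GMR_FRAMEBUFFER id; otherwise the bound GMR id is returned, or
 * SVGA_GMR_NULL if the buffer is not bound to a GMR.
 */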
uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo;

	if (bo->mem.mem_type == TTM_PL_VRAM)
		return SVGA_GMR_FRAMEBUFFER;

	vmw_bo = vmw_dma_buffer(bo);

	return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
}

void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->gmr_bound = true;
	vmw_bo->gmr_id = id;
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

/**
 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
 * when we're out of ids, causing GMR space to be allocated
 * out-of-line.
 */
int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
{
	struct ttm_bo_global *glob = dev_priv->bdev.glob;
	int id;
	int ret;

	do {
		if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
			return -ENOMEM;

		spin_lock(&glob->lru_lock);
		ret = ida_get_new(&dev_priv->gmr_ida, &id);
		spin_unlock(&glob->lru_lock);
	} while (ret == -EAGAIN);

	if (unlikely(ret != 0))
		return ret;

	if (unlikely(id >= dev_priv->max_gmr_ids)) {
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, id);
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	*p_id = (uint32_t) id;
	return 0;
}

/**
 * Stream management:
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr,
				  arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

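/**
 * vmw_user_stream_lookup - Look up a stream owned by the caller.
 *
 * On success, @inout_id is replaced with the device stream id and a
 * referenced resource is returned in @out. Streams are not shareable,
 * so a stream created by another file handle is rejected with -EPERM.
 */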
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}