/**************************************************************************
 *
 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

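/*
 * Illustrative sketch (not part of the driver): VMW_RES_HT_ORDER sizes the
 * open hash table used to map resource and buffer-object pointers to their
 * validation nodes, giving 1 << 12 == 4096 buckets. A software context
 * would typically set the table up and tear it down like this (assuming
 * the struct drm_open_hash member is named res_ht, as used below):
 *
 *	ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	...
 *	drm_ht_remove(&sw_context->res_ht);
 */
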
/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of 4 byte entries into the command buffer where the
 * id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: The command stream provides a new backup buffer for
 * the resource, and the resource should switch to it on unreserve.
 * @no_buffer_needed: This means @switching_backup is true on first buffer
 * reference. So resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	u32 first_usage : 1;
	u32 switching_backup : 1;
	u32 no_buffer_needed : 1;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Verifier callback for the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled if and only if guest-backed objects are
 * available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),		\
				       (_gb_disable), (_gb_enable)}

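/*
 * Illustrative sketch (not part of the driver): VMW_CMD_DEF is meant to
 * populate a command-id-indexed dispatch table. The entries and the array
 * sizing shown here are examples only; the real table defines one entry
 * per supported command:
 *
 *	static const struct vmw_cmd_entry
 *	vmw_cmd_entries[SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE] = {
 *		VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY,
 *			    &vmw_cmd_surface_copy_check, true, false, false),
 *		VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER,
 *			    &vmw_cmd_set_shader, true, false, false),
 *	};
 *
 * The verifier then looks up an incoming header via
 * vmw_cmd_entries[header->id - SVGA_3D_CMD_BASE] and dispatches to ->func.
 */
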
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node);

/**
 * vmw_resources_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: Pointer to the software context.
 * @backoff: Whether command submission failed.
 */
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
				    bool backoff)
{
	struct vmw_resource_val_node *val;
	struct list_head *list = &sw_context->resource_list;

	if (sw_context->dx_query_mob && !backoff)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		bool switch_backup =
			(backoff) ? false : val->switching_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff) {
				vmw_binding_state_commit
					(vmw_context_binding_state(val->res),
					 val->staged_bindings);
			}

			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, switch_backup, val->new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource_val_node *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings =
			vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(node->staged_bindings);
			node->staged_bindings = NULL;
			goto out_err;
		}
	} else {
		node->staged_bindings = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	return 0;
out_err:
	return ret;
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	node->res = vmw_resource_reference(res);
	node->first_usage = true;
	if (unlikely(p_node != NULL))
		*p_node = node;

	if (!dev_priv->has_mob) {
		list_add_tail(&node->head, &sw_context->resource_list);
		return 0;
	}

	switch (vmw_res_type(res)) {
	case vmw_res_context:
	case vmw_res_dx_context:
		list_add(&node->head, &sw_context->ctx_resource_list);
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
		break;
	case vmw_res_cotable:
		list_add_tail(&node->head, &sw_context->ctx_resource_list);
		break;
	default:
		list_add_tail(&node->head, &sw_context->resource_list);
		break;
	}

	return ret;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to
 * to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise
	 * it may be swapped out when the view is validated.
	 */
	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
	if (ret)
		return ret;

	return vmw_resource_val_add(sw_context, view, NULL);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return -EINVAL. Otherwise returns 0 on success and a negative error
 * code on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
			       enum vmw_view_type view_type, u32 id)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return PTR_ERR(view);

	ret = vmw_view_res_val_add(sw_context, view);
	vmw_resource_unreference(&view);

	return ret;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to a software context used for this command submission.
 * @ctx: Pointer to the context resource.
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission
 * done after context eviction.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_resource_val_add(sw_context, res, NULL);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		/* entry->res is not refcounted */
		res = vmw_resource_reference_unless_doomed(entry->res);
		if (unlikely(res == NULL))
			continue;

		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_resource_val_add(sw_context, entry->res,
						   NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_dma_buffer *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_bo_to_validate_list(sw_context,
						      dx_query_mob,
						      true, NULL);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head) {
		if (likely(rel->res != NULL))
			cb[rel->offset] = rel->res->id;
		else
			cb[rel->offset] = SVGA_3D_CMD_NOP;
	}
}

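/*
 * Illustrative sketch (not part of the driver): a parser that finds a
 * resource id at id_loc inside the command buffer records its 32-bit slot
 * index relative to the buffer start:
 *
 *	ret = vmw_resource_relocation_add(&sw_context->res_relocations, res,
 *					  id_loc - sw_context->buf_start);
 *
 * vmw_resource_relocations_apply() later rewrites that slot with the
 * device id the resource holds at submission time. A NULL @res instead
 * patches the slot with SVGA_3D_CMD_NOP; pointing such a relocation at a
 * command header's id field turns the whole command into a no-op, which
 * is how commands handled entirely by the kernel (e.g. shader define /
 * destroy below) are hidden from the device.
 */
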
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on success.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) vbo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(&vbo->base);
		val_buf->shared = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret = 0;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (sw_context->dx_query_mob) {
		struct vmw_dma_buffer *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *backup = res->backup;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);
			if (ret) {
				ttm_bo_unreserve(&vbo->base);
				return ret;
			}
		}
	}
	return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 uint32_t *id_loc,
				 struct vmw_resource *res,
				 struct vmw_resource_val_node **p_val)
{
	int ret;
	struct vmw_resource_val_node *node;

	if (p_val)
		*p_val = NULL;
	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id_loc - sw_context->buf_start);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		return ret;

	if (p_val)
		*p_val = node;

	return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id_loc == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id_loc - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      *id_loc,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id_loc);
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id_loc;

	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
				    res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);

	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: The context the query belongs to.
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_dma_buffer *dx_query_mob;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindAllQuery body;
	} *cmd;

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

	if (cmd == NULL) {
		DRM_ERROR("Failed to rebind queries.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		if (unlikely(!val->staged_bindings))
			break;

		ret = vmw_binding_rebind_all
			(vmw_context_binding_state(val->res));
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->res);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_cmdbuf_res_manager *man;
	u32 i;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	man = sw_context->man;
	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_lookup(man, view_type, view_ids[i]);
			if (IS_ERR(view)) {
				DRM_ERROR("View not found.\n");
				return PTR_ERR(view);
			}

			ret = vmw_view_res_val_add(sw_context, view);
			if (ret) {
				DRM_ERROR("Could not add view to "
					  "validation list.\n");
				vmw_resource_unreference(&view);
				return ret;
			}
		}
		binding.bi.ctx = ctx_node->res;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				shader_slot, binding.slot);
		if (view)
			vmw_resource_unreference(&view);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		uint32_t cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

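/*
 * Illustrative sketch (not part of the driver): every SVGA3D command is an
 * SVGA3dCmdHeader immediately followed by its body, so a verifier for a
 * new command typically just recovers the full command from the header
 * pointer and checks the ids in the body. vmw_cmd_example_check is a
 * hypothetical name for illustration:
 *
 *	static int vmw_cmd_example_check(struct vmw_private *dev_priv,
 *					 struct vmw_sw_context *sw_context,
 *					 SVGA3dCmdHeader *header)
 *	{
 *		struct {
 *			SVGA3dCmdHeader header;
 *			uint32_t cid;
 *		} *cmd;
 *
 *		cmd = container_of(header, typeof(*cmd), header);
 *		return vmw_cmd_res_check(dev_priv, sw_context,
 *					 vmw_res_context,
 *					 user_context_converter,
 *					 &cmd->cid, NULL);
 *	}
 */
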
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		DRM_ERROR("Illegal render target type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = res_node ? res_node->res : NULL;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(ctx_node->staged_bindings,
				&binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBufferCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXPredCopyRegion body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_dma_buffer *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_dmabuf_reference(sw_context->cur_query_bo);
		}
	}
}

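/*
 * Illustrative sketch (not part of the driver): the intended calling
 * sequence around one submission, per the comments above:
 *
 *	ret = vmw_query_bo_switch_prepare(dev_priv, new_query_bo, sw_context);
 *	...	submit the command batch	...
 *	vmw_query_bo_switch_commit(dev_priv, sw_context);
 *	...	emit the fence			...
 *
 * The fence emitted after commit covers both the old and the new query
 * buffer, which is what makes the asynchronous unpin of the old buffer safe.
 */
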
/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

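/*
 * Illustrative sketch (not part of the driver): at fixup time,
 * vmw_apply_relocations() walks sw_context->relocs and patches each saved
 * location from the now-validated buffer's placement, roughly:
 *
 *	switch (bo->mem.mem_type) {
 *	case VMW_PL_MOB:
 *		*reloc->mob_loc = bo->mem.start;
 *		break;
 *	case VMW_PL_GMR:
 *		reloc->location->gmrId = bo->mem.start;
 *		break;
 *	...
 *	}
 *
 * so a MOB relocation fills in a raw MOB id, while a GMR relocation fills
 * in the SVGAGuestPtr saved in reloc->location.
 */
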
/**
 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_dx_define_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineQuery q;
	} *cmd;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *cotable_res;
	int ret;

	if (ctx_node == NULL) {
		DRM_ERROR("DX Context not set for query.\n");
		return -EINVAL;
	}

	cmd = container_of(header, struct vmw_dx_define_query_cmd, header);

	if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
	vmw_resource_unreference(&cotable_res);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID
 * with its backing MOB. In this function, we take the user mode
 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
 * kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dx_bind_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindQuery q;
	} *cmd;
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
				    &vmw_bo);
	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));

		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));

		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));

		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;

			binding.bi.ctx = ctx_node->res;
			binding.bi.res = res_node ? res_node->res : NULL;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
					0, binding.texture_stage);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @val_node: The validation node representing the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource_val_node *val_node,
				     uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_dma_buffer *dma_buf;
	int ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (ret)
		return ret;

	val_node->switching_backup = true;
	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}

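/*
 * Illustrative sketch (not part of the driver): the fields set above are
 * consumed at unreserve time by vmw_resources_unreserve(), roughly:
 *
 *	vmw_resource_unreserve(res, val->switching_backup,
 *			       val->new_backup, val->new_backup_offset);
 *
 * so the backup switch only takes effect once the whole command stream
 * has been verified and the resources are unreserved.
 */
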
/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource_val_node *val_node;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
					 buf_id, backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_shader_define_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineShader body;
	} *cmd;
	int ret;
	size_t size;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_define_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv,
				    vmw_context_res_man(val->res),
				    cmd->body.shid, cmd + 1,
				    cmd->body.type, size,
				    &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL, &cmd->header.id -
					   sw_context->buf_start);
}

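/*
 * Note on the NULL-resource relocation added above: at submission time the
 * relocation pass patches the 32-bit command id at the recorded offset, and
 * a relocation without a resource turns the already-consumed define command
 * into a NOP so the device skips it. A minimal sketch of that fixup (an
 * illustrative, hypothetical helper; the real work is done elsewhere in
 * this file when relocations are applied):
 */
#if 0
static void example_nop_out_cmd(u32 *cmd_buf, u32 offset_in_dwords)
{
	/* Overwriting the id makes the device treat the entry as a NOP. */
	cmd_buf[offset_in_dwords] = SVGA_3D_CMD_NOP;
}
#endif
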
/**
 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_shader_destroy_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyShader body;
	} *cmd;
	int ret;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_destroy_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(val->res),
				cmd->body.shid,
				cmd->body.type,
				&sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL, &cmd->header.id -
					   sw_context->buf_start);
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *res = NULL;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
					cmd->body.shid,
					cmd->body.type);

		if (!IS_ERR(res)) {
			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
						    &cmd->body.shid, res,
						    &res_node);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (!res_node) {
		ret = vmw_cmd_res_check(dev_priv, sw_context,
					vmw_res_shader,
					user_shader_converter,
					&cmd->body.shid, &res_node);
		if (unlikely(ret != 0))
			return ret;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_const_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShaderConst body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_const_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
			   header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSingleConstantBuffer body;
	} *cmd;
	struct vmw_resource_val_node *res_node = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
			  (unsigned) cmd->body.type,
			  (unsigned) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate an
 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShaderResources body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}

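/*
 * Illustrative note: variable-length DX commands like the one above carry
 * their array payload directly after the fixed command body, so the element
 * count is recovered from the header size alone:
 *
 *	num = (header.size - sizeof(body)) / sizeof(element);
 *
 * and (void *) &cmd[1] points at the first element of the trailing array.
 */
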
/**
 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShader body;
	} *cmd;
	struct vmw_resource *res = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			DRM_ERROR("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_resource_val_add(sw_context, res, NULL);
		if (ret)
			goto out_unref;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);
out_unref:
	if (res)
		vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validate an
 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		DRM_ERROR("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->buf[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_dx_set_index_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetIndexBuffer body;
	} *cmd;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = ((res_node) ? res_node->res : NULL);
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate an
 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetRenderTargets body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);

	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
		DRM_ERROR("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
				    vmw_ctx_binding_ds, 0,
				    &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0,
				     (void *)&cmd[1], num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearRenderTargetView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_rt,
				   cmd->body.renderTargetViewId);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearDepthStencilView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_ds,
				   cmd->body.depthStencilViewId);
}

static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource_val_node *srf_node;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view_type = vmw_view_cmd_to_type(header->id);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->sid, &srf_node);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man,
			    ctx_node->res,
			    srf_node->res,
			    view_type,
			    cmd->defined_id,
			    header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_set_so_targets - Validate an
 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_so binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, num, ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		DRM_ERROR("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->targets[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.bi.bt = vmw_ctx_binding_so;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate an
 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->sid, NULL);
}

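/*
 * Illustrative sketch (not driver code): the anonymous union above relies on
 * all three command bodies starting with the surface id, so a single 'sid'
 * member may alias any of them, and the BUILD_BUG_ON()s prove that layout at
 * compile time. A minimal, hypothetical analogue of the same trick:
 */
#if 0
struct example_a { u32 sid; u32 mip; };
struct example_b { u32 sid; u64 box; };
union example_prefix {
	struct example_a a;
	struct example_b b;
	u32 sid;		/* valid regardless of which member is live */
};
#endif
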
static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - Validate a view remove command and
 * schedule the view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this
 * command batch, make sure it's validated (present in the device) so that
 * the remove command will not confuse the device.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_view_remove(sw_context->man,
			      cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res,
			      &view);
	if (ret || !view)
		return ret;

	/*
	 * Add view to the validate list iff it was not created using this
	 * command batch.
	 */
	return vmw_view_res_val_add(sw_context, view);
}

/**
 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	vmw_resource_unreference(&res);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->res,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);
	if (ret)
		DRM_ERROR("Could not find shader to remove.\n");

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					user_context_converter,
					&cmd->body.cid, &ctx_node);
		if (ret)
			return ret;
	} else {
		ctx_node = sw_context->dx_ctx_node;
		if (!ctx_node) {
			DRM_ERROR("DX Context not set.\n");
			return -EINVAL;
		}
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
				cmd->body.shid, 0);
	if (IS_ERR(res)) {
		DRM_ERROR("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_resource_val_add(sw_context, res, &res_node);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		goto out_unref;
	}

	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
					&cmd->body.mobid,
					cmd->body.offsetInBytes);
out_unref:
	vmw_resource_unreference(&res);

	return ret;
}

static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch):"
			  " %u.\n", cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

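/*
 * Illustrative sketch (hypothetical struct, for reference only): non-3D SVGA
 * commands have no SVGA3dCmdHeader; they are a bare 32-bit command id
 * followed by a fixed-size body, which is why the sizes above are computed
 * as sizeof(uint32_t) plus the body size:
 */
#if 0
struct example_update_cmd {
	uint32_t cmd_id;	/* e.g. SVGA_CMD_UPDATE */
	SVGAFifoCmdUpdate body;	/* x, y, width, height */
};
#endif
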
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/*
	 * DX commands
	 */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_ok, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
		    &vmw_cmd_dx_set_so_targets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
		    &vmw_cmd_buffer_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
		    &vmw_cmd_pred_copy_check, true, false, true),
};

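/*
 * Illustrative sketch (hypothetical lookup, mirroring what vmw_cmd_check()
 * below does): a 3D command id indexes this table after the base offset is
 * subtracted, yielding the handler and its permission flags, e.g.:
 */
#if 0
	const struct vmw_cmd_entry *entry =
		&vmw_cmd_entries[SVGA_3D_CMD_SURFACE_COPY - SVGA_3D_CMD_BASE];
	/* entry->func == &vmw_cmd_surface_copy_check,
	 * entry->user_allow == true for this command. */
#endif
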
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_invalid;

	return 0;
out_invalid:
	DRM_ERROR("Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	DRM_ERROR("Privileged SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

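/*
 * Illustrative sketch of the stream the loop above walks: each 3D command is
 * an SVGA3dCmdHeader whose 'size' field counts only the body, so the cursor
 * advances by header.size + sizeof(SVGA3dCmdHeader) per iteration. Assuming,
 * for example, two commands with 16- and 24-byte bodies and an 8-byte header:
 *
 *	|hdr|16-byte body|hdr|24-byte body|
 *	 ^--- +24 -------^---- +32 -------^  cur_size reaches exactly 0
 *
 * Anything left over (cur_size != 0) means a command lied about its size.
 */
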
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index].base;
		bo = validate->bo;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @sw_context: The software context holding the staged bindings.
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
					  struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */

	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);

		if (val->staged_bindings) {
			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}

		kfree(val);
	}
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}

int vmw_validate_single_buffer(struct vmw_private *dev_priv,
			       struct ttm_buffer_object *bo,
			       bool interruptible,
			       bool validate_as_mob)
{
	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
						  base);
	int ret;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
				       false);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
			      false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 true,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

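/*
 * Worked example (assuming VMWGFX_CMD_BOUNCE_INIT_SIZE is 32 KiB): for a
 * 100 KiB batch the loop above grows the bounce buffer roughly 1.5x per
 * step, page aligned: 32K -> 48K -> 72K -> 108K, at which point
 * 108K >= 100K and the old buffer, if any, is vfree()d and a fresh one of
 * the final size is vmalloc()ed. The buffer is never shrunk, so repeated
 * submissions of similar size reuse the same allocation.
 */
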
/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
 * user-space handle is created for the fence; otherwise no handle is
 * created.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully
 * left untouched, and if it's preloaded with an -EFAULT by user-space,
 * the error will hopefully be detected.
 * Also if copying fails, user-space will be unable to signal the fence
 * object so we wait for it immediately, and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}

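/*
 * Illustrative user-space sketch (hypothetical, not part of the driver) of
 * the pre-set -EFAULT convention described above: if the kernel's
 * copy_to_user() fails, the member simply keeps the value the caller
 * planted there.
 */
#if 0
	struct drm_vmw_fence_rep rep = { .error = -EFAULT };

	arg.fence_rep = (unsigned long) &rep;
	drmCommandWriteRead(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
	if (rep.error == 0)
		/* rep.handle now holds a usable fence handle. */;
#endif
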
/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
 * the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch
 * pointed to by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands,
				   u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
					  sw_context->dx_ctx_node->res->id);
	else
		cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (!cmd) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		return -ENOMEM;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
 * the command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
				       id, false, header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel-space pointer to an already-copied batch, or NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and
 * copies the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 * If command buffers could not be used, the function will return the value
 * of @kernel_commands on function call. That value may be NULL. In that case,
 * the value of *@header will be set to NULL.
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands,
				u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	if (command_size > SVGA_CB_MAX_SIZE) {
		DRM_ERROR("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
					   true, header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands,
			     command_size);
	if (ret) {
		DRM_ERROR("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}

static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource *res;
	int ret;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
					      handle, user_context_converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
			  (unsigned) handle);
		return ret;
	}

	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
	if (unlikely(ret != 0))
		goto out_err;

	sw_context->dx_ctx_node = ctx_node;
	sw_context->man = vmw_context_res_man(res);
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct vmw_cmdbuf_header *header;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	int ret;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (ret)
			return ret;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands))
		return PTR_ERR(kernel_commands);

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);

		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else if (!header)
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	INIT_LIST_HEAD(&resource_list);
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0)) {
		list_splice_init(&sw_context->ctx_resource_list,
				 &sw_context->resource_list);
		goto out_err_nores;
	}

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	/*
	 * Merge the resource lists before checking the return status
	 * from vmw_cmd_check_all so that all the open hashtabs will
	 * be handled properly even if vmw_cmd_check_all fails.
	 */
	list_splice_init(&sw_context->ctx_resource_list,
			 &sw_context->resource_list);

	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
				     true, NULL);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header,
						command_size, sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resources_unreserve(sw_context, false);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resources_unreserve(sw_context, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}
/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
	pinned_val.shared = false;
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
	query_val.shared = false;
	list_add_tail(&query_val.head, &validate_list);
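	/*
	 * Reserve both buffer objects so that the barrier emission and the
	 * unpin below happen atomically with respect to other validators.
	 */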
	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
				     false, NULL);
	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}
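	/*
	 * The query barrier has been emitted (or was already pending), so
	 * the pinned buffers can now be safely unpinned. If the caller did
	 * not supply a fence, emit one locally to fence the reservation.
	 */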
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
	DRM_INFO("Dummy query bo pin count: %d\n",
		 dev_priv->dummy_query_bo->pin_count);
out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}
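/*
 * Typical caller pattern (illustrative sketch; this is what the wrapper
 * below does): a query barrier on, for example, context destruction is
 * issued as
 *
 *	mutex_lock(&dev_priv->cmdbuf_mutex);
 *	__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 *	mutex_unlock(&dev_priv->cmdbuf_mutex);
 *
 * Passing a non-NULL fence instead is only valid once the query context
 * id has been invalidated (see the BUG_ON above).
 */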
/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
		      struct drm_file *file_priv, size_t size)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg arg;
	int ret;
	static const size_t copy_offset[] = {
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
		sizeof(struct drm_vmw_execbuf_arg)};

	if (unlikely(size < copy_offset[0])) {
		DRM_ERROR("Invalid command size, ioctl %d\n",
			  DRM_VMW_EXECBUF);
		return -EINVAL;
	}

	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
		return -EFAULT;
	/*
	 * Extend the ioctl argument while maintaining backwards
	 * compatibility: we take different code paths depending on the
	 * value of arg.version.
	 */
	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
		     arg.version == 0)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	if (arg.version > 1 &&
	    copy_from_user(&arg.context_handle,
			   (void __user *) (data + copy_offset[0]),
			   copy_offset[arg.version - 1] -
			   copy_offset[0]) != 0)
		return -EFAULT;
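	/*
	 * Per-version argument fixups: version 1 predates the context
	 * handle, so synthesize an invalid one; version 2 requires unused
	 * padding to be zeroed.
	 */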
	switch (arg.version) {
	case 1:
		arg.context_handle = (uint32_t) -1;
		break;
	case 2:
		if (arg.pad64 != 0) {
			DRM_ERROR("Unused IOCTL data not set to zero.\n");
			return -EINVAL;
		}
		break;
	default:
		break;
	}
	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg.commands,
				  NULL, arg.command_size, arg.throttle_us,
				  arg.context_handle,
				  (void __user *)(unsigned long)arg.fence_rep,
				  NULL);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		return ret;

	vmw_kms_cursor_post_execbuf(dev_priv);

	return 0;
}
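/*
 * Illustrative user-space invocation (a sketch, not part of this file;
 * only argument fields referenced above are shown, and the exact layout
 * is defined by the vmwgfx uapi header):
 *
 *	struct drm_vmw_execbuf_arg arg = {0};
 *
 *	arg.commands = (uintptr_t)cmd_buf;
 *	arg.command_size = cmd_size;
 *	arg.version = 2;
 *	arg.context_handle = ctx_handle;
 *	arg.fence_rep = (uintptr_t)&fence_rep;
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 */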