/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
	struct ttm_prime_object prime;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
	.res_type = vmw_res_stream,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "video streams",
	.backup_placement = NULL,
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, false, NULL);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.shared = false;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	write_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource from the resource manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
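
/*
 * Illustrative note (not part of the original driver): the id allocated
 * above is the same id that vmw_resource_lookup() further down resolves
 * through the per-type idr, and that vmw_resource_release_id() hands back.
 * Assuming a resource "res" whose type has a registered idr, the lifecycle
 * is roughly:
 *
 *	ret = vmw_resource_alloc_id(res);        (res->id is now >= 1)
 *	found = vmw_resource_lookup(dev_priv, idr, res->id);
 *	(drop the reference on "found" when done)
 *	vmw_resource_release_id(res);            (res->id is -1 again)
 */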

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The struct vmw_resource to initialize.
 * @obj_type: Resource object type.
 * @delay_id: Boolean whether to defer device id allocation until
 * the first validation.
 * @res_free: Resource destructor.
 * @func: Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}
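
/*
 * Illustrative sketch (not part of the original driver): a resource
 * implementation typically pairs vmw_resource_init() with
 * vmw_resource_activate() once the hardware side has been set up, mirroring
 * what vmw_stream_init() does later in this file:
 *
 *	ret = vmw_resource_init(dev_priv, res, false, my_res_free, &my_func);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	(create the hardware object here)
 *	vmw_resource_activate(res, my_hw_destroy);
 *
 * "my_res_free", "my_func" and "my_hw_destroy" are hypothetical placeholders
 * for a resource type's destructor, function table and hardware destroy hook.
 */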

/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
						struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
		res = NULL;

	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res: On successful return the location pointed to will contain
 * a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
	return ret;
}

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
				  bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_dma_buffer));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}
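
/*
 * Worked example (illustrative only, assuming 4 KiB pages, 8-byte pointers
 * and an 8-byte dma_addr_t): a 100 KiB request gives num_pages = 25, so
 * page_array_size starts as ttm_round_pot(25 * 8) = 256 bytes.  With a
 * coherent DMA map_mode another ttm_round_pot(25 * 8) = 256 bytes is added,
 * and the rounded struct size plus the TTM backend size comes on top.
 */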

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	ttm_prime_object_kfree(vmw_user_bo, prime);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_dmabuf_destroy);

	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, NULL, bo_free);
	return ret;
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
					    enum ttm_ref_type ref_type)
{
	struct vmw_user_dma_buffer *user_bo;

	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		ttm_bo_synccpu_write_release(&user_bo->dma.base);
		break;
	default:
		BUG();
	}
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      (dev_priv->has_mob) ?
			      &vmw_sys_placement :
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_dmabuf_release,
				    &vmw_user_dmabuf_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	*handle = user_bo->prime.base.hash.key;

out_no_base_object:
	return ret;
}

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}

/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
					struct ttm_object_file *tfile,
					uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->dma.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
		long lret;

		if (nonblock)
			return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;

		lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->dma.base);

	return ret;
}

/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
					   struct ttm_object_file *tfile,
					   uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}
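
/*
 * Illustrative usage sketch (not part of the driver, assuming the libdrm
 * drmIoctl() wrapper): user space reaches the grab/release pair above
 * through the synccpu ioctl, passing the same flags for both operations.
 * Roughly:
 *
 *	struct drm_vmw_synccpu_arg arg = {
 *		.op     = drm_vmw_synccpu_grab,
 *		.handle = handle,
 *		.flags  = drm_vmw_synccpu_read | drm_vmw_synccpu_write,
 *	};
 *	drmIoctl(fd, DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,
 *			      struct drm_vmw_synccpu_arg), &arg);
 *	(CPU access here, then repeat with .op = drm_vmw_synccpu_release)
 *
 * Adding drm_vmw_synccpu_dontblock turns the grab into a -EBUSY try-lock,
 * and drm_vmw_synccpu_allow_cs only idles the buffer without blocking
 * further command submission, in which case the release is a no-op.
 */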

/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
				       dma);
		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_dmabuf_unreference(&dma_buf);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
						      arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf,
			      uint32_t *handle)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

	*handle = user_bo->prime.base.hash.key;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL);
}

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&vmw_stream_func);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(stream, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */
	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");
		goto out_ret;
	}

	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_ret;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */
	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_ret:
	return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
				  *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_dma_buffer *dma_buf;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    args->size, false, &args->handle,
				    &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
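
/*
 * Worked example (illustrative only): for a 1280x720 dumb buffer at 32 bpp,
 * the callback above computes pitch = 1280 * ((32 + 7) / 8) = 5120 bytes and
 * size = 5120 * 720 = 3686400 bytes, which is then handed to
 * vmw_user_dmabuf_alloc() just like the vmw_dmabuf_alloc ioctl path.
 */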

/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(backup == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}
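
/*
 * Illustrative note: the size computed above just rounds res->backup_size up
 * to a whole number of pages; e.g. with 4 KiB pages a backup_size of 10000
 * bytes yields (10000 + 4095) & PAGE_MASK = 12288 bytes, i.e. three pages.
 */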

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly
 * containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
 * @switch_backup: Backup buffer has been switched.
 * @new_backup: Pointer to new backup buffer if command submission
 * switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool switch_backup,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_dmabuf_reference(new_backup);
			lockdep_assert_held(&new_backup->base.resv->lock.base);
			list_add_tail(&res->mob_head, &new_backup->res_list);
		} else {
			res->backup = NULL;
		}
	}
	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 * for a resource and in that case, allocate one, reserve and validate it.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 * @val_buf: On successful return contains data about the
 * reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	val_buf->shared = false;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res: The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 * backup buffer.
 *
 * @val_buf: Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(NULL, &val_list);
	ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 * to a backup buffer.
 *
 * @res: The resource to evict.
 * @interruptible: Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.shared = false;
	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.shared = false;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;

	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		reservation_object_add_excl_fence(bo->resv, &fence->base);
		fence_put(&fence->base);
	} else
		reservation_object_add_excl_fence(bo->resv, &fence->base);
}

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses only members of struct
 * resource that remain static while bo::res is !NULL and
 * while we have @bo reserved. struct resource::backup is *not* a
 * static member. The resource validation code will take care
 * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo::res can be
 * used to determine whether there is a need to unbind and whether
 * it is safe to unbind.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
	struct vmw_dma_buffer *dma_buf;

	if (mem == NULL)
		return;

	if (bo->destroy != vmw_dmabuf_bo_free &&
	    bo->destroy != vmw_user_dmabuf_destroy)
		return;

	dma_buf = container_of(bo, struct vmw_dma_buffer, base);

	if (mem->mem_type != VMW_PL_MOB) {
		struct vmw_resource *res, *n;
		struct ttm_validate_buffer val_buf;

		val_buf.bo = bo;
		val_buf.shared = false;

		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {

			if (unlikely(res->func->unbind == NULL))
				continue;

			(void) res->func->unbind(res, true, &val_buf);
			res->backup_dirty = true;
			res->res_dirty = false;
			list_del_init(&res->mob_head);
		}

		(void) ttm_bo_wait(bo, false, false, false);
	}
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist. This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv = dx_query_ctx->dev_priv;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for "
			  "query MOB read back.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = dx_query_ctx->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct vmw_dma_buffer *dx_query_mob;
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_fence_single_bo(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv: Pointer to a device private struct
 * @type: The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, false);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv: Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_dma_buffer *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, false,
				       NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 interruptible, false);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_dma_buffer *vbo = res->backup;

		ttm_bo_reserve(&vbo->base, false, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}
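
/*
 * Illustrative sketch (not part of the original driver): pin references are
 * expected to be balanced.  A caller that needs a resource to keep its id
 * and backing across several command submissions would typically do:
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	(use res->id, submit commands)
 *	vmw_resource_unpin(res);
 */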

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}