drm/i915: Rearrange i915_wait_request() accounting with callers
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 2314c88323e39861152485ccfd1b8643ce8bc41f..c49dd95413bd42373863495aec6a06704e432f48 100644
@@ -61,65 +61,25 @@ struct i915_mmu_object {
        bool attached;
 };
 
-static void wait_rendering(struct drm_i915_gem_object *obj)
-{
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
-       int i, n;
-
-       if (!obj->active)
-               return;
-
-       n = 0;
-       for (i = 0; i < I915_NUM_ENGINES; i++) {
-               struct drm_i915_gem_request *req;
-
-               req = obj->last_read_req[i];
-               if (req == NULL)
-                       continue;
-
-               requests[n++] = i915_gem_request_reference(req);
-       }
-
-       mutex_unlock(&dev->struct_mutex);
-
-       for (i = 0; i < n; i++)
-               __i915_wait_request(requests[i], false, NULL, NULL);
-
-       mutex_lock(&dev->struct_mutex);
-
-       for (i = 0; i < n; i++)
-               i915_gem_request_unreference(requests[i]);
-}
-
 static void cancel_userptr(struct work_struct *work)
 {
        struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
        struct drm_i915_gem_object *obj = mo->obj;
        struct drm_device *dev = obj->base.dev;
 
+       i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);
+
        mutex_lock(&dev->struct_mutex);
        /* Cancel any active worker and force us to re-evaluate gup */
        obj->userptr.work = NULL;
 
        if (obj->pages != NULL) {
-               struct drm_i915_private *dev_priv = to_i915(dev);
-               struct i915_vma *vma, *tmp;
-               bool was_interruptible;
-
-               wait_rendering(obj);
-
-               was_interruptible = dev_priv->mm.interruptible;
-               dev_priv->mm.interruptible = false;
-
-               list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
-                       WARN_ON(i915_vma_unbind(vma));
+               /* We are inside a kthread context and can't be interrupted */
+               WARN_ON(i915_gem_object_unbind(obj));
                WARN_ON(i915_gem_object_put_pages(obj));
-
-               dev_priv->mm.interruptible = was_interruptible;
        }
 
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
        mutex_unlock(&dev->struct_mutex);
 }
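
The open-coded wait_rendering() removed above had to collect request references and drop struct_mutex around each __i915_wait_request(); the replacement waits once on the whole object before the mutex is taken at all. A minimal sketch of the new pattern, assuming the era's i915_gem_object_wait(obj, flags, timeout, rps) prototype and I915_WAIT_* flag names, which are not shown in this hunk:

static void sketch_cancel(struct drm_i915_gem_object *obj)
{
        /* Omitting I915_WAIT_INTERRUPTIBLE makes the wait uninterruptible,
         * replacing the old mm.interruptible save/restore, and omitting
         * I915_WAIT_LOCKED declares that struct_mutex is not held, so
         * nothing has to be dropped and retaken around the wait.
         * I915_WAIT_ALL waits for every reader as well as the writer;
         * MAX_SCHEDULE_TIMEOUT waits for as long as it takes.
         */
        i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);

        mutex_lock(&obj->base.dev->struct_mutex);
        /* ... unbind vmas and release pages under the lock, as above ... */
        mutex_unlock(&obj->base.dev->struct_mutex);
}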
 
@@ -538,6 +498,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
        pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
        if (pvec != NULL) {
                struct mm_struct *mm = obj->userptr.mm->mm;
+               unsigned int flags = 0;
+
+               if (!obj->userptr.read_only)
+                       flags |= FOLL_WRITE;
 
                ret = -EFAULT;
                if (atomic_inc_not_zero(&mm->mm_users)) {
@@ -547,7 +511,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
                                        (work->task, mm,
                                         obj->userptr.ptr + pinned * PAGE_SIZE,
                                         npages - pinned,
-                                        !obj->userptr.read_only, 0,
+                                        flags,
                                         pvec + pinned, NULL);
                                if (ret < 0)
                                        break;
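
These two hunks track the kernel-wide switch of get_user_pages_remote() from write/force booleans to a FOLL_* bitmask: a userptr object that is not read-only requests writable pins via FOLL_WRITE. A sketch of the flag computation against the era's seven-argument prototype; the wrapper function is hypothetical, and only the flag logic and the call shape come from the diff:

#include <linux/mm.h>   /* get_user_pages_remote(), FOLL_WRITE */

static long sketch_pin_pages(struct task_struct *task, struct mm_struct *mm,
                             unsigned long ptr, unsigned long npages,
                             bool read_only, struct page **pvec)
{
        unsigned int flags = 0;

        /* Request writable pins unless the object was created read-only. */
        if (!read_only)
                flags |= FOLL_WRITE;

        return get_user_pages_remote(task, mm, ptr, npages,
                                     flags, pvec, NULL);
}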
@@ -572,12 +536,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
                        }
                }
                obj->userptr.work = ERR_PTR(ret);
-               if (ret)
-                       __i915_gem_userptr_set_active(obj, false);
        }
 
        obj->userptr.workers--;
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
        mutex_unlock(&dev->struct_mutex);
 
        release_pages(pvec, pinned, 0);
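
Here and throughout, raw drm_gem_object_{reference,unreference} calls give way to i915-local wrappers. In this era those are thin inlines along the following lines (an assumption based on the contemporary i915_drv.h, not part of this diff):

static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
        drm_gem_object_reference(&obj->base);
        return obj;     /* returned so callers can chain the get */
}

static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
        drm_gem_object_unreference(&obj->base);
}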
@@ -622,8 +584,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
        obj->userptr.work = &work->work;
        obj->userptr.workers++;
 
-       work->obj = obj;
-       drm_gem_object_reference(&obj->base);
+       work->obj = i915_gem_object_get(obj);
 
        work->task = current;
        get_task_struct(work->task);
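
Because the get wrapper returns the object it just referenced, the hunk above folds take-a-reference-and-store into the single work->obj = i915_gem_object_get(obj) statement.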
@@ -659,15 +620,14 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
         * to the vma (discard or cloning) which should prevent the more
         * egregious cases from causing harm.
         */
-       if (IS_ERR(obj->userptr.work)) {
-               /* active flag will have been dropped already by the worker */
-               ret = PTR_ERR(obj->userptr.work);
-               obj->userptr.work = NULL;
-               return ret;
-       }
-       if (obj->userptr.work)
+
+       if (obj->userptr.work) {
                /* active flag should still be held for the pending work */
-               return -EAGAIN;
+               if (IS_ERR(obj->userptr.work))
+                       return PTR_ERR(obj->userptr.work);
+               else
+                       return -EAGAIN;
+       }
 
        /* Let the mmu-notifier know that we have begun and need cancellation */
        ret = __i915_gem_userptr_set_active(obj, true);
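
Restructured this way, the check reads as a three-state test on obj->userptr.work, and dropping the old obj->userptr.work = NULL assignment means a worker's error now stays latched until cancel_userptr() clears the field. A minimal restatement (identifiers from the diff; the helper name and the comments are annotations):

static int sketch_check_worker(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.work) {
                if (IS_ERR(obj->userptr.work))
                        return PTR_ERR(obj->userptr.work); /* worker failed */
                else
                        return -EAGAIN; /* worker still pending: retry */
        }
        return 0;       /* no worker outstanding: caller may queue one */
}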
@@ -846,7 +806,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
                ret = drm_gem_handle_create(file, &obj->base, &handle);
 
        /* drop reference from allocate - handle holds it now */
-       drm_gem_object_unreference_unlocked(&obj->base);
+       i915_gem_object_put_unlocked(obj);
        if (ret)
                return ret;
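
struct_mutex is not held at the tail of the ioctl, hence the unlocked flavour of the put. Its era-typical shape, again an assumption rather than something this diff shows:

static inline void
i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
{
        drm_gem_object_unreference_unlocked(&obj->base);
}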