From ff8658850aa9bdb5bc308ff8cce60c2558c58566 Mon Sep 17 00:00:00 2001
From: John Harrison
Date: Mon, 24 Nov 2014 18:49:28 +0000
Subject: [PATCH] drm/i915: Ensure requests stick around during waits

Added reference counting of the request structure around __wait_seqno()
calls. This is a precursor to updating the wait code itself to take the
request rather than a seqno. At that point, it would be a Bad Idea for a
request object to be retired and freed while the wait code is still
using it.

v3: Note that even though the mutex lock is held during a call to
i915_wait_seqno(), it is still necessary to explicitly bump the
reference count. It appears that the shrinker can asynchronously retire
items even though the mutex is locked.

For: VIZ-4377
Signed-off-by: John Harrison
Reviewed-by: Thomas Daniel
[danvet: Remove wrongly squashed hunk which breaks the build.]
Signed-off-by: Daniel Vetter
---
 drivers/gpu/drm/i915/i915_gem.c | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bf0135815a96..8ec07853b35c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1417,10 +1417,12 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 		return ret;
 
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+	i915_gem_request_reference(req);
 	mutex_unlock(&dev->struct_mutex);
 	ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
 				file_priv);
 	mutex_lock(&dev->struct_mutex);
+	i915_gem_request_unreference(req);
 	if (ret)
 		return ret;
 
@@ -2920,6 +2922,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_wait *args = data;
 	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_request *req;
 	struct intel_engine_cs *ring = NULL;
 	unsigned reset_counter;
 	u32 seqno = 0;
@@ -2946,7 +2949,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	if (!obj->active || !obj->last_read_req)
 		goto out;
 
-	seqno = i915_gem_request_get_seqno(obj->last_read_req);
+	req = obj->last_read_req;
+	seqno = i915_gem_request_get_seqno(req);
 	WARN_ON(seqno == 0);
 	ring = obj->ring;
 
@@ -2960,10 +2964,15 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	 */
 	drm_gem_object_unreference(&obj->base);
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+	i915_gem_request_reference(req);
 	mutex_unlock(&dev->struct_mutex);
 
-	return __i915_wait_seqno(ring, seqno, reset_counter, true,
-				 &args->timeout_ns, file->driver_priv);
+	ret = __i915_wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
+				file->driver_priv);
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_request_unreference(req);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
 
 out:
 	drm_gem_object_unreference(&obj->base);
@@ -4118,6 +4127,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 		target = request;
 	}
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+	if (target)
+		i915_gem_request_reference(target);
 	spin_unlock(&file_priv->mm.lock);
 
 	if (target == NULL)
@@ -4129,6 +4140,10 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_request_unreference(target);
+	mutex_unlock(&dev->struct_mutex);
+
 	return ret;
 }
-- 
2.39.5
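
The locking pattern the patch applies, shown as a minimal illustrative
userspace C sketch (every name here, struct request, request_reference(),
do_wait(), wait_request(), is a simplified stand-in and not the real i915
API): take a reference on the request before dropping the mutex, wait with
no lock held, then re-take the mutex before dropping the reference.

#include <pthread.h>
#include <stdlib.h>

/* Simplified stand-in for struct drm_i915_gem_request. */
struct request {
	int refcount;	/* protected by the caller's mutex in this sketch */
};

static void request_reference(struct request *req)
{
	req->refcount++;
}

static void request_unreference(struct request *req)
{
	if (--req->refcount == 0)
		free(req);	/* stands in for retiring/freeing the request */
}

/* Stand-in for __i915_wait_seqno(): blocks with no lock held. */
static int do_wait(struct request *req)
{
	(void)req;
	return 0;
}

/* Called with *lock held; returns with *lock held again. */
int wait_request(pthread_mutex_t *lock, struct request *req)
{
	int ret;

	/*
	 * Pin the request before unlocking: once the mutex is dropped,
	 * an asynchronous retire (e.g. from the shrinker) could release
	 * the last reference and free the request mid-wait.
	 */
	request_reference(req);
	pthread_mutex_unlock(lock);

	ret = do_wait(req);

	/* Re-take the lock before unreferencing, as the patch does. */
	pthread_mutex_lock(lock);
	request_unreference(req);

	return ret;
}

The ordering is the point: reference, unlock, wait, lock, unreference.
Taking the reference only after the unlock would leave a window in which
the request can be retired and freed before the wait even begins.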