git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
drm/i915: Mark all incomplete requests as -EIO when wedged
author: Chris Wilson <chris@chris-wilson.co.uk>
Tue, 10 Jan 2017 17:22:45 +0000 (17:22 +0000)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Tue, 10 Jan 2017 20:49:31 +0000 (20:49 +0000)
Similarly to a normal reset, after we mark the GPU as wedged (completely
fubar and no more requests can be executed), set the error status on all
the in flight requests.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170110172246.27297-4-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_gem.c

index 94ad9eb83a5cb4496af8beea0f8136adf97fe899..7f73a35c772539fafb18ec11749ec84cda817131 100644 (file)
@@ -2730,12 +2730,16 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
 
 static void nop_submit_request(struct drm_i915_gem_request *request)
 {
+       dma_fence_set_error(&request->fence, -EIO);
        i915_gem_request_submit(request);
        intel_engine_init_global_seqno(request->engine, request->global_seqno);
 }
 
 static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
 {
+       struct drm_i915_gem_request *request;
+       unsigned long flags;
+
        /* We need to be sure that no thread is running the old callback as
         * we install the nop handler (otherwise we would submit a request
         * to hardware that will never complete). In order to prevent this
@@ -2744,6 +2748,12 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
         */
        engine->submit_request = nop_submit_request;
 
+       /* Mark all executing requests as skipped */
+       spin_lock_irqsave(&engine->timeline->lock, flags);
+       list_for_each_entry(request, &engine->timeline->requests, link)
+               dma_fence_set_error(&request->fence, -EIO);
+       spin_unlock_irqrestore(&engine->timeline->lock, flags);
+
        /* Mark all pending requests as complete so that any concurrent
         * (lockless) lookup doesn't try and wait upon the request as we
         * reset it.