git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
drm/i915: Repeat flush of idle work during suspend
author: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 23 Dec 2016 14:57:56 +0000 (14:57 +0000)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 23 Dec 2016 16:07:11 +0000 (16:07 +0000)
The idle work handler is self-arming - if it detects that it needs to
run again it will queue itself from its work handler. Take greater care
when trying to drain the idle work, and double check that it is flushed.

The free worker has a similar issue where it is armed by an RCU task
which may be running concurrently with us.

This should hopefully help with the sporadic WARN_ON(dev_priv->gt.awake)
from i915_gem_suspend.

v2: Reuse drain_freed_objects.
v3: Don't try to flush the freed objects from the shrinker, as it may be
underneath the struct_mutex already.
v4: do while and comment upon the excess rcu_barrier in drain_freed_objects

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161223145804.6605-2-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c

index efb2669f0ecd9e87c02023500aed2f883183826b..7c7d6626dee812734b51f6bcfdcd6964539e550e 100644 (file)
@@ -4123,7 +4123,7 @@ unlock:
 
        if (val & DROP_FREED) {
                synchronize_rcu();
-               flush_work(&dev_priv->mm.free_work);
+               i915_gem_drain_freed_objects(dev_priv);
        }
 
        return ret;
index 6428588518aa85506e0181ca53d2305f7f94c1ab..2c020eafada62be1b46d50af386b0560ab06e1d5 100644 (file)
@@ -545,8 +545,7 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv)
        i915_gem_context_fini(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
-       rcu_barrier();
-       flush_work(&dev_priv->mm.free_work);
+       i915_gem_drain_freed_objects(dev_priv);
 
        WARN_ON(!list_empty(&dev_priv->context_list));
 }
index bd3915ac01334d7b6e94b8374ce7f1604b5a3da7..32e2d743102522665ac2f16db1326c275f65246b 100644 (file)
@@ -3208,6 +3208,19 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
 void i915_gem_free_object(struct drm_gem_object *obj);
 
+static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
+{
+       /* A single pass should suffice to release all the freed objects (along
+        * most call paths), but be a little more paranoid in that freeing
+        * the objects does take a little amount of time, during which the rcu
+        * callbacks could have added new objects into the freed list, and
+        * armed the work again.
+        */
+       do {
+               rcu_barrier();
+       } while (flush_work(&i915->mm.free_work));
+}
+
 struct i915_vma * __must_check
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                         const struct i915_ggtt_view *view,
index 84ab7099ff7425ea46bfc1fb0d033afbbe6d67df..0f5f29c7ce40eea83562e171e4a596f352fa9020 100644 (file)
@@ -4264,8 +4264,14 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
 
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
        cancel_delayed_work_sync(&dev_priv->gt.retire_work);
-       flush_delayed_work(&dev_priv->gt.idle_work);
-       flush_work(&dev_priv->mm.free_work);
+
+       /* As the idle_work is rearming if it detects a race, play safe and
+        * repeat the flush until it is definitely idle.
+        */
+       while (flush_delayed_work(&dev_priv->gt.idle_work))
+               ;
+
+       i915_gem_drain_freed_objects(dev_priv);
 
 	/* Assert that we successfully flushed all the work and
         * reset the GPU back to its idle, low power state.