drm/i915: Force CPU synchronisation even if userspace requests ASYNC
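With this change, i915_gem_clflush_object() returns bool instead of void, so callers can tell whether a CPU cache flush was actually performed or queued. The obj->cache_dirty bookkeeping also moves into the function itself: the flag is cleared immediately for objects with no backing struct pages, left alone when the object is coherent and I915_CLFLUSH_FORCE is not set, and cleared once a flush has been issued.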
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index ffac7a1f0caf34d71588c62d88fe83f964087be6..348b29a845c961c73b1605b23212692b438f0f19 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -71,8 +71,6 @@ static const struct dma_fence_ops i915_clflush_ops = {
 static void __i915_do_clflush(struct drm_i915_gem_object *obj)
 {
        drm_clflush_sg(obj->mm.pages);
-       obj->cache_dirty = false;
-
        intel_fb_obj_flush(obj, ORIGIN_CPU);
 }
 
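Clearing obj->cache_dirty is hoisted out of __i915_do_clflush(): after this patch the flag is owned by i915_gem_clflush_object() itself (see the final hunk), giving the synchronous and asynchronous flush paths a single point of bookkeeping.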
@@ -81,9 +79,6 @@ static void i915_clflush_work(struct work_struct *work)
        struct clflush *clflush = container_of(work, typeof(*clflush), work);
        struct drm_i915_gem_object *obj = clflush->obj;
 
-       if (!obj->cache_dirty)
-               goto out;
-
        if (i915_gem_object_pin_pages(obj)) {
                DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
                goto out;
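Correspondingly, the deferred worker no longer re-tests obj->cache_dirty: the caller now only queues the work for objects known to be dirty, an invariant that the GEM_BUG_ON() added further down makes explicit.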
@@ -119,7 +114,7 @@ i915_clflush_notify(struct i915_sw_fence *fence,
        return NOTIFY_DONE;
 }
 
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
                             unsigned int flags)
 {
        struct clflush *clflush;
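The signature change from void to bool is the user-visible part of the patch. As a hypothetical caller sketch (the function name below is invented for illustration; the actual call sites are not shown in this diff), the return value lets a caller drop the CPU write domain only when a flush really happened:

        /* Hypothetical example, not part of this patch: retire the CPU
         * write domain only if a clflush was performed or queued.
         */
        static void example_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
        {
                if (i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC))
                        obj->base.write_domain = 0;
        }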
@@ -131,10 +126,10 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
         * anything not backed by physical memory we consider to be always
         * coherent and not need clflushing.
         */
-       if (!i915_gem_object_has_struct_page(obj))
-               return;
-
-       obj->cache_dirty = true;
+       if (!i915_gem_object_has_struct_page(obj)) {
+               obj->cache_dirty = false;
+               return false;
+       }
 
        /* If the GPU is snooping the contents of the CPU cache,
         * we do not need to manually clear the CPU cache lines.  However,
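Objects without backing struct pages are considered always coherent, so there is nothing to clflush; the early return now also clears obj->cache_dirty, so the stale flag cannot trigger a pointless flush later, and reports false to the caller.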
@@ -144,8 +139,8 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
         * snooping behaviour occurs naturally as the result of our domain
         * tracking.
         */
-       if (!(flags & I915_CLFLUSH_FORCE) && i915_gem_object_is_coherent(obj))
-               return;
+       if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent)
+               return false;
 
        trace_i915_gem_object_clflush(obj);
 
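The coherency test now reads the precomputed obj->cache_coherent flag instead of calling i915_gem_object_is_coherent() on every request; I915_CLFLUSH_FORCE still overrides it. As a rough sketch of the idea only (an assumption about how such a flag might be derived, not code taken from this patch), coherency follows from the object's cache level and LLC presence, and the result is cached on the object at creation or cache-level changes:

        /*
         * Sketch under assumptions, not from this patch: derive coherency
         * once and cache it as obj->cache_coherent, rather than
         * recomputing it on every flush request.
         */
        static bool example_object_is_coherent(struct drm_i915_gem_object *obj)
        {
                return obj->cache_level != I915_CACHE_NONE ||
                       HAS_LLC(to_i915(obj->base.dev));
        }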
@@ -153,6 +148,8 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
        if (!(flags & I915_CLFLUSH_SYNC))
                clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
        if (clflush) {
+               GEM_BUG_ON(!obj->cache_dirty);
+
                dma_fence_init(&clflush->dma,
                               &i915_clflush_ops,
                               &clflush_lock,
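The new GEM_BUG_ON(!obj->cache_dirty) documents the invariant this patch establishes: an asynchronous flush is only ever queued for an object with dirty CPU cachelines. Like other GEM_BUG_ON() checks, it is compiled out unless CONFIG_DRM_I915_DEBUG_GEM is enabled.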
@@ -180,4 +177,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
        } else {
                GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
        }
+
+       obj->cache_dirty = false;
+       return true;
 }
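Taken together, the hunks give the function the following shape (a paraphrase of the patched control flow, trimmed to the parts shown above):

        bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
                                     unsigned int flags)
        {
                if (!i915_gem_object_has_struct_page(obj)) {
                        obj->cache_dirty = false;       /* nothing to flush */
                        return false;
                }

                if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent)
                        return false;                   /* GPU snoops the CPU cache */

                /* ... flush inline, or queue the clflush worker ... */

                obj->cache_dirty = false;
                return true;
        }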