/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_frontbuffer.h"
#include "i915_gem_clflush.h"

static DEFINE_SPINLOCK(clflush_lock);
static u64 clflush_context;
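
/*
 * A clflush is tracked by three cooperating pieces: a dma_fence published
 * to the object's reservation so that other users can wait for the flush
 * to complete, an i915_sw_fence that defers the flush until all prior work
 * on the object has finished, and the work item that performs the actual
 * cache flush.
 */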
struct clflush {
        struct dma_fence dma; /* Must be first for dma_fence_free() */
        struct i915_sw_fence wait;
        struct work_struct work;
        struct drm_i915_gem_object *obj;
};

static const char *i915_clflush_get_driver_name(struct dma_fence *fence)
{
        return DRIVER_NAME;
}

static const char *i915_clflush_get_timeline_name(struct dma_fence *fence)
{
        return "clflush";
}

static bool i915_clflush_enable_signaling(struct dma_fence *fence)
{
        return true;
}

static void i915_clflush_release(struct dma_fence *fence)
{
        struct clflush *clflush = container_of(fence, typeof(*clflush), dma);

        i915_sw_fence_fini(&clflush->wait);

        BUILD_BUG_ON(offsetof(typeof(*clflush), dma));
        dma_fence_free(&clflush->dma);
}

static const struct dma_fence_ops i915_clflush_ops = {
        .get_driver_name = i915_clflush_get_driver_name,
        .get_timeline_name = i915_clflush_get_timeline_name,
        .enable_signaling = i915_clflush_enable_signaling,
        .wait = dma_fence_default_wait,
        .release = i915_clflush_release,
};
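
/*
 * Flush the CPU caches for every backing page of the object, mark it
 * clean, and notify frontbuffer tracking that the CPU writes have now
 * reached memory.
 */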
static void __i915_do_clflush(struct drm_i915_gem_object *obj)
{
        drm_clflush_sg(obj->mm.pages);
        obj->cache_dirty = false;

        intel_fb_obj_flush(obj, ORIGIN_CPU);
}
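
/*
 * Worker that runs once the object's prior fences have signaled: it pins
 * the backing pages, flushes them, then signals and drops the dma_fence.
 */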
static void i915_clflush_work(struct work_struct *work)
{
        struct clflush *clflush = container_of(work, typeof(*clflush), work);
        struct drm_i915_gem_object *obj = clflush->obj;

        if (!obj->cache_dirty)
                goto out;

        if (i915_gem_object_pin_pages(obj)) {
                DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
                goto out;
        }

        __i915_do_clflush(obj);

        i915_gem_object_unpin_pages(obj);

out:
        i915_gem_object_put(obj);

        dma_fence_signal(&clflush->dma);
        dma_fence_put(&clflush->dma);
}
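
/*
 * i915_sw_fence callback: on FENCE_COMPLETE all dependencies have
 * signaled, so the flush work may be scheduled; on FENCE_FREE the
 * sw_fence is being torn down and its reference on the dma_fence must
 * be dropped.
 */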
static int __i915_sw_fence_call
i915_clflush_notify(struct i915_sw_fence *fence,
                    enum i915_sw_fence_notify state)
{
        struct clflush *clflush = container_of(fence, typeof(*clflush), wait);

        switch (state) {
        case FENCE_COMPLETE:
                schedule_work(&clflush->work);
                break;

        case FENCE_FREE:
                dma_fence_put(&clflush->dma);
                break;
        }

        return NOTIFY_DONE;
}
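
/*
 * i915_gem_clflush_object - ensure CPU writes to the object's pages reach
 * memory before they are handed to a non-coherent reader. Judging from the
 * code below, I915_CLFLUSH_FORCE flushes even objects we track as
 * coherent, and I915_CLFLUSH_SYNC performs the flush immediately rather
 * than deferring it behind the object's outstanding fences.
 */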
void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
                             unsigned int flags)
{
        struct clflush *clflush;

        /*
         * Stolen memory is always coherent with the GPU as it is explicitly
         * marked as wc by the system, or the system is cache-coherent.
         * Similarly, we only access struct pages through the CPU cache, so
         * anything not backed by physical memory we consider to be always
         * coherent and in no need of clflushing.
         */
        if (!i915_gem_object_has_struct_page(obj))
                return;

        obj->cache_dirty = true;

        /* If the GPU is snooping the contents of the CPU cache,
         * we do not need to manually clear the CPU cache lines. However,
         * the caches are only snooped when the render cache is
         * flushed/invalidated. As we always have to emit invalidations
         * and flushes when moving into and out of the RENDER domain, correct
         * snooping behaviour occurs naturally as the result of our domain
         * tracking.
         */
        if (!(flags & I915_CLFLUSH_FORCE) && i915_gem_object_is_coherent(obj))
                return;

        trace_i915_gem_object_clflush(obj);
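
        /*
         * Unless the caller asks for a synchronous flush (or the fence
         * allocation fails), the clflush is performed asynchronously: we
         * allocate a fence, queue the flush behind the object's current
         * fences, and publish it as the new exclusive fence so that later
         * users wait for the flush to complete.
         */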
        clflush = NULL;
        if (!(flags & I915_CLFLUSH_SYNC))
                clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
        if (clflush) {
                dma_fence_init(&clflush->dma,
                               &i915_clflush_ops,
                               &clflush_lock,
                               clflush_context,
                               0);
                i915_sw_fence_init(&clflush->wait, i915_clflush_notify);

                clflush->obj = i915_gem_object_get(obj);
                INIT_WORK(&clflush->work, i915_clflush_work);

                dma_fence_get(&clflush->dma);

                i915_sw_fence_await_reservation(&clflush->wait,
                                                obj->resv, NULL,
                                                true, I915_FENCE_TIMEOUT,
                                                GFP_KERNEL);

                reservation_object_lock(obj->resv, NULL);
                reservation_object_add_excl_fence(obj->resv, &clflush->dma);
                reservation_object_unlock(obj->resv);

                i915_sw_fence_commit(&clflush->wait);
        } else if (obj->mm.pages) {
                __i915_do_clflush(obj);
        } else {
                GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
        }
}
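
/*
 * Allocate the single fence context shared by all clflush fences;
 * presumably called once during driver initialisation.
 */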
void i915_gem_clflush_init(struct drm_i915_private *i915)
{
        clflush_context = dma_fence_context_alloc(1);
}