drm/i915: Update flush_all_caches() to take request structures
author    John Harrison <John.C.Harrison@Intel.com>
          Fri, 29 May 2015 16:43:55 +0000 (17:43 +0100)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
          Tue, 23 Jun 2015 12:02:20 +0000 (14:02 +0200)
Updated the *_ring_flush_all_caches() functions to take requests instead of
rings or ringbuf/context pairs.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
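
The idea behind the change is that a drm_i915_gem_request already carries the engine, ring buffer and context it was created against (req->ring, req->ringbuf, req->ctx in the hunks below), so callers only hand over the request and the callee derives whatever it needs. A minimal standalone sketch of that pattern follows; the struct definitions are deliberately simplified stand-ins, not the real i915 types:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for the real i915 structures. */
    struct intel_engine_cs { bool gpu_caches_dirty; const char *name; };
    struct intel_ringbuffer { struct intel_engine_cs *ring; };
    struct intel_context { int id; };

    struct drm_i915_gem_request {
            struct intel_engine_cs *ring;      /* engine the request executes on */
            struct intel_ringbuffer *ringbuf;  /* ring buffer commands are emitted into */
            struct intel_context *ctx;         /* hardware context of the request */
    };

    /* After the change: the request is the single parameter, everything
     * else is looked up from it. */
    static int ring_flush_all_caches(struct drm_i915_gem_request *req)
    {
            struct intel_engine_cs *ring = req->ring;

            if (!ring->gpu_caches_dirty)
                    return 0;

            /* The real code would call ring->emit_flush() with
             * req->ringbuf / req->ctx at this point. */
            printf("flushing caches on %s (ctx %d)\n", ring->name, req->ctx->id);
            ring->gpu_caches_dirty = false;
            return 0;
    }

    int main(void)
    {
            struct intel_engine_cs rcs = { .gpu_caches_dirty = true, .name = "rcs" };
            struct intel_ringbuffer rb = { .ring = &rcs };
            struct intel_context ctx = { .id = 1 };
            struct drm_i915_gem_request req = { .ring = &rcs, .ringbuf = &rb, .ctx = &ctx };

            return ring_flush_all_caches(&req);
    }

With that shape, call sites such as __i915_add_request() and the workaround emitters no longer need to know whether they are on the legacy or the execlists path; they just pass the request through, as the diffs below show.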

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 79c4c328984db50f03d651f2e48098a7502b4c90..25fe1ef32eaacdc2ecda562b6b1fd3f05f760efe 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2507,9 +2507,9 @@ void __i915_add_request(struct drm_i915_gem_request *request,
         */
        if (flush_caches) {
                if (i915.enable_execlists)
-                       ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
+                       ret = logical_ring_flush_all_caches(request);
                else
-                       ret = intel_ring_flush_all_caches(ring);
+                       ret = intel_ring_flush_all_caches(request);
                /* Not allowed to fail! */
                WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
        }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 11f0c25c102087819e991401d1a7d94ac8d6226a..7a18e83623b29834b4ee2859a0fd8f2ade52d420 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -997,16 +997,15 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
        I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
-                                 struct intel_context *ctx)
+int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = ringbuf->ring;
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
        if (!ring->gpu_caches_dirty)
                return 0;
 
-       ret = ring->emit_flush(ringbuf, ctx, 0, I915_GEM_GPU_DOMAINS);
+       ret = ring->emit_flush(req->ringbuf, req->ctx, 0, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;
 
@@ -1071,7 +1070,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
                return 0;
 
        ring->gpu_caches_dirty = true;
-       ret = logical_ring_flush_all_caches(ringbuf, req->ctx);
+       ret = logical_ring_flush_all_caches(req);
        if (ret)
                return ret;
 
@@ -1089,7 +1088,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
        intel_logical_ring_advance(ringbuf);
 
        ring->gpu_caches_dirty = true;
-       ret = logical_ring_flush_all_caches(ringbuf, req->ctx);
+       ret = logical_ring_flush_all_caches(req);
        if (ret)
                return ret;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index bf137c43e0a903799ab12a670c6b708539b52286..044c0e5c72e5f16529c97bdfbf2ff92d50699466 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -41,8 +41,7 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring);
 void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
 int intel_logical_rings_init(struct drm_device *dev);
 
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
-                                 struct intel_context *ctx);
+int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
 /**
  * intel_logical_ring_advance() - advance the ringbuffer tail
  * @ringbuf: Ringbuffer to advance.
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 49869feb9e23f5714e99e784201e2790d8f13d7b..48ca73e7aaa6b4a9832f21ec7289d8a13b56ca6c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -715,7 +715,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
                return 0;
 
        ring->gpu_caches_dirty = true;
-       ret = intel_ring_flush_all_caches(ring);
+       ret = intel_ring_flush_all_caches(req);
        if (ret)
                return ret;
 
@@ -733,7 +733,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
        intel_ring_advance(ring);
 
        ring->gpu_caches_dirty = true;
-       ret = intel_ring_flush_all_caches(ring);
+       ret = intel_ring_flush_all_caches(req);
        if (ret)
                return ret;
 
@@ -2892,8 +2892,9 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 }
 
 int
-intel_ring_flush_all_caches(struct intel_engine_cs *ring)
+intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
        if (!ring->gpu_caches_dirty)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2eba35847946d3c7e9038f256a2f355895f955c7..3f70687a2dc68a2b22eab5bcbf04e1fd29f1df78 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -445,7 +445,7 @@ bool intel_ring_stopped(struct intel_engine_cs *ring);
 
 int __must_check intel_ring_idle(struct intel_engine_cs *ring);
 void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
-int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
+int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
 int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
 
 void intel_fini_pipe_control(struct intel_engine_cs *ring);