drm/i915/bdw: Generic logical ring init and cleanup
author     Oscar Mateo <oscar.mateo@intel.com>
Thu, 24 Jul 2014 16:04:23 +0000 (17:04 +0100)
committer  Daniel Vetter <daniel.vetter@ffwll.ch>
Mon, 11 Aug 2014 14:55:17 +0000 (16:55 +0200)
Allocate and populate the default LRC for every ring, call
gen-specific init/cleanup, init/fini the command parser and
set the status page (now inside the LRC object). These are
things all engines/rings have in common.
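
In code, that shared path is the new logical_ring_init() in the intel_lrc.c
hunk below; a condensed sketch (identifiers as in the diff, with the error
paths and the kmap of the status page trimmed, so this is not the verbatim
function):

static int logical_ring_init(struct drm_device *dev,
                             struct intel_engine_cs *ring)
{
        struct intel_context *dctx = ring->default_context;
        int ret;

        ring->buffer = NULL;    /* execlists mode: no legacy ring buffer */
        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        init_waitqueue_head(&ring->irq_queue);

        /* allocate and populate the default LRC for this ring */
        ret = intel_lr_context_deferred_create(dctx, ring);
        if (ret)
                return ret;

        /* the status page now lives at offset 0 of the LRC object */
        ring->status_page.gfx_addr =
                i915_gem_obj_ggtt_offset(dctx->engine[ring->id].state);

        /* per-ring command parser */
        ret = i915_cmd_parser_init_ring(ring);
        if (ret)
                return ret;

        /* gen-specific init hook, if the engine provides one */
        return ring->init ? ring->init(ring) : 0;
}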

Stopping the ring before cleanup and initializing the seqnos
is left as a TODO task (we need more infrastructure in place
before we can achieve this).

v2: Check the ringbuffer backing obj for ring_is_initialized,
instead of the context backing obj (similar, but not exactly
the same).
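
Concretely (condensed from the intel_ringbuffer.c hunk below, nothing new
here): with execlists enabled, intel_ring_initialized() now answers based on
whether the default context's ringbuffer has a backing object, while the
legacy path keeps checking ring->buffer->obj:

bool intel_ring_initialized(struct intel_engine_cs *ring)
{
        if (!ring->dev)
                return false;

        if (i915.enable_execlists) {
                /* v2: the ringbuffer backing obj, not the context backing obj */
                struct intel_ringbuffer *ringbuf =
                        ring->default_context->engine[ring->id].ringbuf;

                return ringbuf->obj;
        }

        /* legacy ring buffer path, unchanged */
        return ring->buffer && ring->buffer->obj;
}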

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index bcb41002aa1364d8ea2c29c3f93d990db90b9309..7a08f3e9e1ae47b3211aef4554ed42e3db8157a9 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -445,10 +445,6 @@ int i915_gem_context_init(struct drm_device *dev)
 
                /* NB: RCS will hold a ref for all rings */
                ring->default_context = ctx;
-
-               /* FIXME: we really only want to do this for initialized rings */
-               if (i915.enable_execlists)
-                       intel_lr_context_deferred_create(ctx, ring);
        }
 
        DRM_DEBUG_DRIVER("%s context support initialized\n",
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9c2ff8f11c9041b1f6ab6e03af019bfebc04a326..ed7a4ff3bbd2c2970751504958da96e82c12751c 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -110,12 +110,60 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
 
 void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
 {
-       /* TODO */
+       if (!intel_ring_initialized(ring))
+               return;
+
+       /* TODO: make sure the ring is stopped */
+       ring->preallocated_lazy_request = NULL;
+       ring->outstanding_lazy_seqno = 0;
+
+       if (ring->cleanup)
+               ring->cleanup(ring);
+
+       i915_cmd_parser_fini_ring(ring);
+
+       if (ring->status_page.obj) {
+               kunmap(sg_page(ring->status_page.obj->pages->sgl));
+               ring->status_page.obj = NULL;
+       }
 }
 
 static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
 {
-       /* TODO */
+       int ret;
+       struct intel_context *dctx = ring->default_context;
+       struct drm_i915_gem_object *dctx_obj;
+
+       /* Intentionally left blank. */
+       ring->buffer = NULL;
+
+       ring->dev = dev;
+       INIT_LIST_HEAD(&ring->active_list);
+       INIT_LIST_HEAD(&ring->request_list);
+       init_waitqueue_head(&ring->irq_queue);
+
+       ret = intel_lr_context_deferred_create(dctx, ring);
+       if (ret)
+               return ret;
+
+       /* The status page is offset 0 from the context object in LRCs. */
+       dctx_obj = dctx->engine[ring->id].state;
+       ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
+       ring->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl));
+       if (ring->status_page.page_addr == NULL)
+               return -ENOMEM;
+       ring->status_page.obj = dctx_obj;
+
+       ret = i915_cmd_parser_init_ring(ring);
+       if (ret)
+               return ret;
+
+       if (ring->init) {
+               ret = ring->init(ring);
+               if (ret)
+                       return ret;
+       }
+
        return 0;
 }
 
@@ -399,6 +447,8 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
        int ret;
 
        WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
+       if (ctx->engine[ring->id].state)
+               return 0;
 
        context_size = round_up(get_lr_context_size(ring), 4096);
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c35f956ed6a07f3e1186397593eb0bc87ded8818..e4b97f5c5797ca3c3733c4826786cc4410614508 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
  */
 #define CACHELINE_BYTES 64
 
+bool
+intel_ring_initialized(struct intel_engine_cs *ring)
+{
+       struct drm_device *dev = ring->dev;
+
+       if (!dev)
+               return false;
+
+       if (i915.enable_execlists) {
+               struct intel_context *dctx = ring->default_context;
+               struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
+
+               return ringbuf->obj;
+       } else
+               return ring->buffer && ring->buffer->obj;
+}
+
 static inline int __ring_space(int head, int tail, int size)
 {
        int space = head - (tail + I915_RING_FREE_SPACE);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index fe9d9d9d3598e4753ab9c08f3e8e68bb224ad6ab..fbe54ef6a9a14a9707ca6f170369f479b2e4446d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -289,11 +289,7 @@ struct  intel_engine_cs {
        u32 (*get_cmd_length_mask)(u32 cmd_header);
 };
 
-static inline bool
-intel_ring_initialized(struct intel_engine_cs *ring)
-{
-       return ring->buffer && ring->buffer->obj;
-}
+bool intel_ring_initialized(struct intel_engine_cs *ring);
 
 static inline unsigned
 intel_ring_flag(struct intel_engine_cs *ring)