git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
drm/i915: Only run execlist context-switch handler after an interrupt
author: Chris Wilson <chris@chris-wilson.co.uk>
Tue, 24 Jan 2017 15:20:21 +0000 (15:20 +0000)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Tue, 24 Jan 2017 15:56:01 +0000 (15:56 +0000)
Mark when we run the execlist tasklet following the interrupt, so we
don't probe a potentially uninitialised register when submitting the
contexts multiple times before the hardware responds.

v2: Use a shared engine->irq_posted
v3: Always use locked bitops to be sure of atomicity with respect to the
other bits in the mask.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170124152021.26587-1-chris@chris-wilson.co.uk
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.h

index 7e087c34426514f9dd91105e98f5597008f68387..3f3c9082b0f8df125483a32aa76ffad0c7139f5b 100644 (file)
@@ -1349,8 +1349,11 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
 {
        if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
                notify_ring(engine);
-       if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
-               tasklet_schedule(&engine->irq_tasklet);
+
+       if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
+               set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+               tasklet_hi_schedule(&engine->irq_tasklet);
+       }
 }
 
 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
index 9896027880ea8a3195659a7461be9681acde21d3..f729568e5e544ea7783ea845b6c49014e2013af7 100644 (file)
@@ -564,7 +564,7 @@ static void intel_lrc_irq_handler(unsigned long data)
 
        intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
 
-       if (!execlists_elsp_idle(engine)) {
+       while (test_and_clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
                u32 __iomem *csb_mmio =
                        dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
                u32 __iomem *buf =
@@ -1297,6 +1297,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
        DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
 
        /* After a GPU reset, we may have requests to replay */
+       clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
        if (!execlists_elsp_idle(engine)) {
                engine->execlist_port[0].count = 0;
                engine->execlist_port[1].count = 0;
index a9ea84ea3155b5bb2fed12173a0613b39225ad11..8e872730f8eb05223ce8f06a7e171db377288d63 100644 (file)
@@ -213,6 +213,7 @@ struct intel_engine_cs {
 
        unsigned long irq_posted;
 #define ENGINE_IRQ_BREADCRUMB 0
+#define ENGINE_IRQ_EXECLIST 1
 
        /* Rather than have every client wait upon all user interrupts,
         * with the herd waking after every interrupt and each doing the