From 536f5b5e86b225dab94c7ff8061ae482b6077387 Mon Sep 17 00:00:00 2001
From: Ander Conselvan de Oliveira
Date: Thu, 6 Nov 2014 11:03:40 +0200
Subject: [PATCH] drm/i915: Make mmio flip wait for seqno in the work function

This simplifies the code quite a bit compared to iterating over all
rings during the ring interrupt. Also, it allows us to drop the
mmio_flip spinlock, since the mmio_flip struct is only accessed in two
places. The first is when the flip is queued and the other when the
mmio writes are done. Since a flip cannot be queued while there is a
pending flip, the two paths shouldn't ever run in parallel. We might
need to revisit that if support for replacing flips is implemented,
though.

v2: Don't hold dev->struct_mutex while waiting (Chris)
v3: Make the wait uninterruptible (Chris)

Signed-off-by: Ander Conselvan de Oliveira
Reviewed-by: Chris Wilson
Signed-off-by: Daniel Vetter
---
 drivers/gpu/drm/i915/i915_irq.c      |  3 -
 drivers/gpu/drm/i915/intel_display.c | 90 ++++------------------------
 drivers/gpu/drm/i915/intel_drv.h     |  9 +--
 3 files changed, 12 insertions(+), 90 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 318a6a0724d0..5fff2870a17b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -979,9 +979,6 @@ static void notify_ring(struct drm_device *dev,
 
 	trace_i915_gem_request_complete(ring);
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		intel_notify_mmio_flip(ring);
-
 	wake_up_all(&ring->irq_queue);
 	i915_queue_hangcheck(dev);
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 18ad2bc8550b..cb0f32fd3b5c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9417,73 +9417,24 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
 
 	if (atomic_update)
 		intel_pipe_update_end(intel_crtc, start_vbl_count);
-
-	spin_lock_irq(&dev_priv->mmio_flip_lock);
-	intel_crtc->mmio_flip.status = INTEL_MMIO_FLIP_IDLE;
-	spin_unlock_irq(&dev_priv->mmio_flip_lock);
 }
 
 static void intel_mmio_flip_work_func(struct work_struct *work)
 {
 	struct intel_crtc *intel_crtc =
 		container_of(work, struct intel_crtc, mmio_flip.work);
-
-	intel_do_mmio_flip(intel_crtc);
-}
-
-static int intel_postpone_flip(struct drm_i915_gem_object *obj)
-{
 	struct intel_engine_cs *ring;
-	int ret;
-
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
-
-	if (!obj->last_write_seqno)
-		return 0;
-
-	ring = obj->ring;
-
-	if (i915_seqno_passed(ring->get_seqno(ring, true),
-			      obj->last_write_seqno))
-		return 0;
-
-	ret = i915_gem_check_olr(ring, obj->last_write_seqno);
-	if (ret)
-		return ret;
-
-	if (WARN_ON(!ring->irq_get(ring)))
-		return 0;
-
-	return 1;
-}
+	uint32_t seqno;
 
-void intel_notify_mmio_flip(struct intel_engine_cs *ring)
-{
-	struct drm_i915_private *dev_priv = to_i915(ring->dev);
-	struct intel_crtc *intel_crtc;
-	unsigned long irq_flags;
-	u32 seqno;
-
-	seqno = ring->get_seqno(ring, false);
+	seqno = intel_crtc->mmio_flip.seqno;
+	ring = intel_crtc->mmio_flip.ring;
 
-	spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
-	for_each_intel_crtc(ring->dev, intel_crtc) {
-		struct intel_mmio_flip *mmio_flip;
+	if (seqno)
+		WARN_ON(__i915_wait_seqno(ring, seqno,
+					  intel_crtc->reset_counter,
+					  false, NULL, NULL) != 0);
 
-		mmio_flip = &intel_crtc->mmio_flip;
-		if (mmio_flip->status != INTEL_MMIO_FLIP_WAIT_RING)
-			continue;
-
-		if (ring->id != mmio_flip->ring_id)
-			continue;
-
-		if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
-			schedule_work(&intel_crtc->mmio_flip.work);
-			mmio_flip->status = INTEL_MMIO_FLIP_WORK_SCHEDULED;
-			ring->irq_put(ring);
-		}
-	}
-	spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+	intel_do_mmio_flip(intel_crtc);
 }
 
 static int intel_queue_mmio_flip(struct drm_device *dev,
@@ -9493,32 +9444,13 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
 				 struct intel_engine_cs *ring,
 				 uint32_t flags)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int ret;
-
-	if (WARN_ON(intel_crtc->mmio_flip.status != INTEL_MMIO_FLIP_IDLE))
-		return -EBUSY;
-
-	ret = intel_postpone_flip(obj);
-	if (ret < 0)
-		return ret;
-	if (ret == 0) {
-		intel_do_mmio_flip(intel_crtc);
-		return 0;
-	}
 
-	spin_lock_irq(&dev_priv->mmio_flip_lock);
-	intel_crtc->mmio_flip.status = INTEL_MMIO_FLIP_WAIT_RING;
 	intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
-	intel_crtc->mmio_flip.ring_id = obj->ring->id;
-	spin_unlock_irq(&dev_priv->mmio_flip_lock);
+	intel_crtc->mmio_flip.ring = obj->ring;
+
+	schedule_work(&intel_crtc->mmio_flip.work);
 
-	/*
-	 * Double check to catch cases where irq fired before
-	 * mmio flip data was ready
-	 */
-	intel_notify_mmio_flip(obj->ring);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cb0e9dbf1c96..5c622ad2e9aa 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -399,16 +399,9 @@ struct intel_pipe_wm {
 	bool sprites_scaled;
 };
 
-enum intel_mmio_flip_status {
-	INTEL_MMIO_FLIP_IDLE = 0,
-	INTEL_MMIO_FLIP_WAIT_RING,
-	INTEL_MMIO_FLIP_WORK_SCHEDULED,
-};
-
 struct intel_mmio_flip {
 	u32 seqno;
-	u32 ring_id;
-	enum intel_mmio_flip_status status;
+	struct intel_engine_cs *ring;
 	struct work_struct work;
 };
-- 
2.39.2