/* protects the irq masks */
spinlock_t irq_lock;
+ /* protects the mmio flip data */
+ spinlock_t mmio_flip_lock;
+
bool display_irqs_enabled;
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
bool reset;
bool disable_display;
bool disable_vtd_wa;
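+ /* MMIO flips: <0 never, 0 only when the fb is busy on another ring,
+ * >0 always (see use_mmio_flip()) */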
+ int use_mmio_flip;
};
extern struct i915_params i915 __read_mostly;
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
+int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
+
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
return unlikely(atomic_read(&error->reset_counter)
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+void intel_notify_mmio_flip(struct intel_engine_cs *ring);
+
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
return 0;
}
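+/*
+ * Decide whether this flip should be done by writing the display plane
+ * registers directly (MMIO flip) rather than by emitting a flip command
+ * on a ring (CS flip), based on the platform generation, the
+ * use_mmio_flip module parameter and whether the fb is still busy on a
+ * different ring.
+ */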
+static bool use_mmio_flip(struct intel_engine_cs *ring,
+ struct drm_i915_gem_object *obj)
+{
+ /*
+ * MMIO flips are not used on older platforms because the lack of a
+ * flip done interrupt forces us to use CS flips there. Older
+ * platforms derive flip completion using some clever tricks
+ * involving the flip_pending status bits and vblank irqs, so using
+ * MMIO flips would disrupt this mechanism.
+ */
+
+ if (INTEL_INFO(ring->dev)->gen < 5)
+ return false;
+
+ if (i915.use_mmio_flip < 0)
+ return false;
+ else if (i915.use_mmio_flip > 0)
+ return true;
+ else
+ return ring != obj->ring;
+}
+
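+/*
+ * Perform the flip by writing the plane control and surface address
+ * registers directly, instead of queuing a flip command on a ring.
+ */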
+static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_framebuffer *intel_fb =
+ to_intel_framebuffer(intel_crtc->base.primary->fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ u32 dspcntr;
+ u32 reg;
+
+ intel_mark_page_flip_active(intel_crtc);
+
+ reg = DSPCNTR(intel_crtc->plane);
+ dspcntr = I915_READ(reg);
+
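+ /* Keep the plane's tiling mode in sync with the new fb object. */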
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+ }
+ I915_WRITE(reg, dspcntr);
+
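+ /* Writing the new surface address issues the flip; the posting read
+ * flushes the write to the hardware. */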
+ I915_WRITE(DSPSURF(intel_crtc->plane),
+ intel_crtc->unpin_work->gtt_offset);
+ POSTING_READ(DSPSURF(intel_crtc->plane));
+}
+
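+/*
+ * Returns 0 if there is no pending GPU write to the object (or it has
+ * already completed) and the flip can be performed immediately,
+ * 1 if the flip must wait for the seqno to pass (in which case an irq
+ * reference on the ring is held), or a negative error code.
+ */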
+static int intel_postpone_flip(struct drm_i915_gem_object *obj)
+{
+ struct intel_engine_cs *ring;
+ int ret;
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ if (!obj->last_write_seqno)
+ return 0;
+
+ ring = obj->ring;
+
+ if (i915_seqno_passed(ring->get_seqno(ring, true),
+ obj->last_write_seqno))
+ return 0;
+
+ ret = i915_gem_check_olr(ring, obj->last_write_seqno);
+ if (ret)
+ return ret;
+
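+ /* Hold an irq reference so the seqno interrupt will fire; it is
+ * dropped in intel_notify_mmio_flip() once the flip is done. */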
+ if (WARN_ON(!ring->irq_get(ring)))
+ return 0;
+
+ return 1;
+}
+
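+/*
+ * Complete any pending MMIO flips on this ring whose seqno has now
+ * passed. Called when the ring's seqno advances, and again from
+ * intel_queue_mmio_flip() to catch flips queued after the interrupt.
+ */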
+void intel_notify_mmio_flip(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct intel_crtc *intel_crtc;
+ unsigned long irq_flags;
+ u32 seqno;
+
+ seqno = ring->get_seqno(ring, false);
+
+ spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
+ for_each_intel_crtc(ring->dev, intel_crtc) {
+ struct intel_mmio_flip *mmio_flip;
+
+ mmio_flip = &intel_crtc->mmio_flip;
+ if (mmio_flip->seqno == 0)
+ continue;
+
+ if (ring->id != mmio_flip->ring_id)
+ continue;
+
+ if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
+ intel_do_mmio_flip(intel_crtc);
+ mmio_flip->seqno = 0;
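+ /* Drop the irq reference taken in intel_postpone_flip(). */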
+ ring->irq_put(ring);
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+}
+
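+/*
+ * Either perform the MMIO flip right away if the fb object is idle, or
+ * record the seqno and ring so that intel_notify_mmio_flip() completes
+ * the flip once rendering to the object has finished.
+ */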
+static int intel_queue_mmio_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
+ uint32_t flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long irq_flags;
+ int ret;
+
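+ /* Only one MMIO flip may be outstanding per CRTC at a time. */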
+ if (WARN_ON(intel_crtc->mmio_flip.seqno))
+ return -EBUSY;
+
+ ret = intel_postpone_flip(obj);
+ if (ret < 0)
+ return ret;
+ if (ret == 0) {
+ intel_do_mmio_flip(intel_crtc);
+ return 0;
+ }
+
+ spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
+ intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
+ intel_crtc->mmio_flip.ring_id = obj->ring->id;
+ spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+
+ /*
+ * Double check to catch cases where the irq fired before the
+ * mmio flip data was ready.
+ */
+ intel_notify_mmio_flip(obj->ring);
+ return 0;
+}
+
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
work->gtt_offset =
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
- ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, page_flip_flags);
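+ /* Dispatch to an MMIO flip or the platform's CS flip path. */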
+ if (use_mmio_flip(ring, obj))
+ ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
+ page_flip_flags);
+ else
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
+ page_flip_flags);
if (ret)
goto cleanup_unpin;