/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)
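
/*
 * Hedged usage sketch (illustrative, not part of the original file):
 * resetting the GT block with the macro above,
 *
 *	GEN5_IRQ_RESET(GT);
 *
 * expands to the documented sequence against GTIMR/GTIER/GTIIR, with IIR
 * written and posted twice because the hardware can queue a second event
 * behind the one being cleared.
 */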
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)
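
/*
 * Hedged usage sketch (illustrative): an install path pairs a reset with
 * an init, e.g. for the GT block:
 *
 *	GEN5_IRQ_RESET(GT);
 *	...
 *	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
 *
 * gen5_assert_iir_is_zero() inside the init macro then catches any event
 * that managed to sneak in between the reset and the init.
 */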
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}
/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To keep the read-modify-write cycles
 * from interfering, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}
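
/*
 * Hedged usage sketch (illustrative): enabling the CRT hotplug detect bit
 * from process context would look like
 *
 *	i915_hotplug_interrupt_update(dev_priv,
 *				      CRT_HOTPLUG_INT_EN,
 *				      CRT_HOTPLUG_INT_EN);
 *
 * while a caller already holding dev_priv->irq_lock would use the
 * _locked variant with the same arguments.
 */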
/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
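
/*
 * Worked example (hedged, not part of the original source): with
 * interrupt_mask = 0x0c and enabled_irq_mask = 0x04, the update above
 * first clears bits 2 and 3 of the cached mask, then ORs in
 * (~0x04 & 0x0c) == 0x08. Since set DEIMR bits mask (disable) their
 * interrupts, bit 2 ends up enabled, bit 3 disabled, and every bit
 * outside interrupt_mask keeps its previous value.
 */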
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(dev_priv->rps.pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	return (mask & ~dev_priv->rps.pm_intr_keep);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&dev_priv->rps.work);
	gen6_reset_rps_interrupts(dev_priv);
}
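
/*
 * Hedged note (illustrative, not from the original source): the teardown
 * order above matters. The interrupt sources are masked under irq_lock
 * first, synchronize_irq() then waits out any handler still running on
 * another CPU, and only then is the worker cancelled, so nothing can
 * requeue rps.work behind cancel_work_sync(). The same pattern for a
 * generic handler/worker pair would be, in order:
 *
 *	mask_and_disable_irq_sources();
 *	synchronize_irq(irq);
 *	cancel_work_sync(&work);
 */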
/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
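
/*
 * Worked example (hedged): PIPESTAT packs enable bits in the high 16 bits
 * and status bits in the low 16, so for most events the enable bit is
 * simply status << 16. The sprite flip-done bits are the exception the
 * helper above handles: a status_mask containing
 * SPRITE0_FLIP_DONE_INT_STATUS_VLV must map to
 * SPRITE0_FLIP_DONE_INT_EN_VLV rather than to the bit sixteen positions
 * up, hence the explicit clear-and-reassign.
 */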
static void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

static void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */
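
/*
 * Worked reading of the diagram (hedged, illustrative): the three counter
 * rows show the same physical line being reported with values that differ
 * by a small constant offset depending on the platform (gen2 vs. gen3+
 * vs. hsw+ hdmi). __intel_get_crtc_scanline() below compensates by adding
 * crtc->scanline_offset and wrapping modulo vtotal, so all callers see a
 * single consistent convention.
 */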
static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
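
/*
 * Worked example (hedged, illustrative numbers): the hardware frame
 * counter increments at the start of active, not at vblank start. With
 * htotal = 100, hsync_start = 90 and vblank_start = 480, vbl_start
 * becomes 480 * 100 - (100 - 90) = 47990 pixels. A raw pixel count of
 * 47990 or more means the start-of-vblank event for this frame has
 * already fired, so (pixel >= vbl_start) bumps the cooked counter by one
 * and userspace sees it flip at the vblank interrupt rather than a few
 * lines later.
 */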
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
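
/*
 * Hedged example (illustrative numbers): with vtotal = 525 and
 * scanline_offset = 1, a raw PIPEDSL read of 524 normalizes to
 * (524 + 1) % 525 == 0, i.e. the first line of the frame, keeping the
 * returned value consistent with the counter conventions shown in the
 * timing diagram above.
 */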
static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to keep
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}
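
/*
 * Worked example for i915_get_crtc_scanoutpos() above (hedged,
 * illustrative numbers): with vbl_start = 480, vbl_end = 483 and
 * vtotal = 483, a position of 481 (one line into vblank) is reported as
 * 481 - 483 = -2, counting up towards 0 at vbl_end, while a position of
 * 10 in active is reported as 10 + (483 - 483) = 10. Callers therefore
 * get a negative *vpos while in vblank and a non-negative one outside.
 */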
static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->hwmode);
}
static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}
static void notify_ring(struct intel_engine_cs *engine)
{
	smp_store_mb(engine->breadcrumbs.irq_posted, true);
	if (intel_engine_wakeup(engine)) {
		trace_i915_gem_request_notify(engine);
		engine->breadcrumbs.irq_wakeups++;
	}
}
static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;
	unsigned int mul = 100;

	if (old->cz_clock == 0)
		return false;

	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
		mul <<= 8;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->czclk_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;

	return c0 >= time;
}
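
/*
 * Hedged reading of the math above (illustrative): both sides of the
 * final compare are in the same scaled units, so "c0 >= time" is
 * effectively asking whether
 *
 *	(busy_ticks * 100) / elapsed_interval >= threshold
 *
 * i.e. whether the combined render+media C0 residency percentage over
 * the evaluation interval reached the given threshold.
 */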
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}
static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		if (intel_engine_has_waiter(engine))
			return true;

	return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancellation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;
	if (client_boost || any_waiters(dev_priv))
		max = dev_priv->rps.max_freq;
	if (client_boost && new_delay < dev_priv->rps.boost_freq) {
		new_delay = dev_priv->rps.boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (client_boost || any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}
static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
		notify_ring(engine);
	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
		tasklet_schedule(&engine->irq_tasklet);
}

static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
				   u32 master_ctl,
				   u32 gt_iir[4])
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
		if (gt_iir[0]) {
			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
		if (gt_iir[1]) {
			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
		if (gt_iir[3]) {
			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
		if (gt_iir[2] & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      gt_iir[2] & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				u32 gt_iir[4])
{
	if (gt_iir[0]) {
		gen8_cs_irq_handler(&dev_priv->engine[RCS],
				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(&dev_priv->engine[BCS],
				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
	}

	if (gt_iir[1]) {
		gen8_cs_irq_handler(&dev_priv->engine[VCS],
				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(&dev_priv->engine[VCS2],
				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
	}

	if (gt_iir[3])
		gen8_cs_irq_handler(&dev_priv->engine[VECS],
				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);

	if (gt_iir[2] & dev_priv->pm_rps_events)
		gen6_rps_irq_handler(dev_priv, gt_iir[2]);
}
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}
/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}
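
/*
 * Hedged usage sketch (illustrative): accumulating results from two
 * hotplug registers into the same masks, as the SPT handler further down
 * does:
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(&pin_mask, &long_mask, trigger1, dig_reg1,
 *			   hpd_spt, spt_port_hotplug_long_detect);
 *	intel_get_hpd_pins(&pin_mask, &long_mask, trigger2, dig_reg2,
 *			   hpd_spt, spt_port_hotplug2_long_detect);
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */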
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
								 pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}
static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	bool ret;

	ret = drm_handle_vblank(&dev_priv->drm, pipe);
	if (ret)
		intel_finish_page_flip_mmio(dev_priv, pipe);

	return ret;
}
static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}

	spin_unlock(&dev_priv->irq_lock);
}
static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			intel_finish_page_flip_cs(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status)
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
		POSTING_READ(VLV_MASTER_IER);

		if (gt_iir)
			snb_gt_irq_handler(dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 master_ctl, iir;
		u32 gt_iir[4] = {};
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);

		gen8_gt_irq_handler(dev_priv, gt_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port);
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}
static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
2115 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2119 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2121 if (hotplug_trigger)
2122 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2124 if (de_iir & DE_AUX_CHANNEL_A)
2125 dp_aux_irq_handler(dev_priv);
2127 if (de_iir & DE_GSE)
2128 intel_opregion_asle_intr(dev_priv);
2130 if (de_iir & DE_POISON)
2131 DRM_ERROR("Poison interrupt\n");
2133 for_each_pipe(dev_priv, pipe) {
2134 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2135 intel_pipe_handle_vblank(dev_priv, pipe))
2136 intel_check_page_flip(dev_priv, pipe);
2138 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2139 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2141 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2142 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2144 /* planes/pipes map 1:1 on ilk+ */
2145 if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2146 intel_finish_page_flip_cs(dev_priv, pipe);
2149 /* check event from PCH */
2150 if (de_iir & DE_PCH_EVENT) {
2151 u32 pch_iir = I915_READ(SDEIIR);
2153 if (HAS_PCH_CPT(dev_priv))
2154 cpt_irq_handler(dev_priv, pch_iir);
2156 ibx_irq_handler(dev_priv, pch_iir);
2158 /* should clear the PCH hotplug event before clearing the CPU irq */
2159 I915_WRITE(SDEIIR, pch_iir);
2162 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
2163 ironlake_rps_change_irq_handler(dev_priv);
2166 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2170 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2172 if (hotplug_trigger)
2173 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2175 if (de_iir & DE_ERR_INT_IVB)
2176 ivb_err_int_handler(dev_priv);
2178 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2179 dp_aux_irq_handler(dev_priv);
2181 if (de_iir & DE_GSE_IVB)
2182 intel_opregion_asle_intr(dev_priv);
2184 for_each_pipe(dev_priv, pipe) {
2185 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2186 intel_pipe_handle_vblank(dev_priv, pipe))
2187 intel_check_page_flip(dev_priv, pipe);
2189 /* planes/pipes map 1:1 on ilk+ */
2190 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2191 intel_finish_page_flip_cs(dev_priv, pipe);
2194 /* check event from PCH */
2195 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2196 u32 pch_iir = I915_READ(SDEIIR);
2198 cpt_irq_handler(dev_priv, pch_iir);
2200 /* clear the PCH hotplug event before clearing the CPU irq */
2201 I915_WRITE(SDEIIR, pch_iir);
2206 * To handle irqs with the minimum potential races with fresh interrupts, we:
2207 * 1 - Disable Master Interrupt Control.
2208 * 2 - Find the source(s) of the interrupt.
2209 * 3 - Clear the Interrupt Identity bits (IIR).
2210 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2211 * 5 - Re-enable Master Interrupt Control.
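 *
 * A distilled sketch of that sequence (illustrative only; the real
 * implementation with all its platform checks is ironlake_irq_handler()
 * below):
 *
 *	de_ier = I915_READ(DEIER);
 *	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);	(step 1)
 *	de_iir = I915_READ(DEIIR);				(step 2)
 *	I915_WRITE(DEIIR, de_iir);				(step 3)
 *	... process the bits that were set in de_iir ...	(step 4)
 *	I915_WRITE(DEIER, de_ier);				(step 5)
 */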
2213 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2215 struct drm_device *dev = arg;
2216 struct drm_i915_private *dev_priv = to_i915(dev);
2217 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2218 irqreturn_t ret = IRQ_NONE;
2220 if (!intel_irqs_enabled(dev_priv))
2223 /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
2224 disable_rpm_wakeref_asserts(dev_priv);
2226 /* disable master interrupt before clearing iir */
2227 de_ier = I915_READ(DEIER);
2228 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2229 POSTING_READ(DEIER);
2231 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2232 * interrupts will be stored on its back queue, and then we'll be
2233 * able to process them after we restore SDEIER (as soon as we restore
2234 * it, we'll get an interrupt if SDEIIR still has something to process
2235 * due to its back queue). */
2236 if (!HAS_PCH_NOP(dev_priv)) {
2237 sde_ier = I915_READ(SDEIER);
2238 I915_WRITE(SDEIER, 0);
2239 POSTING_READ(SDEIER);
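/*
 * Illustrative sequence of the race this avoids (assuming a hotplug
 * event arrives while we are still in this handler): with SDEIER == 0
 * the new event is latched into SDEIIR but not forwarded to the CPU;
 * restoring SDEIER at the end re-asserts the interrupt for anything
 * still pending in SDEIIR, so the event is replayed rather than lost.
 */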
2242 /* Find, clear, then process each source of interrupt */
2244 gt_iir = I915_READ(GTIIR);
2246 I915_WRITE(GTIIR, gt_iir);
2248 if (INTEL_GEN(dev_priv) >= 6)
2249 snb_gt_irq_handler(dev_priv, gt_iir);
2251 ilk_gt_irq_handler(dev_priv, gt_iir);
2254 de_iir = I915_READ(DEIIR);
2256 I915_WRITE(DEIIR, de_iir);
2258 if (INTEL_GEN(dev_priv) >= 7)
2259 ivb_display_irq_handler(dev_priv, de_iir);
2261 ilk_display_irq_handler(dev_priv, de_iir);
2264 if (INTEL_GEN(dev_priv) >= 6) {
2265 u32 pm_iir = I915_READ(GEN6_PMIIR);
2267 I915_WRITE(GEN6_PMIIR, pm_iir);
2269 gen6_rps_irq_handler(dev_priv, pm_iir);
2273 I915_WRITE(DEIER, de_ier);
2274 POSTING_READ(DEIER);
2275 if (!HAS_PCH_NOP(dev_priv)) {
2276 I915_WRITE(SDEIER, sde_ier);
2277 POSTING_READ(SDEIER);
2280 /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
2281 enable_rpm_wakeref_asserts(dev_priv);
2286 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2287 u32 hotplug_trigger,
2288 const u32 hpd[HPD_NUM_PINS])
2290 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2292 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2293 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2295 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2296 dig_hotplug_reg, hpd,
2297 bxt_port_hotplug_long_detect);
2299 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2303 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2305 irqreturn_t ret = IRQ_NONE;
2309 if (master_ctl & GEN8_DE_MISC_IRQ) {
2310 iir = I915_READ(GEN8_DE_MISC_IIR);
2312 I915_WRITE(GEN8_DE_MISC_IIR, iir);
2314 if (iir & GEN8_DE_MISC_GSE)
2315 intel_opregion_asle_intr(dev_priv);
2317 DRM_ERROR("Unexpected DE Misc interrupt\n");
2320 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2323 if (master_ctl & GEN8_DE_PORT_IRQ) {
2324 iir = I915_READ(GEN8_DE_PORT_IIR);
2329 I915_WRITE(GEN8_DE_PORT_IIR, iir);
2332 tmp_mask = GEN8_AUX_CHANNEL_A;
2333 if (INTEL_INFO(dev_priv)->gen >= 9)
2334 tmp_mask |= GEN9_AUX_CHANNEL_B |
2335 GEN9_AUX_CHANNEL_C |
2338 if (iir & tmp_mask) {
2339 dp_aux_irq_handler(dev_priv);
2343 if (IS_BROXTON(dev_priv)) {
2344 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2346 bxt_hpd_irq_handler(dev_priv, tmp_mask,
2350 } else if (IS_BROADWELL(dev_priv)) {
2351 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2353 ilk_hpd_irq_handler(dev_priv,
2359 if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2360 gmbus_irq_handler(dev_priv);
2365 DRM_ERROR("Unexpected DE Port interrupt\n");
2368 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2371 for_each_pipe(dev_priv, pipe) {
2372 u32 flip_done, fault_errors;
2374 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2377 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2379 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2384 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2386 if (iir & GEN8_PIPE_VBLANK &&
2387 intel_pipe_handle_vblank(dev_priv, pipe))
2388 intel_check_page_flip(dev_priv, pipe);
2391 if (INTEL_INFO(dev_priv)->gen >= 9)
2392 flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
2394 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
2397 intel_finish_page_flip_cs(dev_priv, pipe);
2399 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2400 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2402 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2403 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2406 if (INTEL_INFO(dev_priv)->gen >= 9)
2407 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2409 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2412 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2417 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2418 master_ctl & GEN8_DE_PCH_IRQ) {
2420 * FIXME(BDW): Assume for now that the new interrupt handling
2421 * scheme also closed the SDE interrupt handling race we've seen
2422 * on older pch-split platforms. But this needs testing.
2424 iir = I915_READ(SDEIIR);
2426 I915_WRITE(SDEIIR, iir);
2429 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
2430 spt_irq_handler(dev_priv, iir);
2432 cpt_irq_handler(dev_priv, iir);
2435 * Like on previous PCH there seems to be something
2436 * fishy going on with forwarding PCH interrupts.
2438 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2445 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2447 struct drm_device *dev = arg;
2448 struct drm_i915_private *dev_priv = to_i915(dev);
2453 if (!intel_irqs_enabled(dev_priv))
2456 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2457 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2461 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2463 /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
2464 disable_rpm_wakeref_asserts(dev_priv);
2466 /* Find, clear, then process each source of interrupt */
2467 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2468 gen8_gt_irq_handler(dev_priv, gt_iir);
2469 ret |= gen8_de_irq_handler(dev_priv, master_ctl);
2471 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2472 POSTING_READ_FW(GEN8_MASTER_IRQ);
2474 enable_rpm_wakeref_asserts(dev_priv);
2479 static void i915_error_wake_up(struct drm_i915_private *dev_priv)
2482 * Notify all waiters for GPU completion events that reset state has
2483 * been changed, and that they need to restart their wait after
2484 * checking for potential errors (and bail out to drop locks if there is
2485 * a gpu reset pending so that i915_reset_and_wakeup can acquire them).
2488 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2489 wake_up_all(&dev_priv->gpu_error.wait_queue);
2491 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2492 wake_up_all(&dev_priv->pending_flip_queue);
2496 * i915_reset_and_wakeup - do process context error handling work
2497 * @dev_priv: i915 device private
2499 * Fire an error uevent so userspace can see that a hang or error occurred.
2502 static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
2504 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
2505 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2506 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2507 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2510 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
2513 * Note that there's only one work item which does gpu resets, so we
2514 * need not worry about concurrent gpu resets potentially incrementing
2515 * error->reset_counter twice. We only need to take care of another
2516 * racing irq/hangcheck declaring the gpu dead for a second time. A
2517 * quick check for that is good enough: schedule_work ensures the
2518 * correct ordering between hang detection and this work item, and since
2519 * the reset in-progress bit is only ever set by code outside of this
2520 * work we don't need to worry about any other races.
2522 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
2523 DRM_DEBUG_DRIVER("resetting chip\n");
2524 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
2527 * In most cases it's guaranteed that we get here with an RPM
2528 * reference held, for example because there is a pending GPU
2529 * request that won't finish until the reset is done. This
2530 * isn't the case at least when we get here by doing a
2531 * simulated reset via debugfs, so get an RPM reference.
2533 intel_runtime_pm_get(dev_priv);
2535 intel_prepare_reset(dev_priv);
2538 * All state reset _must_ be completed before we update the
2539 * reset counter, for otherwise waiters might miss the reset
2540 * pending state and not properly drop locks, resulting in
2541 * deadlocks with the reset work.
2543 ret = i915_reset(dev_priv);
2545 intel_finish_reset(dev_priv);
2547 intel_runtime_pm_put(dev_priv);
2550 kobject_uevent_env(kobj,
2551 KOBJ_CHANGE, reset_done_event);
2554 * Note: The wake_up also serves as a memory barrier so that
2555 * waiters see the updated value of the reset counter atomic_t.
2557 wake_up_all(&dev_priv->gpu_error.reset_queue);
2561 static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
2563 uint32_t instdone[I915_NUM_INSTDONE_REG];
2564 u32 eir = I915_READ(EIR);
2570 pr_err("render error detected, EIR: 0x%08x\n", eir);
2572 i915_get_extra_instdone(dev_priv, instdone);
2574 if (IS_G4X(dev_priv)) {
2575 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2576 u32 ipeir = I915_READ(IPEIR_I965);
2578 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2579 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2580 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2581 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2582 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2583 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2584 I915_WRITE(IPEIR_I965, ipeir);
2585 POSTING_READ(IPEIR_I965);
2587 if (eir & GM45_ERROR_PAGE_TABLE) {
2588 u32 pgtbl_err = I915_READ(PGTBL_ER);
2589 pr_err("page table error\n");
2590 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2591 I915_WRITE(PGTBL_ER, pgtbl_err);
2592 POSTING_READ(PGTBL_ER);
2596 if (!IS_GEN2(dev_priv)) {
2597 if (eir & I915_ERROR_PAGE_TABLE) {
2598 u32 pgtbl_err = I915_READ(PGTBL_ER);
2599 pr_err("page table error\n");
2600 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2601 I915_WRITE(PGTBL_ER, pgtbl_err);
2602 POSTING_READ(PGTBL_ER);
2606 if (eir & I915_ERROR_MEMORY_REFRESH) {
2607 pr_err("memory refresh error:\n");
2608 for_each_pipe(dev_priv, pipe)
2609 pr_err("pipe %c stat: 0x%08x\n",
2610 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2611 /* pipestat has already been acked */
2613 if (eir & I915_ERROR_INSTRUCTION) {
2614 pr_err("instruction error\n");
2615 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2616 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2617 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2618 if (INTEL_GEN(dev_priv) < 4) {
2619 u32 ipeir = I915_READ(IPEIR);
2621 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2622 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2623 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2624 I915_WRITE(IPEIR, ipeir);
2625 POSTING_READ(IPEIR);
2627 u32 ipeir = I915_READ(IPEIR_I965);
2629 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2630 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2631 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2632 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2633 I915_WRITE(IPEIR_I965, ipeir);
2634 POSTING_READ(IPEIR_I965);
2638 I915_WRITE(EIR, eir);
2640 eir = I915_READ(EIR);
2643 * some errors might have become stuck, so mask them.
2646 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2647 I915_WRITE(EMR, I915_READ(EMR) | eir);
2648 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
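/*
 * Worked example (illustrative): if we wrote 0x00000010 back to EIR
 * to clear it but the read-back above still shows 0x00000010, that
 * error condition is stuck; OR-ing it into EMR masks it so it cannot
 * keep re-raising the error interrupt indefinitely.
 */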
2653 * i915_handle_error - handle a gpu error
2654 * @dev_priv: i915 device private
2655 * @engine_mask: mask representing engines that are hung
2656 * @fmt: Error message format string
2657 *
2658 * Do some basic checking of register state at error time and
2659 * dump it to the syslog. Also call i915_capture_error_state() to make
2660 * sure we get a record and make it available in debugfs. Fire a uevent
2661 * so userspace knows something bad happened (should trigger collection of a ring dump etc.).
2663 void i915_handle_error(struct drm_i915_private *dev_priv,
2665 const char *fmt, ...)
2670 va_start(args, fmt);
2671 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2674 i915_capture_error_state(dev_priv, engine_mask, error_msg);
2675 i915_report_and_clear_eir(dev_priv);
2678 atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2679 &dev_priv->gpu_error.reset_counter);
2682 * Wakeup waiting processes so that the reset function
2683 * i915_reset_and_wakeup doesn't deadlock trying to grab
2684 * various locks. By bumping the reset counter first, the woken
2685 * processes will see a reset in progress and back off,
2686 * releasing their locks and then waiting for the reset completion.
2687 * We must do this for _all_ gpu waiters that might hold locks
2688 * that the reset work needs to acquire.
2690 * Note: The wake_up serves as the required memory barrier to
2691 * ensure that the waiters see the updated value of the reset counter.
2694 i915_error_wake_up(dev_priv);
2697 i915_reset_and_wakeup(dev_priv);
2700 /* Called from drm generic code, passed 'crtc' which
2701 * we use as a pipe index
2703 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2705 struct drm_i915_private *dev_priv = to_i915(dev);
2706 unsigned long irqflags;
2708 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2709 if (INTEL_INFO(dev)->gen >= 4)
2710 i915_enable_pipestat(dev_priv, pipe,
2711 PIPE_START_VBLANK_INTERRUPT_STATUS);
2713 i915_enable_pipestat(dev_priv, pipe,
2714 PIPE_VBLANK_INTERRUPT_STATUS);
2715 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2720 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2722 struct drm_i915_private *dev_priv = to_i915(dev);
2723 unsigned long irqflags;
2724 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2725 DE_PIPE_VBLANK(pipe);
2727 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2728 ilk_enable_display_irq(dev_priv, bit);
2729 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2734 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2736 struct drm_i915_private *dev_priv = to_i915(dev);
2737 unsigned long irqflags;
2739 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2740 i915_enable_pipestat(dev_priv, pipe,
2741 PIPE_START_VBLANK_INTERRUPT_STATUS);
2742 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2747 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2749 struct drm_i915_private *dev_priv = to_i915(dev);
2750 unsigned long irqflags;
2752 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2753 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2754 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2759 /* Called from drm generic code, passed 'crtc' which
2760 * we use as a pipe index
2762 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2764 struct drm_i915_private *dev_priv = to_i915(dev);
2765 unsigned long irqflags;
2767 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2768 i915_disable_pipestat(dev_priv, pipe,
2769 PIPE_VBLANK_INTERRUPT_STATUS |
2770 PIPE_START_VBLANK_INTERRUPT_STATUS);
2771 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2774 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2776 struct drm_i915_private *dev_priv = to_i915(dev);
2777 unsigned long irqflags;
2778 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2779 DE_PIPE_VBLANK(pipe);
2781 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2782 ilk_disable_display_irq(dev_priv, bit);
2783 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2786 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2788 struct drm_i915_private *dev_priv = to_i915(dev);
2789 unsigned long irqflags;
2791 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2792 i915_disable_pipestat(dev_priv, pipe,
2793 PIPE_START_VBLANK_INTERRUPT_STATUS);
2794 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2797 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2799 struct drm_i915_private *dev_priv = to_i915(dev);
2800 unsigned long irqflags;
2802 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2803 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2804 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2808 ring_idle(struct intel_engine_cs *engine, u32 seqno)
2810 return i915_seqno_passed(seqno,
2811 READ_ONCE(engine->last_submitted_seqno));
2815 ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
2817 if (INTEL_GEN(engine->i915) >= 8) {
2818 return (ipehr >> 23) == 0x1c;
2820 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2821 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2822 MI_SEMAPHORE_REGISTER);
2826 static struct intel_engine_cs *
2827 semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
2830 struct drm_i915_private *dev_priv = engine->i915;
2831 struct intel_engine_cs *signaller;
2833 if (INTEL_GEN(dev_priv) >= 8) {
2834 for_each_engine(signaller, dev_priv) {
2835 if (engine == signaller)
2838 if (offset == signaller->semaphore.signal_ggtt[engine->id])
2842 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2844 for_each_engine(signaller, dev_priv) {
2845 if (engine == signaller)
2848 if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
2853 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2854 engine->id, ipehr, offset);
2859 static struct intel_engine_cs *
2860 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2862 struct drm_i915_private *dev_priv = engine->i915;
2863 void __iomem *vaddr;
2864 u32 cmd, ipehr, head;
2869 * This function does not support execlist mode - any attempt to
2870 * proceed further into this function will result in a kernel panic
2871 * when dereferencing ring->buffer, which is not set up in execlist
2874 * The correct way of doing it would be to derive the currently
2875 * executing ring buffer from the current context, which is derived
2876 * from the currently running request. Unfortunately, to get the
2877 * current request we would have to grab the struct_mutex before doing
2878 * anything else, which would be ill-advised since some other thread
2879 * might have grabbed it already and managed to hang itself, causing
2880 * the hang checker to deadlock.
2882 * Therefore, this function does not support execlist mode in its
2883 * current form. Just return NULL and move on.
2885 if (engine->buffer == NULL)
2888 ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
2889 if (!ipehr_is_semaphore_wait(engine, ipehr))
2893 * HEAD is likely pointing to the dword after the actual command,
2894 * so scan backwards until we find the MBOX. But limit it to just 4
2895 * or 5 dwords depending on the semaphore wait command size.
2896 * Note that we don't care about ACTHD here since that might
2897 * point at a batch, and semaphores are always emitted into the
2898 * ringbuffer itself.
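 *
 * Dword layout assumed by the scan below (gen8+ case, as implied by
 * the ioread32() offsets used later in this function):
 *
 *	head + 0:  semaphore wait command dword
 *	head + 4:  semaphore value (reported, plus one, via *seqno)
 *	head + 8:  GGTT offset of the semaphore, low dword
 *	head + 12: GGTT offset of the semaphore, high dword
 */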
2900 head = I915_READ_HEAD(engine) & HEAD_ADDR;
2901 backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
2902 vaddr = (void __iomem *)engine->buffer->vaddr;
2904 for (i = backwards; i; --i) {
2906 * Be paranoid and presume the hw has gone off into the wild -
2907 * our ring is smaller than what the hardware (and hence
2908 * HEAD_ADDR) allows. Also handles wrap-around.
2910 head &= engine->buffer->size - 1;
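/*
 * E.g. with a 16 KiB ring (size 0x4000) a head of 0x4010 masks down
 * to 0x0010; this relies on the ring size being a power of two, so
 * the AND both bounds the offset and handles wrap-around.
 */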
2912 /* This here seems to blow up */
2913 cmd = ioread32(vaddr + head);
2923 *seqno = ioread32(vaddr + head + 4) + 1;
2924 if (INTEL_GEN(dev_priv) >= 8) {
2925 offset = ioread32(vaddr + head + 12);
2927 offset |= ioread32(vaddr + head + 8);
2929 return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
2932 static int semaphore_passed(struct intel_engine_cs *engine)
2934 struct drm_i915_private *dev_priv = engine->i915;
2935 struct intel_engine_cs *signaller;
2938 engine->hangcheck.deadlock++;
2940 signaller = semaphore_waits_for(engine, &seqno);
2941 if (signaller == NULL)
2944 /* Prevent pathological recursion due to driver bugs */
2945 if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
2948 if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
2951 /* cursory check for an unkickable deadlock */
2952 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2953 semaphore_passed(signaller) < 0)
2959 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2961 struct intel_engine_cs *engine;
2963 for_each_engine(engine, dev_priv)
2964 engine->hangcheck.deadlock = 0;
2967 static bool subunits_stuck(struct intel_engine_cs *engine)
2969 u32 instdone[I915_NUM_INSTDONE_REG];
2973 if (engine->id != RCS)
2976 i915_get_extra_instdone(engine->i915, instdone);
2978 /* There might be unstable subunit states even when
2979 * actual head is not moving. Filter out the unstable ones by
2980 * accumulating the undone -> done transitions and only
2981 * consider those as progress.
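 *
 * Worked example (illustrative): with an accumulated mask of 0b0110
 * and a fresh INSTDONE sample of 0b1100, tmp becomes 0b1110, which
 * differs from the accumulated 0b0110: bit 3 made an undone -> done
 * transition, so the subunits are treated as still making progress.
 */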
2984 for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
2985 const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
2987 if (tmp != engine->hangcheck.instdone[i])
2990 engine->hangcheck.instdone[i] |= tmp;
2996 static enum intel_ring_hangcheck_action
2997 head_stuck(struct intel_engine_cs *engine, u64 acthd)
2999 if (acthd != engine->hangcheck.acthd) {
3001 /* Clear subunit states on head movement */
3002 memset(engine->hangcheck.instdone, 0,
3003 sizeof(engine->hangcheck.instdone));
3005 return HANGCHECK_ACTIVE;
3008 if (!subunits_stuck(engine))
3009 return HANGCHECK_ACTIVE;
3011 return HANGCHECK_HUNG;
3014 static enum intel_ring_hangcheck_action
3015 ring_stuck(struct intel_engine_cs *engine, u64 acthd)
3017 struct drm_i915_private *dev_priv = engine->i915;
3018 enum intel_ring_hangcheck_action ha;
3021 ha = head_stuck(engine, acthd);
3022 if (ha != HANGCHECK_HUNG)
3025 if (IS_GEN2(dev_priv))
3026 return HANGCHECK_HUNG;
3028 /* Is the chip hanging on a WAIT_FOR_EVENT?
3029 * If so we can simply poke the RB_WAIT bit
3030 * and break the hang. This should work on
3031 * all but the second generation chipsets.
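 *
 * "Poking" here means writing the value just read from the CTL
 * register straight back with the wait bit still set; the wait bits
 * are understood to act as write-one-to-clear (an assumption based
 * on how the code below uses them), which releases the ring from
 * its wait state.
 */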
3033 tmp = I915_READ_CTL(engine);
3034 if (tmp & RING_WAIT) {
3035 i915_handle_error(dev_priv, 0,
3036 "Kicking stuck wait on %s",
3038 I915_WRITE_CTL(engine, tmp);
3039 return HANGCHECK_KICK;
3042 if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3043 switch (semaphore_passed(engine)) {
3045 return HANGCHECK_HUNG;
3047 i915_handle_error(dev_priv, 0,
3048 "Kicking stuck semaphore on %s",
3050 I915_WRITE_CTL(engine, tmp);
3051 return HANGCHECK_KICK;
3053 return HANGCHECK_WAIT;
3057 return HANGCHECK_HUNG;
3060 static unsigned long kick_waiters(struct intel_engine_cs *engine)
3062 struct drm_i915_private *i915 = engine->i915;
3063 unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups);
3065 if (engine->hangcheck.user_interrupts == irq_count &&
3066 !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
3067 if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
3068 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3071 intel_engine_enable_fake_irq(engine);
3077 * This is called when the chip hasn't reported back with completed
3078 * batchbuffers in a long time. We keep track of per-ring seqno progress and
3079 * if there is no progress, the hangcheck score for that ring is increased.
3080 * Further, acthd is inspected to see if the ring is stuck. If it is, we
3081 * kick the ring. If we see no progress on three subsequent calls
3082 * we assume the chip is wedged and try to fix it by resetting the chip.
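 *
 * Worked example of the scoring (assuming the usual increments of
 * BUSY = 1, KICK = 5 and HUNG = 20 per hangcheck tick, checked
 * against the HANGCHECK_SCORE_RING_HUNG threshold): a busy ring
 * stuck on the same seqno gains BUSY per tick and takes many ticks
 * to be declared hung, a repeatedly kicked ring gets there several
 * times faster, and a HANGCHECK_HUNG diagnosis gets there in about
 * two ticks; ACTIVE_DECAY (15) is subtracted whenever progress is
 * seen again.
 */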
3084 static void i915_hangcheck_elapsed(struct work_struct *work)
3086 struct drm_i915_private *dev_priv =
3087 container_of(work, typeof(*dev_priv),
3088 gpu_error.hangcheck_work.work);
3089 struct intel_engine_cs *engine;
3090 unsigned int hung = 0, stuck = 0;
3095 #define ACTIVE_DECAY 15
3097 if (!i915.enable_hangcheck)
3100 if (!READ_ONCE(dev_priv->gt.awake))
3103 /* As enabling the GPU requires fairly extensive mmio access,
3104 * periodically arm the mmio checker to see if we are triggering
3105 * any invalid access.
3107 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
3109 for_each_engine(engine, dev_priv) {
3110 bool busy = intel_engine_has_waiter(engine);
3113 unsigned user_interrupts;
3115 semaphore_clear_deadlocks(dev_priv);
3117 /* We don't strictly need an irq-barrier here, as we are not
3118 * serving an interrupt request, but be paranoid in case the
3119 * barrier has side-effects (such as preventing a broken
3120 * cacheline snoop) and so be sure that we can see the seqno
3121 * advance. If the seqno should stick, due to a stale
3122 * cacheline, we would erroneously declare the GPU hung.
3124 if (engine->irq_seqno_barrier)
3125 engine->irq_seqno_barrier(engine);
3127 acthd = intel_ring_get_active_head(engine);
3128 seqno = intel_engine_get_seqno(engine);
3130 /* Reset stuck interrupts between batch advances */
3131 user_interrupts = 0;
3133 if (engine->hangcheck.seqno == seqno) {
3134 if (ring_idle(engine, seqno)) {
3135 engine->hangcheck.action = HANGCHECK_IDLE;
3137 /* Safeguard against driver failure */
3138 user_interrupts = kick_waiters(engine);
3139 engine->hangcheck.score += BUSY;
3142 /* We always increment the hangcheck score
3143 * if the engine is busy and still processing
3144 * the same request, so that no single request
3145 * can run indefinitely (such as a chain of
3146 * batches). The only time we do not increment
3147 * the hangcheck score on this ring is if this
3148 * engine is in a legitimate wait for another
3149 * engine. In that case the waiting engine is a
3150 * victim and we want to be sure we catch the
3151 * right culprit. Then every time we do kick
3152 * the ring, add a small increment to the
3153 * score so that we can catch a batch that is
3154 * being repeatedly kicked and so responsible
3155 * for stalling the machine.
3157 engine->hangcheck.action = ring_stuck(engine,
3160 switch (engine->hangcheck.action) {
3161 case HANGCHECK_IDLE:
3162 case HANGCHECK_WAIT:
3164 case HANGCHECK_ACTIVE:
3165 engine->hangcheck.score += BUSY;
3167 case HANGCHECK_KICK:
3168 engine->hangcheck.score += KICK;
3170 case HANGCHECK_HUNG:
3171 engine->hangcheck.score += HUNG;
3176 if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3177 hung |= intel_engine_flag(engine);
3178 if (engine->hangcheck.action != HANGCHECK_HUNG)
3179 stuck |= intel_engine_flag(engine);
3182 engine->hangcheck.action = HANGCHECK_ACTIVE;
3184 /* Gradually reduce the count so that we catch DoS
3185 * attempts across multiple batches.
3187 if (engine->hangcheck.score > 0)
3188 engine->hangcheck.score -= ACTIVE_DECAY;
3189 if (engine->hangcheck.score < 0)
3190 engine->hangcheck.score = 0;
3192 /* Clear head and subunit states on seqno movement */
3195 memset(engine->hangcheck.instdone, 0,
3196 sizeof(engine->hangcheck.instdone));
3199 engine->hangcheck.seqno = seqno;
3200 engine->hangcheck.acthd = acthd;
3201 engine->hangcheck.user_interrupts = user_interrupts;
3209 /* If some rings hung but others were still busy, only
3210 * blame the hanging rings in the synopsis.
3214 len = scnprintf(msg, sizeof(msg),
3215 "%s on ", stuck == hung ? "No progress" : "Hang");
3216 for_each_engine_masked(engine, dev_priv, hung)
3217 len += scnprintf(msg + len, sizeof(msg) - len,
3218 "%s, ", engine->name);
3221 return i915_handle_error(dev_priv, hung, msg);
3224 /* Reset timer in case GPU hangs without another request being added */
3226 i915_queue_hangcheck(dev_priv);
3229 static void ibx_irq_reset(struct drm_device *dev)
3231 struct drm_i915_private *dev_priv = to_i915(dev);
3233 if (HAS_PCH_NOP(dev))
3236 GEN5_IRQ_RESET(SDE);
3238 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3239 I915_WRITE(SERR_INT, 0xffffffff);
3243 * SDEIER is also touched by the interrupt handler to work around missed PCH
3244 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3245 * instead we unconditionally enable all PCH interrupt sources here, but then
3246 * only unmask them as needed with SDEIMR.
3248 * This function needs to be called before interrupts are enabled.
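 *
 * In other words the gating works like this (illustrative): with
 * SDEIER == 0xffffffff every south event can latch into SDEIIR, and
 * a bit set in SDEIMR keeps the corresponding event from being
 * reported, so runtime enabling/disabling of individual sources is
 * done purely through SDEIMR.
 */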
3250 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3252 struct drm_i915_private *dev_priv = to_i915(dev);
3254 if (HAS_PCH_NOP(dev))
3257 WARN_ON(I915_READ(SDEIER) != 0);
3258 I915_WRITE(SDEIER, 0xffffffff);
3259 POSTING_READ(SDEIER);
3262 static void gen5_gt_irq_reset(struct drm_device *dev)
3264 struct drm_i915_private *dev_priv = to_i915(dev);
3267 if (INTEL_INFO(dev)->gen >= 6)
3268 GEN5_IRQ_RESET(GEN6_PM);
3271 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3275 if (IS_CHERRYVIEW(dev_priv))
3276 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3278 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3280 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3281 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3283 for_each_pipe(dev_priv, pipe) {
3284 I915_WRITE(PIPESTAT(pipe),
3285 PIPE_FIFO_UNDERRUN_STATUS |
3286 PIPESTAT_INT_STATUS_MASK);
3287 dev_priv->pipestat_irq_mask[pipe] = 0;
3290 GEN5_IRQ_RESET(VLV_);
3291 dev_priv->irq_mask = ~0;
3294 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3300 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3301 PIPE_CRC_DONE_INTERRUPT_STATUS;
3303 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3304 for_each_pipe(dev_priv, pipe)
3305 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3307 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3308 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3309 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3310 if (IS_CHERRYVIEW(dev_priv))
3311 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3313 WARN_ON(dev_priv->irq_mask != ~0);
3315 dev_priv->irq_mask = ~enable_mask;
3317 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
3322 static void ironlake_irq_reset(struct drm_device *dev)
3324 struct drm_i915_private *dev_priv = to_i915(dev);
3326 I915_WRITE(HWSTAM, 0xffffffff);
3330 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3332 gen5_gt_irq_reset(dev);
3337 static void valleyview_irq_preinstall(struct drm_device *dev)
3339 struct drm_i915_private *dev_priv = to_i915(dev);
3341 I915_WRITE(VLV_MASTER_IER, 0);
3342 POSTING_READ(VLV_MASTER_IER);
3344 gen5_gt_irq_reset(dev);
3346 spin_lock_irq(&dev_priv->irq_lock);
3347 if (dev_priv->display_irqs_enabled)
3348 vlv_display_irq_reset(dev_priv);
3349 spin_unlock_irq(&dev_priv->irq_lock);
3352 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3354 GEN8_IRQ_RESET_NDX(GT, 0);
3355 GEN8_IRQ_RESET_NDX(GT, 1);
3356 GEN8_IRQ_RESET_NDX(GT, 2);
3357 GEN8_IRQ_RESET_NDX(GT, 3);
3360 static void gen8_irq_reset(struct drm_device *dev)
3362 struct drm_i915_private *dev_priv = to_i915(dev);
3365 I915_WRITE(GEN8_MASTER_IRQ, 0);
3366 POSTING_READ(GEN8_MASTER_IRQ);
3368 gen8_gt_irq_reset(dev_priv);
3370 for_each_pipe(dev_priv, pipe)
3371 if (intel_display_power_is_enabled(dev_priv,
3372 POWER_DOMAIN_PIPE(pipe)))
3373 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3375 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3376 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3377 GEN5_IRQ_RESET(GEN8_PCU_);
3379 if (HAS_PCH_SPLIT(dev))
3383 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3384 unsigned int pipe_mask)
3386 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3389 spin_lock_irq(&dev_priv->irq_lock);
3390 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3391 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3392 dev_priv->de_irq_mask[pipe],
3393 ~dev_priv->de_irq_mask[pipe] | extra_ier);
3394 spin_unlock_irq(&dev_priv->irq_lock);
3397 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3398 unsigned int pipe_mask)
3402 spin_lock_irq(&dev_priv->irq_lock);
3403 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3404 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3405 spin_unlock_irq(&dev_priv->irq_lock);
3407 /* make sure we're done processing display irqs */
3408 synchronize_irq(dev_priv->drm.irq);
3411 static void cherryview_irq_preinstall(struct drm_device *dev)
3413 struct drm_i915_private *dev_priv = to_i915(dev);
3415 I915_WRITE(GEN8_MASTER_IRQ, 0);
3416 POSTING_READ(GEN8_MASTER_IRQ);
3418 gen8_gt_irq_reset(dev_priv);
3420 GEN5_IRQ_RESET(GEN8_PCU_);
3422 spin_lock_irq(&dev_priv->irq_lock);
3423 if (dev_priv->display_irqs_enabled)
3424 vlv_display_irq_reset(dev_priv);
3425 spin_unlock_irq(&dev_priv->irq_lock);
3428 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3429 const u32 hpd[HPD_NUM_PINS])
3431 struct intel_encoder *encoder;
3432 u32 enabled_irqs = 0;
3434 for_each_intel_encoder(&dev_priv->drm, encoder)
3435 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3436 enabled_irqs |= hpd[encoder->hpd_pin];
3438 return enabled_irqs;
3441 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3443 u32 hotplug_irqs, hotplug, enabled_irqs;
3445 if (HAS_PCH_IBX(dev_priv)) {
3446 hotplug_irqs = SDE_HOTPLUG_MASK;
3447 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3449 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3450 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3453 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3456 * Enable digital hotplug on the PCH, and configure the DP short pulse
3457 * duration to 2ms (which is the minimum in the Display Port spec).
3458 * The pulse duration bits are reserved on LPT+.
3460 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3461 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3462 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3463 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3464 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3466 * When CPU and PCH are on the same package, port A
3467 * HPD must be enabled in both north and south.
3469 if (HAS_PCH_LPT_LP(dev_priv))
3470 hotplug |= PORTA_HOTPLUG_ENABLE;
3471 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3474 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3476 u32 hotplug_irqs, hotplug, enabled_irqs;
3478 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3479 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3481 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3483 /* Enable digital hotplug on the PCH */
3484 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3485 hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3486 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3487 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3489 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3490 hotplug |= PORTE_HOTPLUG_ENABLE;
3491 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3494 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3496 u32 hotplug_irqs, hotplug, enabled_irqs;
3498 if (INTEL_GEN(dev_priv) >= 8) {
3499 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3500 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3502 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3503 } else if (INTEL_GEN(dev_priv) >= 7) {
3504 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3505 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3507 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3509 hotplug_irqs = DE_DP_A_HOTPLUG;
3510 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3512 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3516 * Enable digital hotplug on the CPU, and configure the DP short pulse
3517 * duration to 2ms (which is the minimum in the Display Port spec)
3518 * The pulse duration bits are reserved on HSW+.
3520 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3521 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3522 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3523 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3525 ibx_hpd_irq_setup(dev_priv);
3528 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3530 u32 hotplug_irqs, hotplug, enabled_irqs;
3532 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3533 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3535 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3537 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3538 hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3539 PORTA_HOTPLUG_ENABLE;
3541 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3542 hotplug, enabled_irqs);
3543 hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3546 * On BXT the invert bit has to be set based on the AOB design
3547 * of the HPD detection logic, so update it based on the VBT fields.
3550 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3551 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3552 hotplug |= BXT_DDIA_HPD_INVERT;
3553 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3554 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3555 hotplug |= BXT_DDIB_HPD_INVERT;
3556 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3557 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3558 hotplug |= BXT_DDIC_HPD_INVERT;
3560 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3563 static void ibx_irq_postinstall(struct drm_device *dev)
3565 struct drm_i915_private *dev_priv = to_i915(dev);
3568 if (HAS_PCH_NOP(dev))
3571 if (HAS_PCH_IBX(dev))
3572 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3574 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3576 gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3577 I915_WRITE(SDEIMR, ~mask);
3580 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3582 struct drm_i915_private *dev_priv = to_i915(dev);
3583 u32 pm_irqs, gt_irqs;
3585 pm_irqs = gt_irqs = 0;
3587 dev_priv->gt_irq_mask = ~0;
3588 if (HAS_L3_DPF(dev)) {
3589 /* L3 parity interrupt is always unmasked. */
3590 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3591 gt_irqs |= GT_PARITY_ERROR(dev);
3594 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3596 gt_irqs |= ILK_BSD_USER_INTERRUPT;
3598 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3601 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3603 if (INTEL_INFO(dev)->gen >= 6) {
3605 * RPS interrupts will get enabled/disabled on demand when RPS
3606 * itself is enabled/disabled.
3609 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3611 dev_priv->pm_irq_mask = 0xffffffff;
3612 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3616 static int ironlake_irq_postinstall(struct drm_device *dev)
3618 struct drm_i915_private *dev_priv = to_i915(dev);
3619 u32 display_mask, extra_mask;
3621 if (INTEL_INFO(dev)->gen >= 7) {
3622 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3623 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3624 DE_PLANEB_FLIP_DONE_IVB |
3625 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3626 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3627 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3628 DE_DP_A_HOTPLUG_IVB);
3630 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3631 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3633 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3635 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3636 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3640 dev_priv->irq_mask = ~display_mask;
3642 I915_WRITE(HWSTAM, 0xeffe);
3644 ibx_irq_pre_postinstall(dev);
3646 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3648 gen5_gt_irq_postinstall(dev);
3650 ibx_irq_postinstall(dev);
3652 if (IS_IRONLAKE_M(dev)) {
3653 /* Enable PCU event interrupts
3655 * spinlocking not required here for correctness since interrupt
3656 * setup is guaranteed to run in single-threaded context. But we
3657 * need it to make the assert_spin_locked happy. */
3658 spin_lock_irq(&dev_priv->irq_lock);
3659 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3660 spin_unlock_irq(&dev_priv->irq_lock);
3666 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3668 assert_spin_locked(&dev_priv->irq_lock);
3670 if (dev_priv->display_irqs_enabled)
3673 dev_priv->display_irqs_enabled = true;
3675 if (intel_irqs_enabled(dev_priv)) {
3676 vlv_display_irq_reset(dev_priv);
3677 vlv_display_irq_postinstall(dev_priv);
3681 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3683 assert_spin_locked(&dev_priv->irq_lock);
3685 if (!dev_priv->display_irqs_enabled)
3688 dev_priv->display_irqs_enabled = false;
3690 if (intel_irqs_enabled(dev_priv))
3691 vlv_display_irq_reset(dev_priv);
3695 static int valleyview_irq_postinstall(struct drm_device *dev)
3697 struct drm_i915_private *dev_priv = to_i915(dev);
3699 gen5_gt_irq_postinstall(dev);
3701 spin_lock_irq(&dev_priv->irq_lock);
3702 if (dev_priv->display_irqs_enabled)
3703 vlv_display_irq_postinstall(dev_priv);
3704 spin_unlock_irq(&dev_priv->irq_lock);
3706 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3707 POSTING_READ(VLV_MASTER_IER);
3712 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3714 /* These are interrupts we'll toggle with the ring mask register */
3715 uint32_t gt_interrupts[] = {
3716 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3717 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3718 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3719 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3720 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3721 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3722 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3723 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3725 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3726 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3729 if (HAS_L3_DPF(dev_priv))
3730 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
3732 dev_priv->pm_irq_mask = 0xffffffff;
3733 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3734 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3736 * RPS interrupts will get enabled/disabled on demand when RPS itself
3737 * is enabled/disabled.
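 *
 * Reading the GT2 init below: IER is written with 0 (nothing
 * enabled) and IMR with pm_irq_mask == 0xffffffff (everything
 * masked), so PM interrupts stay completely quiet until the RPS
 * code unmasks the ones it wants on demand.
 */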
3739 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3740 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3743 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3745 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3746 uint32_t de_pipe_enables;
3747 u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3748 u32 de_port_enables;
3749 u32 de_misc_masked = GEN8_DE_MISC_GSE;
3752 if (INTEL_INFO(dev_priv)->gen >= 9) {
3753 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3754 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3755 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3757 if (IS_BROXTON(dev_priv))
3758 de_port_masked |= BXT_DE_PORT_GMBUS;
3760 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3761 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3764 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3765 GEN8_PIPE_FIFO_UNDERRUN;
3767 de_port_enables = de_port_masked;
3768 if (IS_BROXTON(dev_priv))
3769 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3770 else if (IS_BROADWELL(dev_priv))
3771 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3773 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3774 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3775 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3777 for_each_pipe(dev_priv, pipe)
3778 if (intel_display_power_is_enabled(dev_priv,
3779 POWER_DOMAIN_PIPE(pipe)))
3780 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3781 dev_priv->de_irq_mask[pipe],
3784 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3785 GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3788 static int gen8_irq_postinstall(struct drm_device *dev)
3790 struct drm_i915_private *dev_priv = to_i915(dev);
3792 if (HAS_PCH_SPLIT(dev))
3793 ibx_irq_pre_postinstall(dev);
3795 gen8_gt_irq_postinstall(dev_priv);
3796 gen8_de_irq_postinstall(dev_priv);
3798 if (HAS_PCH_SPLIT(dev))
3799 ibx_irq_postinstall(dev);
3801 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3802 POSTING_READ(GEN8_MASTER_IRQ);
3807 static int cherryview_irq_postinstall(struct drm_device *dev)
3809 struct drm_i915_private *dev_priv = to_i915(dev);
3811 gen8_gt_irq_postinstall(dev_priv);
3813 spin_lock_irq(&dev_priv->irq_lock);
3814 if (dev_priv->display_irqs_enabled)
3815 vlv_display_irq_postinstall(dev_priv);
3816 spin_unlock_irq(&dev_priv->irq_lock);
3818 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3819 POSTING_READ(GEN8_MASTER_IRQ);
3824 static void gen8_irq_uninstall(struct drm_device *dev)
3826 struct drm_i915_private *dev_priv = to_i915(dev);
3831 gen8_irq_reset(dev);
3834 static void valleyview_irq_uninstall(struct drm_device *dev)
3836 struct drm_i915_private *dev_priv = to_i915(dev);
3841 I915_WRITE(VLV_MASTER_IER, 0);
3842 POSTING_READ(VLV_MASTER_IER);
3844 gen5_gt_irq_reset(dev);
3846 I915_WRITE(HWSTAM, 0xffffffff);
3848 spin_lock_irq(&dev_priv->irq_lock);
3849 if (dev_priv->display_irqs_enabled)
3850 vlv_display_irq_reset(dev_priv);
3851 spin_unlock_irq(&dev_priv->irq_lock);
3854 static void cherryview_irq_uninstall(struct drm_device *dev)
3856 struct drm_i915_private *dev_priv = to_i915(dev);
3861 I915_WRITE(GEN8_MASTER_IRQ, 0);
3862 POSTING_READ(GEN8_MASTER_IRQ);
3864 gen8_gt_irq_reset(dev_priv);
3866 GEN5_IRQ_RESET(GEN8_PCU_);
3868 spin_lock_irq(&dev_priv->irq_lock);
3869 if (dev_priv->display_irqs_enabled)
3870 vlv_display_irq_reset(dev_priv);
3871 spin_unlock_irq(&dev_priv->irq_lock);
3874 static void ironlake_irq_uninstall(struct drm_device *dev)
3876 struct drm_i915_private *dev_priv = to_i915(dev);
3881 ironlake_irq_reset(dev);
3884 static void i8xx_irq_preinstall(struct drm_device *dev)
3886 struct drm_i915_private *dev_priv = to_i915(dev);
3889 for_each_pipe(dev_priv, pipe)
3890 I915_WRITE(PIPESTAT(pipe), 0);
3891 I915_WRITE16(IMR, 0xffff);
3892 I915_WRITE16(IER, 0x0);
3893 POSTING_READ16(IER);
3896 static int i8xx_irq_postinstall(struct drm_device *dev)
3898 struct drm_i915_private *dev_priv = to_i915(dev);
3901 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3903 /* Unmask the interrupts that we always want on. */
3904 dev_priv->irq_mask =
3905 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3906 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3907 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3908 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3909 I915_WRITE16(IMR, dev_priv->irq_mask);
3912 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3913 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3914 I915_USER_INTERRUPT);
3915 POSTING_READ16(IER);
3917 /* Interrupt setup is already guaranteed to be single-threaded; this is
3918 * just to make the assert_spin_locked check happy. */
3919 spin_lock_irq(&dev_priv->irq_lock);
3920 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3921 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3922 spin_unlock_irq(&dev_priv->irq_lock);
3928 * Returns true when a page flip has completed.
3930 static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
3931 int plane, int pipe, u32 iir)
3933 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3935 if (!intel_pipe_handle_vblank(dev_priv, pipe))
3938 if ((iir & flip_pending) == 0)
3939 goto check_page_flip;
3941 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3942 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3943 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3944 * the flip is completed (no longer pending). Since this doesn't raise
3945 * an interrupt per se, we watch for the change at vblank.
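 *
 * Illustrative timeline of that scheme:
 *
 *	MI_DISPLAY_FLIP emitted:  ISR flip-pending reads '1'
 *	flip actually completes:  ISR flip-pending drops to '0'
 *	                          (no interrupt is raised for this)
 *	next vblank interrupt:    IIR still shows flip-pending but ISR
 *	                          does not, so the flip is finished
 */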
3947 if (I915_READ16(ISR) & flip_pending)
3948 goto check_page_flip;
3950 intel_finish_page_flip_cs(dev_priv, pipe);
3954 intel_check_page_flip(dev_priv, pipe);
3958 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3960 struct drm_device *dev = arg;
3961 struct drm_i915_private *dev_priv = to_i915(dev);
3966 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3967 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3970 if (!intel_irqs_enabled(dev_priv))
3973 /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
3974 disable_rpm_wakeref_asserts(dev_priv);
3977 iir = I915_READ16(IIR);
3981 while (iir & ~flip_mask) {
3982 /* Can't rely on pipestat interrupt bit in iir as it might
3983 * have been cleared after the pipestat interrupt was received.
3984 * It doesn't set the bit in iir again, but it still produces
3985 * interrupts (for non-MSI).
3987 spin_lock(&dev_priv->irq_lock);
3988 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3989 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3991 for_each_pipe(dev_priv, pipe) {
3992 i915_reg_t reg = PIPESTAT(pipe);
3993 pipe_stats[pipe] = I915_READ(reg);
3996 * Clear the PIPE*STAT regs before the IIR
3998 if (pipe_stats[pipe] & 0x8000ffff)
3999 I915_WRITE(reg, pipe_stats[pipe]);
4001 spin_unlock(&dev_priv->irq_lock);
4003 I915_WRITE16(IIR, iir & ~flip_mask);
4004 new_iir = I915_READ16(IIR); /* Flush posted writes */
4006 if (iir & I915_USER_INTERRUPT)
4007 notify_ring(&dev_priv->engine[RCS]);
4009 for_each_pipe(dev_priv, pipe) {
4011 if (HAS_FBC(dev_priv))
4014 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4015 i8xx_handle_vblank(dev_priv, plane, pipe, iir))
4016 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4018 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4019 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4021 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4022 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4031 enable_rpm_wakeref_asserts(dev_priv);
4036 static void i8xx_irq_uninstall(struct drm_device *dev)
4038 struct drm_i915_private *dev_priv = to_i915(dev);
4041 for_each_pipe(dev_priv, pipe) {
4042 /* Clear enable bits; then clear status bits */
4043 I915_WRITE(PIPESTAT(pipe), 0);
4044 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4046 I915_WRITE16(IMR, 0xffff);
4047 I915_WRITE16(IER, 0x0);
4048 I915_WRITE16(IIR, I915_READ16(IIR));
4051 static void i915_irq_preinstall(struct drm_device *dev)
4053 struct drm_i915_private *dev_priv = to_i915(dev);
4056 if (I915_HAS_HOTPLUG(dev)) {
4057 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4058 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4061 I915_WRITE16(HWSTAM, 0xeffe);
4062 for_each_pipe(dev_priv, pipe)
4063 I915_WRITE(PIPESTAT(pipe), 0);
4064 I915_WRITE(IMR, 0xffffffff);
4065 I915_WRITE(IER, 0x0);
4069 static int i915_irq_postinstall(struct drm_device *dev)
4071 struct drm_i915_private *dev_priv = to_i915(dev);
4074 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4076 /* Unmask the interrupts that we always want on. */
4077 dev_priv->irq_mask =
4078 ~(I915_ASLE_INTERRUPT |
4079 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4080 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4081 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4082 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4085 I915_ASLE_INTERRUPT |
4086 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4087 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4088 I915_USER_INTERRUPT;
4090 if (I915_HAS_HOTPLUG(dev)) {
4091 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4092 POSTING_READ(PORT_HOTPLUG_EN);
4094 /* Enable in IER... */
4095 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4096 /* and unmask in IMR */
4097 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4100 I915_WRITE(IMR, dev_priv->irq_mask);
4101 I915_WRITE(IER, enable_mask);
4104 i915_enable_asle_pipestat(dev_priv);
4106 /* Interrupt setup is already guaranteed to be single-threaded; this is
4107 * just to make the assert_spin_locked check happy. */
4108 spin_lock_irq(&dev_priv->irq_lock);
4109 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4110 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4111 spin_unlock_irq(&dev_priv->irq_lock);
4117 * Returns true when a page flip has completed.
4119 static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
4120 int plane, int pipe, u32 iir)
4122 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4124 if (!intel_pipe_handle_vblank(dev_priv, pipe))
4127 if ((iir & flip_pending) == 0)
4128 goto check_page_flip;
4130 /* We detect FlipDone by looking for the change in PendingFlip from '1'
4131 * to '0' on the following vblank, i.e. IIR has the PendingFlip
4132 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4133 * the flip is completed (no longer pending). Since this doesn't raise
4134 * an interrupt per se, we watch for the change at vblank.
4136 if (I915_READ(ISR) & flip_pending)
4137 goto check_page_flip;
4139 intel_finish_page_flip_cs(dev_priv, pipe);
4143 intel_check_page_flip(dev_priv, pipe);
4147 static irqreturn_t i915_irq_handler(int irq, void *arg)
4149 struct drm_device *dev = arg;
4150 struct drm_i915_private *dev_priv = to_i915(dev);
4151 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4153 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4154 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4155 int pipe, ret = IRQ_NONE;
4157 if (!intel_irqs_enabled(dev_priv))
4160 /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
4161 disable_rpm_wakeref_asserts(dev_priv);
4163 iir = I915_READ(IIR);
4165 bool irq_received = (iir & ~flip_mask) != 0;
4166 bool blc_event = false;
4168 /* Can't rely on pipestat interrupt bit in iir as it might
4169 * have been cleared after the pipestat interrupt was received.
4170 * It doesn't set the bit in iir again, but it still produces
4171 * interrupts (for non-MSI).
4173 spin_lock(&dev_priv->irq_lock);
4174 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4175 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4177 for_each_pipe(dev_priv, pipe) {
4178 i915_reg_t reg = PIPESTAT(pipe);
4179 pipe_stats[pipe] = I915_READ(reg);
4181 /* Clear the PIPE*STAT regs before the IIR */
4182 if (pipe_stats[pipe] & 0x8000ffff) {
4183 I915_WRITE(reg, pipe_stats[pipe]);
4184 irq_received = true;
4187 spin_unlock(&dev_priv->irq_lock);
4192 /* Consume port. Then clear IIR or we'll miss events */
4193 if (I915_HAS_HOTPLUG(dev_priv) &&
4194 iir & I915_DISPLAY_PORT_INTERRUPT) {
4195 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4197 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4200 I915_WRITE(IIR, iir & ~flip_mask);
4201 new_iir = I915_READ(IIR); /* Flush posted writes */
4203 if (iir & I915_USER_INTERRUPT)
4204 notify_ring(&dev_priv->engine[RCS]);
4206 for_each_pipe(dev_priv, pipe) {
4208 if (HAS_FBC(dev_priv))
4211 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4212 i915_handle_vblank(dev_priv, plane, pipe, iir))
4213 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4215 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4218 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4219 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4221 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4222 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4226 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4227 intel_opregion_asle_intr(dev_priv);
4229 /* With MSI, interrupts are only generated when iir
4230 * transitions from zero to nonzero. If another bit got
4231 * set while we were handling the existing iir bits, then
4232 * we would never get another interrupt.
4234 * This is fine on non-MSI as well, as if we hit this path
4235 * we avoid exiting the interrupt handler only to generate
4238 * Note that for MSI this could cause a stray interrupt report
4239 * if an interrupt landed in the time between writing IIR and
4240 * the posting read. This should be rare enough to never
4241 * trigger the 99% of 100,000 interrupts test for disabling
4246 } while (iir & ~flip_mask);
4248 enable_rpm_wakeref_asserts(dev_priv);

static void i915_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;
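
	/*
	 * Interrupt sources flow ISR -> IMR -> IIR -> IER on these parts: a
	 * source left unmasked in IMR latches into IIR, and only IIR bits
	 * enabled in IER assert the interrupt line. The flip-pending bits are
	 * therefore left unmasked (so they latch and can be observed in IIR
	 * at vblank time by i915_handle_vblank()) but are excluded from
	 * enable_mask so they never raise an interrupt by themselves.
	 */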

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do it
	 * once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}
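
/*
 * i915_hpd_irq_setup() is not called directly; it is installed as
 * dev_priv->display.hpd_irq_setup in intel_irq_init() below and is invoked
 * by the hotplug code with dev_priv->irq_lock already held, which is what
 * the assert_spin_locked() above checks.
 */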

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev_priv, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
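
/*
 * Compared with i915_irq_handler() above, the i965 variant additionally
 * services the BSD ring (VCS) and the GMBUS interrupt, and it does not need
 * the FBC plane/pipe swap: plane and pipe map 1:1 here, hence the
 * i915_handle_vblank(dev_priv, pipe, pipe, iir) call.
 */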

static void i965_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (!dev_priv)
		return;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	dev_priv->rps.pm_intr_keep = 0;

	/*
	 * SNB and IVB can hard hang (and VLV/CHV may as well) on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	if (IS_GEN2(dev_priv)) {
		/* Gen2 has no hardware frame counter */
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}
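
	/*
	 * max_vblank_count tells the drm core where the hardware frame
	 * counter wraps so that drm_update_vblank_count() can account for
	 * missed wraps; 0 means there is no usable hardware counter and
	 * makes the core avoid it entirely.
	 */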

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_BROXTON(dev))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN2(dev_priv)) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (IS_GEN3(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}
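
/*
 * A rough sketch of the expected driver-load ordering (the actual call
 * sites live in the driver load path, not in this file):
 *
 *	intel_irq_init(dev_priv);		// vtables, work items, timers
 *	ret = intel_irq_install(dev_priv);	// request_irq via the drm core
 *	...
 *	intel_hpd_init(dev_priv);		// hotplug enabled as a later stage
 */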

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
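
/*
 * These two helpers are meant to bracket a powered-down period, roughly:
 *
 *	intel_runtime_pm_disable_interrupts(dev_priv);
 *	...device powered down, interrupt registers lost...
 *	intel_runtime_pm_enable_interrupts(dev_priv);
 *
 * Resume deliberately replays the full preinstall/postinstall sequence
 * instead of restoring saved registers: the enable paths reconstruct the
 * interrupt state from scratch.
 */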