1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include <linux/circ_buf.h>
34 #include <drm/drmP.h>
35 #include <drm/i915_drm.h>
36 #include "i915_drv.h"
37 #include "i915_trace.h"
38 #include "intel_drv.h"
39
40 /**
41  * DOC: interrupt handling
42  *
43  * These functions provide the basic support for enabling and disabling the
44  * interrupt handling. There's a lot more functionality in i915_irq.c
45  * and related files, but that will be described in separate chapters.
46  */
47
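/*
 * Per-platform tables mapping each hotplug (HPD) pin to the corresponding
 * hotplug interrupt bit in that platform's enable/status registers.
 */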
48 static const u32 hpd_ilk[HPD_NUM_PINS] = {
49         [HPD_PORT_A] = DE_DP_A_HOTPLUG,
50 };
51
52 static const u32 hpd_ivb[HPD_NUM_PINS] = {
53         [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
54 };
55
56 static const u32 hpd_bdw[HPD_NUM_PINS] = {
57         [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
58 };
59
60 static const u32 hpd_ibx[HPD_NUM_PINS] = {
61         [HPD_CRT] = SDE_CRT_HOTPLUG,
62         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
63         [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
64         [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
65         [HPD_PORT_D] = SDE_PORTD_HOTPLUG
66 };
67
68 static const u32 hpd_cpt[HPD_NUM_PINS] = {
69         [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
70         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
71         [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
72         [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
73         [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
74 };
75
76 static const u32 hpd_spt[HPD_NUM_PINS] = {
77         [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
78         [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
79         [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
80         [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
81         [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
82 };
83
84 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
85         [HPD_CRT] = CRT_HOTPLUG_INT_EN,
86         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
87         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
88         [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
89         [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
90         [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
91 };
92
93 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
94         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
95         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
96         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
97         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
98         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
99         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
100 };
101
102 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
103         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
104         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
105         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
106         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
107         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
108         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
109 };
110
111 /* BXT hpd list */
112 static const u32 hpd_bxt[HPD_NUM_PINS] = {
113         [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
114         [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
115         [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
116 };
117
118 /* IIR can theoretically queue up two events. Be paranoid. */
119 #define GEN8_IRQ_RESET_NDX(type, which) do { \
120         I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
121         POSTING_READ(GEN8_##type##_IMR(which)); \
122         I915_WRITE(GEN8_##type##_IER(which), 0); \
123         I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
124         POSTING_READ(GEN8_##type##_IIR(which)); \
125         I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
126         POSTING_READ(GEN8_##type##_IIR(which)); \
127 } while (0)
128
129 #define GEN5_IRQ_RESET(type) do { \
130         I915_WRITE(type##IMR, 0xffffffff); \
131         POSTING_READ(type##IMR); \
132         I915_WRITE(type##IER, 0); \
133         I915_WRITE(type##IIR, 0xffffffff); \
134         POSTING_READ(type##IIR); \
135         I915_WRITE(type##IIR, 0xffffffff); \
136         POSTING_READ(type##IIR); \
137 } while (0)
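/*
 * Illustrative expansion (for reference only): GEN5_IRQ_RESET(DE) masks all
 * interrupts in DEIMR, zeroes DEIER and then clears DEIIR twice, covering
 * the second queued event mentioned in the note above GEN8_IRQ_RESET_NDX.
 */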
138
139 /*
140  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
141  */
142 static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
143                                     i915_reg_t reg)
144 {
145         u32 val = I915_READ(reg);
146
147         if (val == 0)
148                 return;
149
150         WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
151              i915_mmio_reg_offset(reg), val);
152         I915_WRITE(reg, 0xffffffff);
153         POSTING_READ(reg);
154         I915_WRITE(reg, 0xffffffff);
155         POSTING_READ(reg);
156 }
157
158 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
159         gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
160         I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
161         I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
162         POSTING_READ(GEN8_##type##_IMR(which)); \
163 } while (0)
164
165 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
166         gen5_assert_iir_is_zero(dev_priv, type##IIR); \
167         I915_WRITE(type##IER, (ier_val)); \
168         I915_WRITE(type##IMR, (imr_val)); \
169         POSTING_READ(type##IMR); \
170 } while (0)
171
172 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
173 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
174
175 /* For display hotplug interrupt */
176 static inline void
177 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
178                                      uint32_t mask,
179                                      uint32_t bits)
180 {
181         uint32_t val;
182
183         assert_spin_locked(&dev_priv->irq_lock);
184         WARN_ON(bits & ~mask);
185
186         val = I915_READ(PORT_HOTPLUG_EN);
187         val &= ~mask;
188         val |= bits;
189         I915_WRITE(PORT_HOTPLUG_EN, val);
190 }
191
192 /**
193  * i915_hotplug_interrupt_update - update hotplug interrupt enable
194  * @dev_priv: driver private
195  * @mask: bits to update
196  * @bits: bits to enable
197  * NOTE: the HPD enable bits are modified both inside and outside
198  * of an interrupt context. To prevent concurrent read-modify-write
199  * cycles from interfering, these bits are protected by a spinlock.
200  * Since this function is usually not called from a context where the
201  * lock is already held, it acquires the lock itself. A non-locking
202  * version is also available.
203  */
204 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
205                                    uint32_t mask,
206                                    uint32_t bits)
207 {
208         spin_lock_irq(&dev_priv->irq_lock);
209         i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
210         spin_unlock_irq(&dev_priv->irq_lock);
211 }
212
213 /**
214  * ilk_update_display_irq - update DEIMR
215  * @dev_priv: driver private
216  * @interrupt_mask: mask of interrupt bits to update
217  * @enabled_irq_mask: mask of interrupt bits to enable
218  */
219 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
220                             uint32_t interrupt_mask,
221                             uint32_t enabled_irq_mask)
222 {
223         uint32_t new_val;
224
225         assert_spin_locked(&dev_priv->irq_lock);
226
227         WARN_ON(enabled_irq_mask & ~interrupt_mask);
228
229         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
230                 return;
231
232         new_val = dev_priv->irq_mask;
233         new_val &= ~interrupt_mask;
234         new_val |= (~enabled_irq_mask & interrupt_mask);
235
236         if (new_val != dev_priv->irq_mask) {
237                 dev_priv->irq_mask = new_val;
238                 I915_WRITE(DEIMR, dev_priv->irq_mask);
239                 POSTING_READ(DEIMR);
240         }
241 }
242
243 /**
244  * ilk_update_gt_irq - update GTIMR
245  * @dev_priv: driver private
246  * @interrupt_mask: mask of interrupt bits to update
247  * @enabled_irq_mask: mask of interrupt bits to enable
248  */
249 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
250                               uint32_t interrupt_mask,
251                               uint32_t enabled_irq_mask)
252 {
253         assert_spin_locked(&dev_priv->irq_lock);
254
255         WARN_ON(enabled_irq_mask & ~interrupt_mask);
256
257         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
258                 return;
259
260         dev_priv->gt_irq_mask &= ~interrupt_mask;
261         dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
262         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
263 }
264
265 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
266 {
267         ilk_update_gt_irq(dev_priv, mask, mask);
268         POSTING_READ_FW(GTIMR);
269 }
270
271 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
272 {
273         ilk_update_gt_irq(dev_priv, mask, 0);
274 }
275
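/*
 * On gen8+ the PM interrupt registers live in GT interrupt bank 2; these
 * helpers return the right IIR/IMR/IER register for the running platform.
 */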
276 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
277 {
278         return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
279 }
280
281 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
282 {
283         return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
284 }
285
286 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
287 {
288         return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
289 }
290
291 /**
292  * snb_update_pm_irq - update GEN6_PMIMR
293  * @dev_priv: driver private
294  * @interrupt_mask: mask of interrupt bits to update
295  * @enabled_irq_mask: mask of interrupt bits to enable
296  */
297 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
298                               uint32_t interrupt_mask,
299                               uint32_t enabled_irq_mask)
300 {
301         uint32_t new_val;
302
303         WARN_ON(enabled_irq_mask & ~interrupt_mask);
304
305         assert_spin_locked(&dev_priv->irq_lock);
306
307         new_val = dev_priv->pm_imr;
308         new_val &= ~interrupt_mask;
309         new_val |= (~enabled_irq_mask & interrupt_mask);
310
311         if (new_val != dev_priv->pm_imr) {
312                 dev_priv->pm_imr = new_val;
313                 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
314                 POSTING_READ(gen6_pm_imr(dev_priv));
315         }
316 }
317
318 void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
319 {
320         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
321                 return;
322
323         snb_update_pm_irq(dev_priv, mask, mask);
324 }
325
326 static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
327 {
328         snb_update_pm_irq(dev_priv, mask, 0);
329 }
330
331 void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
332 {
333         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
334                 return;
335
336         __gen6_mask_pm_irq(dev_priv, mask);
337 }
338
339 void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
340 {
341         i915_reg_t reg = gen6_pm_iir(dev_priv);
342
343         assert_spin_locked(&dev_priv->irq_lock);
344
345         I915_WRITE(reg, reset_mask);
346         I915_WRITE(reg, reset_mask);
347         POSTING_READ(reg);
348 }
349
350 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
351 {
352         assert_spin_locked(&dev_priv->irq_lock);
353
354         dev_priv->pm_ier |= enable_mask;
355         I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
356         gen6_unmask_pm_irq(dev_priv, enable_mask);
357         /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
358 }
359
360 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
361 {
362         assert_spin_locked(&dev_priv->irq_lock);
363
364         dev_priv->pm_ier &= ~disable_mask;
365         __gen6_mask_pm_irq(dev_priv, disable_mask);
366         I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
367         /* a barrier is missing here, but we don't really need one */
368 }
369
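/*
 * RPS interrupt enable/disable helpers. The cached rps.pm_iir and
 * rps.interrupts_enabled state is protected by irq_lock against the
 * interrupt handler and the RPS worker.
 */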
370 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
371 {
372         spin_lock_irq(&dev_priv->irq_lock);
373         gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
374         dev_priv->rps.pm_iir = 0;
375         spin_unlock_irq(&dev_priv->irq_lock);
376 }
377
378 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
379 {
380         if (READ_ONCE(dev_priv->rps.interrupts_enabled))
381                 return;
382
383         spin_lock_irq(&dev_priv->irq_lock);
384         WARN_ON_ONCE(dev_priv->rps.pm_iir);
385         WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
386         dev_priv->rps.interrupts_enabled = true;
387         gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
388
389         spin_unlock_irq(&dev_priv->irq_lock);
390 }
391
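/*
 * Strip out the events in rps.pm_intr_keep, which are kept unmasked in
 * GEN6_PMINTRMSK even when RPS interrupts are otherwise disabled.
 */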
392 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
393 {
394         return (mask & ~dev_priv->rps.pm_intr_keep);
395 }
396
397 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
398 {
399         if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
400                 return;
401
402         spin_lock_irq(&dev_priv->irq_lock);
403         dev_priv->rps.interrupts_enabled = false;
404
405         I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
406
407         gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
408
409         spin_unlock_irq(&dev_priv->irq_lock);
410         synchronize_irq(dev_priv->drm.irq);
411
412         /* Now that we will not be generating any more work, flush any
413          * outstanding tasks. As we are called on the RPS idle path,
414          * we will reset the GPU to minimum frequencies, so the current
415          * state of the worker can be discarded.
416          */
417         cancel_work_sync(&dev_priv->rps.work);
418         gen6_reset_rps_interrupts(dev_priv);
419 }
420
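/*
 * GuC interrupts reuse the gen6 PM interrupt plumbing above, just with
 * dev_priv->pm_guc_events instead of the RPS event mask.
 */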
421 void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
422 {
423         spin_lock_irq(&dev_priv->irq_lock);
424         gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
425         spin_unlock_irq(&dev_priv->irq_lock);
426 }
427
428 void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
429 {
430         spin_lock_irq(&dev_priv->irq_lock);
431         if (!dev_priv->guc.interrupts_enabled) {
432                 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
433                                        dev_priv->pm_guc_events);
434                 dev_priv->guc.interrupts_enabled = true;
435                 gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
436         }
437         spin_unlock_irq(&dev_priv->irq_lock);
438 }
439
440 void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
441 {
442         spin_lock_irq(&dev_priv->irq_lock);
443         dev_priv->guc.interrupts_enabled = false;
444
445         gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
446
447         spin_unlock_irq(&dev_priv->irq_lock);
448         synchronize_irq(dev_priv->drm.irq);
449
450         gen9_reset_guc_interrupts(dev_priv);
451 }
452
453 /**
454  * bdw_update_port_irq - update DE port interrupt
455  * @dev_priv: driver private
456  * @interrupt_mask: mask of interrupt bits to update
457  * @enabled_irq_mask: mask of interrupt bits to enable
458  */
459 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
460                                 uint32_t interrupt_mask,
461                                 uint32_t enabled_irq_mask)
462 {
463         uint32_t new_val;
464         uint32_t old_val;
465
466         assert_spin_locked(&dev_priv->irq_lock);
467
468         WARN_ON(enabled_irq_mask & ~interrupt_mask);
469
470         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
471                 return;
472
473         old_val = I915_READ(GEN8_DE_PORT_IMR);
474
475         new_val = old_val;
476         new_val &= ~interrupt_mask;
477         new_val |= (~enabled_irq_mask & interrupt_mask);
478
479         if (new_val != old_val) {
480                 I915_WRITE(GEN8_DE_PORT_IMR, new_val);
481                 POSTING_READ(GEN8_DE_PORT_IMR);
482         }
483 }
484
485 /**
486  * bdw_update_pipe_irq - update DE pipe interrupt
487  * @dev_priv: driver private
488  * @pipe: pipe whose interrupt to update
489  * @interrupt_mask: mask of interrupt bits to update
490  * @enabled_irq_mask: mask of interrupt bits to enable
491  */
492 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
493                          enum pipe pipe,
494                          uint32_t interrupt_mask,
495                          uint32_t enabled_irq_mask)
496 {
497         uint32_t new_val;
498
499         assert_spin_locked(&dev_priv->irq_lock);
500
501         WARN_ON(enabled_irq_mask & ~interrupt_mask);
502
503         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
504                 return;
505
506         new_val = dev_priv->de_irq_mask[pipe];
507         new_val &= ~interrupt_mask;
508         new_val |= (~enabled_irq_mask & interrupt_mask);
509
510         if (new_val != dev_priv->de_irq_mask[pipe]) {
511                 dev_priv->de_irq_mask[pipe] = new_val;
512                 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
513                 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
514         }
515 }
516
517 /**
518  * ibx_display_interrupt_update - update SDEIMR
519  * @dev_priv: driver private
520  * @interrupt_mask: mask of interrupt bits to update
521  * @enabled_irq_mask: mask of interrupt bits to enable
522  */
523 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
524                                   uint32_t interrupt_mask,
525                                   uint32_t enabled_irq_mask)
526 {
527         uint32_t sdeimr = I915_READ(SDEIMR);
528         sdeimr &= ~interrupt_mask;
529         sdeimr |= (~enabled_irq_mask & interrupt_mask);
530
531         WARN_ON(enabled_irq_mask & ~interrupt_mask);
532
533         assert_spin_locked(&dev_priv->irq_lock);
534
535         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
536                 return;
537
538         I915_WRITE(SDEIMR, sdeimr);
539         POSTING_READ(SDEIMR);
540 }
541
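/*
 * PIPESTAT bookkeeping: the enable bits sit 16 bits above their status
 * bits, hence the status_mask << 16 used by the callers further down.
 */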
542 static void
543 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
544                        u32 enable_mask, u32 status_mask)
545 {
546         i915_reg_t reg = PIPESTAT(pipe);
547         u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
548
549         assert_spin_locked(&dev_priv->irq_lock);
550         WARN_ON(!intel_irqs_enabled(dev_priv));
551
552         if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
553                       status_mask & ~PIPESTAT_INT_STATUS_MASK,
554                       "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
555                       pipe_name(pipe), enable_mask, status_mask))
556                 return;
557
558         if ((pipestat & enable_mask) == enable_mask)
559                 return;
560
561         dev_priv->pipestat_irq_mask[pipe] |= status_mask;
562
563         /* Enable the interrupt, clear any pending status */
564         pipestat |= enable_mask | status_mask;
565         I915_WRITE(reg, pipestat);
566         POSTING_READ(reg);
567 }
568
569 static void
570 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
571                         u32 enable_mask, u32 status_mask)
572 {
573         i915_reg_t reg = PIPESTAT(pipe);
574         u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
575
576         assert_spin_locked(&dev_priv->irq_lock);
577         WARN_ON(!intel_irqs_enabled(dev_priv));
578
579         if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
580                       status_mask & ~PIPESTAT_INT_STATUS_MASK,
581                       "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
582                       pipe_name(pipe), enable_mask, status_mask))
583                 return;
584
585         if ((pipestat & enable_mask) == 0)
586                 return;
587
588         dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
589
590         pipestat &= ~enable_mask;
591         I915_WRITE(reg, pipestat);
592         POSTING_READ(reg);
593 }
594
595 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
596 {
597         u32 enable_mask = status_mask << 16;
598
599         /*
600          * On pipe A we don't support the PSR interrupt yet,
601          * on pipe B and C the same bit MBZ.
602          */
603         if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
604                 return 0;
605         /*
606          * On pipe B and C we don't support the PSR interrupt yet, on pipe
607          * A the same bit is for perf counters which we don't use either.
608          */
609         if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
610                 return 0;
611
612         enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
613                          SPRITE0_FLIP_DONE_INT_EN_VLV |
614                          SPRITE1_FLIP_DONE_INT_EN_VLV);
615         if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
616                 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
617         if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
618                 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
619
620         return enable_mask;
621 }
622
623 void
624 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
625                      u32 status_mask)
626 {
627         u32 enable_mask;
628
629         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
630                 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
631                                                            status_mask);
632         else
633                 enable_mask = status_mask << 16;
634         __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
635 }
636
637 void
638 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
639                       u32 status_mask)
640 {
641         u32 enable_mask;
642
643         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
644                 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
645                                                            status_mask);
646         else
647                 enable_mask = status_mask << 16;
648         __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
649 }
650
651 /**
652  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
653  * @dev_priv: i915 device private
654  */
655 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
656 {
657         if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
658                 return;
659
660         spin_lock_irq(&dev_priv->irq_lock);
661
662         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
663         if (INTEL_GEN(dev_priv) >= 4)
664                 i915_enable_pipestat(dev_priv, PIPE_A,
665                                      PIPE_LEGACY_BLC_EVENT_STATUS);
666
667         spin_unlock_irq(&dev_priv->irq_lock);
668 }
669
670 /*
671  * This timing diagram depicts the video signal in and
672  * around the vertical blanking period.
673  *
674  * Assumptions about the fictitious mode used in this example:
675  *  vblank_start >= 3
676  *  vsync_start = vblank_start + 1
677  *  vsync_end = vblank_start + 2
678  *  vtotal = vblank_start + 3
679  *
680  *           start of vblank:
681  *           latch double buffered registers
682  *           increment frame counter (ctg+)
683  *           generate start of vblank interrupt (gen4+)
684  *           |
685  *           |          frame start:
686  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
687  *           |          may be shifted forward 1-3 extra lines via PIPECONF
688  *           |          |
689  *           |          |  start of vsync:
690  *           |          |  generate vsync interrupt
691  *           |          |  |
692  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
693  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
694  * ----va---> <-----------------vb--------------------> <--------va-------------
695  *       |          |       <----vs----->                     |
696  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
697  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
698  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
699  *       |          |                                         |
700  *       last visible pixel                                   first visible pixel
701  *                  |                                         increment frame counter (gen3/4)
702  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
703  *
704  * x  = horizontal active
705  * _  = horizontal blanking
706  * hs = horizontal sync
707  * va = vertical active
708  * vb = vertical blanking
709  * vs = vertical sync
710  * vbs = vblank_start (number)
711  *
712  * Summary:
713  * - most events happen at the start of horizontal sync
714  * - frame start happens at the start of horizontal blank, 1-4 lines
715  *   (depending on PIPECONF settings) after the start of vblank
716  * - gen3/4 pixel and frame counter are synchronized with the start
717  *   of horizontal active on the first line of vertical active
718  */
719
720 /* Called from drm generic code, passed a 'crtc', which
721  * we use as a pipe index
722  */
723 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
724 {
725         struct drm_i915_private *dev_priv = to_i915(dev);
726         i915_reg_t high_frame, low_frame;
727         u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
728         struct intel_crtc *intel_crtc =
729                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
730         const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
731
732         htotal = mode->crtc_htotal;
733         hsync_start = mode->crtc_hsync_start;
734         vbl_start = mode->crtc_vblank_start;
735         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
736                 vbl_start = DIV_ROUND_UP(vbl_start, 2);
737
738         /* Convert to pixel count */
739         vbl_start *= htotal;
740
741         /* Start of vblank event occurs at start of hsync */
742         vbl_start -= htotal - hsync_start;
743
744         high_frame = PIPEFRAME(pipe);
745         low_frame = PIPEFRAMEPIXEL(pipe);
746
747         /*
748          * High & low register fields aren't synchronized, so make sure
749          * we get a low value that's stable across two reads of the high
750          * register.
751          */
752         do {
753                 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
754                 low   = I915_READ(low_frame);
755                 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
756         } while (high1 != high2);
757
758         high1 >>= PIPE_FRAME_HIGH_SHIFT;
759         pixel = low & PIPE_PIXEL_MASK;
760         low >>= PIPE_FRAME_LOW_SHIFT;
761
762         /*
763          * The frame counter increments at beginning of active.
764          * Cook up a vblank counter by also checking the pixel
765          * counter against vblank start.
766          */
767         return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
768 }
769
770 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
771 {
772         struct drm_i915_private *dev_priv = to_i915(dev);
773
774         return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
775 }
776
777 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
778 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
779 {
780         struct drm_device *dev = crtc->base.dev;
781         struct drm_i915_private *dev_priv = to_i915(dev);
782         const struct drm_display_mode *mode = &crtc->base.hwmode;
783         enum pipe pipe = crtc->pipe;
784         int position, vtotal;
785
786         vtotal = mode->crtc_vtotal;
787         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
788                 vtotal /= 2;
789
790         if (IS_GEN2(dev_priv))
791                 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
792         else
793                 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
794
795         /*
796          * On HSW, the DSL reg (0x70000) appears to return 0 if we
797          * read it just before the start of vblank.  So try it again
798          * so we don't accidentally end up spanning a vblank frame
799          * increment, causing the pipe_update_end() code to squawk at us.
800          *
801          * The nature of this problem means we can't simply check the ISR
802          * bit and return the vblank start value; nor can we use the scanline
803          * debug register in the transcoder as it appears to have the same
804          * problem.  We may need to extend this to include other platforms,
805          * but so far testing only shows the problem on HSW.
806          */
807         if (HAS_DDI(dev_priv) && !position) {
808                 int i, temp;
809
810                 for (i = 0; i < 100; i++) {
811                         udelay(1);
812                         temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
813                                 DSL_LINEMASK_GEN3;
814                         if (temp != position) {
815                                 position = temp;
816                                 break;
817                         }
818                 }
819         }
820
821         /*
822          * See update_scanline_offset() for the details on the
823          * scanline_offset adjustment.
824          */
825         return (position + crtc->scanline_offset) % vtotal;
826 }
827
828 static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
829                                     unsigned int flags, int *vpos, int *hpos,
830                                     ktime_t *stime, ktime_t *etime,
831                                     const struct drm_display_mode *mode)
832 {
833         struct drm_i915_private *dev_priv = to_i915(dev);
834         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
835         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
836         int position;
837         int vbl_start, vbl_end, hsync_start, htotal, vtotal;
838         bool in_vbl = true;
839         int ret = 0;
840         unsigned long irqflags;
841
842         if (WARN_ON(!mode->crtc_clock)) {
843                 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
844                                  "pipe %c\n", pipe_name(pipe));
845                 return 0;
846         }
847
848         htotal = mode->crtc_htotal;
849         hsync_start = mode->crtc_hsync_start;
850         vtotal = mode->crtc_vtotal;
851         vbl_start = mode->crtc_vblank_start;
852         vbl_end = mode->crtc_vblank_end;
853
854         if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
855                 vbl_start = DIV_ROUND_UP(vbl_start, 2);
856                 vbl_end /= 2;
857                 vtotal /= 2;
858         }
859
860         ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
861
862         /*
863          * Lock uncore.lock, as we will do multiple timing critical raw
864          * register reads, potentially with preemption disabled, so the
865          * following code must not block on uncore.lock.
866          */
867         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
868
869         /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
870
871         /* Get optional system timestamp before query. */
872         if (stime)
873                 *stime = ktime_get();
874
875         if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
876                 /* No obvious pixelcount register. Only query vertical
877                  * scanout position from Display scan line register.
878                  */
879                 position = __intel_get_crtc_scanline(intel_crtc);
880         } else {
881                 /* Have access to pixelcount since start of frame.
882                  * We can split this into vertical and horizontal
883                  * scanout position.
884                  */
885                 position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
886
887                 /* convert to pixel counts */
888                 vbl_start *= htotal;
889                 vbl_end *= htotal;
890                 vtotal *= htotal;
891
892                 /*
893                  * In interlaced modes, the pixel counter counts all pixels,
894                  * so one field will have htotal more pixels. In order to avoid
895                  * the reported position from jumping backwards when the pixel
896                  * counter is beyond the length of the shorter field, just
897                  * clamp the position to the length of the shorter field. This
898                  * matches how the scanline counter based position works since
899                  * the scanline counter doesn't count the two half lines.
900                  */
901                 if (position >= vtotal)
902                         position = vtotal - 1;
903
904                 /*
905                  * Start of vblank interrupt is triggered at start of hsync,
906                  * just prior to the first active line of vblank. However we
907                  * consider lines to start at the leading edge of horizontal
908                  * active. So, should we get here before we've crossed into
909                  * the horizontal active of the first line in vblank, we would
910                  * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix that,
911                  * always add htotal-hsync_start to the current pixel position.
912                  */
913                 position = (position + htotal - hsync_start) % vtotal;
914         }
915
916         /* Get optional system timestamp after query. */
917         if (etime)
918                 *etime = ktime_get();
919
920         /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
921
922         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
923
924         in_vbl = position >= vbl_start && position < vbl_end;
925
926         /*
927          * While in vblank, position will be negative
928          * counting up towards 0 at vbl_end. And outside
929          * vblank, position will be positive counting
930          * up since vbl_end.
931          */
932         if (position >= vbl_start)
933                 position -= vbl_end;
934         else
935                 position += vtotal - vbl_end;
936
937         if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
938                 *vpos = position;
939                 *hpos = 0;
940         } else {
941                 *vpos = position / htotal;
942                 *hpos = position - (*vpos * htotal);
943         }
944
945         /* In vblank? */
946         if (in_vbl)
947                 ret |= DRM_SCANOUTPOS_IN_VBLANK;
948
949         return ret;
950 }
951
952 int intel_get_crtc_scanline(struct intel_crtc *crtc)
953 {
954         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
955         unsigned long irqflags;
956         int position;
957
958         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
959         position = __intel_get_crtc_scanline(crtc);
960         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
961
962         return position;
963 }
964
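/*
 * DRM vblank timestamp hook: validate the pipe/crtc and then let the DRM
 * core derive the timestamp from our scanout position implementation.
 */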
965 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
966                               int *max_error,
967                               struct timeval *vblank_time,
968                               unsigned flags)
969 {
970         struct drm_crtc *crtc;
971
972         if (pipe >= INTEL_INFO(dev)->num_pipes) {
973                 DRM_ERROR("Invalid crtc %u\n", pipe);
974                 return -EINVAL;
975         }
976
977         /* Get drm_crtc to timestamp: */
978         crtc = intel_get_crtc_for_pipe(dev, pipe);
979         if (crtc == NULL) {
980                 DRM_ERROR("Invalid crtc %u\n", pipe);
981                 return -EINVAL;
982         }
983
984         if (!crtc->hwmode.crtc_clock) {
985                 DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
986                 return -EBUSY;
987         }
988
989         /* Helper routine in DRM core does all the work: */
990         return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
991                                                      vblank_time, flags,
992                                                      &crtc->hwmode);
993 }
994
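/*
 * ILK DRPS: step the GPU delay value up or down based on the hardware's
 * busy-up/busy-down averages, under mchdev_lock.
 */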
995 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
996 {
997         u32 busy_up, busy_down, max_avg, min_avg;
998         u8 new_delay;
999
1000         spin_lock(&mchdev_lock);
1001
1002         I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
1003
1004         new_delay = dev_priv->ips.cur_delay;
1005
1006         I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
1007         busy_up = I915_READ(RCPREVBSYTUPAVG);
1008         busy_down = I915_READ(RCPREVBSYTDNAVG);
1009         max_avg = I915_READ(RCBMAXAVG);
1010         min_avg = I915_READ(RCBMINAVG);
1011
1012         /* Handle RCS change request from hw */
1013         if (busy_up > max_avg) {
1014                 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
1015                         new_delay = dev_priv->ips.cur_delay - 1;
1016                 if (new_delay < dev_priv->ips.max_delay)
1017                         new_delay = dev_priv->ips.max_delay;
1018         } else if (busy_down < min_avg) {
1019                 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
1020                         new_delay = dev_priv->ips.cur_delay + 1;
1021                 if (new_delay > dev_priv->ips.min_delay)
1022                         new_delay = dev_priv->ips.min_delay;
1023         }
1024
1025         if (ironlake_set_drps(dev_priv, new_delay))
1026                 dev_priv->ips.cur_delay = new_delay;
1027
1028         spin_unlock(&mchdev_lock);
1029
1030         return;
1031 }
1032
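/*
 * Note that a user interrupt arrived for this engine and wake up the first
 * waiter, emitting a tracepoint if anyone was actually waiting.
 */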
1033 static void notify_ring(struct intel_engine_cs *engine)
1034 {
1035         smp_store_mb(engine->breadcrumbs.irq_posted, true);
1036         if (intel_engine_wakeup(engine))
1037                 trace_i915_gem_request_notify(engine);
1038 }
1039
1040 static void vlv_c0_read(struct drm_i915_private *dev_priv,
1041                         struct intel_rps_ei *ei)
1042 {
1043         ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
1044         ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
1045         ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
1046 }
1047
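/*
 * Compare the combined render + media C0 residency accumulated since @old
 * against @threshold percent of the elapsed CZ clock time.
 */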
1048 static bool vlv_c0_above(struct drm_i915_private *dev_priv,
1049                          const struct intel_rps_ei *old,
1050                          const struct intel_rps_ei *now,
1051                          int threshold)
1052 {
1053         u64 time, c0;
1054         unsigned int mul = 100;
1055
1056         if (old->cz_clock == 0)
1057                 return false;
1058
1059         if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1060                 mul <<= 8;
1061
1062         time = now->cz_clock - old->cz_clock;
1063         time *= threshold * dev_priv->czclk_freq;
1064
1065         /* Workload can be split between render + media, e.g. SwapBuffers
1066          * being blitted in X after being rendered in mesa. To account for
1067          * this we need to combine both engines into our activity counter.
1068          */
1069         c0 = now->render_c0 - old->render_c0;
1070         c0 += now->media_c0 - old->media_c0;
1071         c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
1072
1073         return c0 >= time;
1074 }
1075
1076 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1077 {
1078         vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
1079         dev_priv->rps.up_ei = dev_priv->rps.down_ei;
1080 }
1081
1082 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1083 {
1084         struct intel_rps_ei now;
1085         u32 events = 0;
1086
1087         if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
1088                 return 0;
1089
1090         vlv_c0_read(dev_priv, &now);
1091         if (now.cz_clock == 0)
1092                 return 0;
1093
1094         if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
1095                 if (!vlv_c0_above(dev_priv,
1096                                   &dev_priv->rps.down_ei, &now,
1097                                   dev_priv->rps.down_threshold))
1098                         events |= GEN6_PM_RP_DOWN_THRESHOLD;
1099                 dev_priv->rps.down_ei = now;
1100         }
1101
1102         if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1103                 if (vlv_c0_above(dev_priv,
1104                                  &dev_priv->rps.up_ei, &now,
1105                                  dev_priv->rps.up_threshold))
1106                         events |= GEN6_PM_RP_UP_THRESHOLD;
1107                 dev_priv->rps.up_ei = now;
1108         }
1109
1110         return events;
1111 }
1112
1113 static bool any_waiters(struct drm_i915_private *dev_priv)
1114 {
1115         struct intel_engine_cs *engine;
1116         enum intel_engine_id id;
1117
1118         for_each_engine(engine, dev_priv, id)
1119                 if (intel_engine_has_waiter(engine))
1120                         return true;
1121
1122         return false;
1123 }
1124
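/*
 * Bottom half for RPS interrupts: pick a new GPU frequency from the
 * up/down threshold and timeout events (plus any client boost) and apply
 * it via intel_set_rps().
 */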
1125 static void gen6_pm_rps_work(struct work_struct *work)
1126 {
1127         struct drm_i915_private *dev_priv =
1128                 container_of(work, struct drm_i915_private, rps.work);
1129         bool client_boost;
1130         int new_delay, adj, min, max;
1131         u32 pm_iir;
1132
1133         spin_lock_irq(&dev_priv->irq_lock);
1134         /* Speed up work cancellation while disabling RPS interrupts. */
1135         if (!dev_priv->rps.interrupts_enabled) {
1136                 spin_unlock_irq(&dev_priv->irq_lock);
1137                 return;
1138         }
1139
1140         pm_iir = dev_priv->rps.pm_iir;
1141         dev_priv->rps.pm_iir = 0;
1142         /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1143         gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
1144         client_boost = dev_priv->rps.client_boost;
1145         dev_priv->rps.client_boost = false;
1146         spin_unlock_irq(&dev_priv->irq_lock);
1147
1148         /* Make sure we didn't queue anything we're not going to process. */
1149         WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1150
1151         if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1152                 return;
1153
1154         mutex_lock(&dev_priv->rps.hw_lock);
1155
1156         pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1157
1158         adj = dev_priv->rps.last_adj;
1159         new_delay = dev_priv->rps.cur_freq;
1160         min = dev_priv->rps.min_freq_softlimit;
1161         max = dev_priv->rps.max_freq_softlimit;
1162         if (client_boost || any_waiters(dev_priv))
1163                 max = dev_priv->rps.max_freq;
1164         if (client_boost && new_delay < dev_priv->rps.boost_freq) {
1165                 new_delay = dev_priv->rps.boost_freq;
1166                 adj = 0;
1167         } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1168                 if (adj > 0)
1169                         adj *= 2;
1170                 else /* CHV needs even encode values */
1171                         adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1172                 /*
1173                  * For better performance, jump directly
1174                  * to RPe if we're below it.
1175                  */
1176                 if (new_delay < dev_priv->rps.efficient_freq - adj) {
1177                         new_delay = dev_priv->rps.efficient_freq;
1178                         adj = 0;
1179                 }
1180         } else if (client_boost || any_waiters(dev_priv)) {
1181                 adj = 0;
1182         } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1183                 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1184                         new_delay = dev_priv->rps.efficient_freq;
1185                 else
1186                         new_delay = dev_priv->rps.min_freq_softlimit;
1187                 adj = 0;
1188         } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1189                 if (adj < 0)
1190                         adj *= 2;
1191                 else /* CHV needs even encode values */
1192                         adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1193         } else { /* unknown event */
1194                 adj = 0;
1195         }
1196
1197         dev_priv->rps.last_adj = adj;
1198
1199         /* sysfs frequency interfaces may have snuck in while servicing the
1200          * interrupt
1201          */
1202         new_delay += adj;
1203         new_delay = clamp_t(int, new_delay, min, max);
1204
1205         intel_set_rps(dev_priv, new_delay);
1206
1207         mutex_unlock(&dev_priv->rps.hw_lock);
1208 }
1209
1210
1211 /**
1212  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1213  * occurs.
1214  * @work: workqueue struct
1215  *
1216  * Doesn't actually do anything except notify userspace. As a consequence of
1217  * this event, userspace should try to remap the bad rows, since statistically
1218  * the same row is more likely to go bad again.
1219  */
1220 static void ivybridge_parity_work(struct work_struct *work)
1221 {
1222         struct drm_i915_private *dev_priv =
1223                 container_of(work, struct drm_i915_private, l3_parity.error_work);
1224         u32 error_status, row, bank, subbank;
1225         char *parity_event[6];
1226         uint32_t misccpctl;
1227         uint8_t slice = 0;
1228
1229         /* We must turn off DOP level clock gating to access the L3 registers.
1230          * In order to prevent a get/put style interface, acquire struct mutex
1231          * any time we access those registers.
1232          */
1233         mutex_lock(&dev_priv->drm.struct_mutex);
1234
1235         /* If we've screwed up tracking, just let the interrupt fire again */
1236         if (WARN_ON(!dev_priv->l3_parity.which_slice))
1237                 goto out;
1238
1239         misccpctl = I915_READ(GEN7_MISCCPCTL);
1240         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1241         POSTING_READ(GEN7_MISCCPCTL);
1242
1243         while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1244                 i915_reg_t reg;
1245
1246                 slice--;
1247                 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
1248                         break;
1249
1250                 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1251
1252                 reg = GEN7_L3CDERRST1(slice);
1253
1254                 error_status = I915_READ(reg);
1255                 row = GEN7_PARITY_ERROR_ROW(error_status);
1256                 bank = GEN7_PARITY_ERROR_BANK(error_status);
1257                 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1258
1259                 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1260                 POSTING_READ(reg);
1261
1262                 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1263                 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1264                 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1265                 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1266                 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1267                 parity_event[5] = NULL;
1268
1269                 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1270                                    KOBJ_CHANGE, parity_event);
1271
1272                 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1273                           slice, row, bank, subbank);
1274
1275                 kfree(parity_event[4]);
1276                 kfree(parity_event[3]);
1277                 kfree(parity_event[2]);
1278                 kfree(parity_event[1]);
1279         }
1280
1281         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1282
1283 out:
1284         WARN_ON(dev_priv->l3_parity.which_slice);
1285         spin_lock_irq(&dev_priv->irq_lock);
1286         gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1287         spin_unlock_irq(&dev_priv->irq_lock);
1288
1289         mutex_unlock(&dev_priv->drm.struct_mutex);
1290 }
1291
1292 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1293                                                u32 iir)
1294 {
1295         if (!HAS_L3_DPF(dev_priv))
1296                 return;
1297
1298         spin_lock(&dev_priv->irq_lock);
1299         gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1300         spin_unlock(&dev_priv->irq_lock);
1301
1302         iir &= GT_PARITY_ERROR(dev_priv);
1303         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1304                 dev_priv->l3_parity.which_slice |= 1 << 1;
1305
1306         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1307                 dev_priv->l3_parity.which_slice |= 1 << 0;
1308
1309         queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1310 }
1311
1312 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1313                                u32 gt_iir)
1314 {
1315         if (gt_iir & GT_RENDER_USER_INTERRUPT)
1316                 notify_ring(dev_priv->engine[RCS]);
1317         if (gt_iir & ILK_BSD_USER_INTERRUPT)
1318                 notify_ring(dev_priv->engine[VCS]);
1319 }
1320
1321 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1322                                u32 gt_iir)
1323 {
1324         if (gt_iir & GT_RENDER_USER_INTERRUPT)
1325                 notify_ring(dev_priv->engine[RCS]);
1326         if (gt_iir & GT_BSD_USER_INTERRUPT)
1327                 notify_ring(dev_priv->engine[VCS]);
1328         if (gt_iir & GT_BLT_USER_INTERRUPT)
1329                 notify_ring(dev_priv->engine[BCS]);
1330
1331         if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1332                       GT_BSD_CS_ERROR_INTERRUPT |
1333                       GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1334                 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1335
1336         if (gt_iir & GT_PARITY_ERROR(dev_priv))
1337                 ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1338 }
1339
1340 static __always_inline void
1341 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
1342 {
1343         if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
1344                 notify_ring(engine);
1345         if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
1346                 tasklet_schedule(&engine->irq_tasklet);
1347 }
1348
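/*
 * Read and acknowledge the GT IIR banks selected by @master_ctl, stashing
 * the raw values in @gt_iir for gen8_gt_irq_handler() to act on later.
 */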
1349 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
1350                                    u32 master_ctl,
1351                                    u32 gt_iir[4])
1352 {
1353         irqreturn_t ret = IRQ_NONE;
1354
1355         if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1356                 gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
1357                 if (gt_iir[0]) {
1358                         I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
1359                         ret = IRQ_HANDLED;
1360                 } else
1361                         DRM_ERROR("The master control interrupt lied (GT0)!\n");
1362         }
1363
1364         if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1365                 gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
1366                 if (gt_iir[1]) {
1367                         I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
1368                         ret = IRQ_HANDLED;
1369                 } else
1370                         DRM_ERROR("The master control interrupt lied (GT1)!\n");
1371         }
1372
1373         if (master_ctl & GEN8_GT_VECS_IRQ) {
1374                 gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
1375                 if (gt_iir[3]) {
1376                         I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
1377                         ret = IRQ_HANDLED;
1378                 } else
1379                         DRM_ERROR("The master control interrupt lied (GT3)!\n");
1380         }
1381
1382         if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1383                 gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
1384                 if (gt_iir[2] & (dev_priv->pm_rps_events |
1385                                  dev_priv->pm_guc_events)) {
1386                         I915_WRITE_FW(GEN8_GT_IIR(2),
1387                                       gt_iir[2] & (dev_priv->pm_rps_events |
1388                                                    dev_priv->pm_guc_events));
1389                         ret = IRQ_HANDLED;
1390                 } else
1391                         DRM_ERROR("The master control interrupt lied (PM)!\n");
1392         }
1393
1394         return ret;
1395 }
1396
1397 static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1398                                 u32 gt_iir[4])
1399 {
1400         if (gt_iir[0]) {
1401                 gen8_cs_irq_handler(dev_priv->engine[RCS],
1402                                     gt_iir[0], GEN8_RCS_IRQ_SHIFT);
1403                 gen8_cs_irq_handler(dev_priv->engine[BCS],
1404                                     gt_iir[0], GEN8_BCS_IRQ_SHIFT);
1405         }
1406
1407         if (gt_iir[1]) {
1408                 gen8_cs_irq_handler(dev_priv->engine[VCS],
1409                                     gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
1410                 gen8_cs_irq_handler(dev_priv->engine[VCS2],
1411                                     gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
1412         }
1413
1414         if (gt_iir[3])
1415                 gen8_cs_irq_handler(dev_priv->engine[VECS],
1416                                     gt_iir[3], GEN8_VECS_IRQ_SHIFT);
1417
1418         if (gt_iir[2] & dev_priv->pm_rps_events)
1419                 gen6_rps_irq_handler(dev_priv, gt_iir[2]);
1420
1421         if (gt_iir[2] & dev_priv->pm_guc_events)
1422                 gen9_guc_irq_handler(dev_priv, gt_iir[2]);
1423 }
1424
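/*
 * Per-platform helpers telling whether the hotplug pulse on a given port
 * was flagged as a long pulse in the digital port hotplug register.
 */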
1425 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1426 {
1427         switch (port) {
1428         case PORT_A:
1429                 return val & PORTA_HOTPLUG_LONG_DETECT;
1430         case PORT_B:
1431                 return val & PORTB_HOTPLUG_LONG_DETECT;
1432         case PORT_C:
1433                 return val & PORTC_HOTPLUG_LONG_DETECT;
1434         default:
1435                 return false;
1436         }
1437 }
1438
1439 static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1440 {
1441         switch (port) {
1442         case PORT_E:
1443                 return val & PORTE_HOTPLUG_LONG_DETECT;
1444         default:
1445                 return false;
1446         }
1447 }
1448
1449 static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1450 {
1451         switch (port) {
1452         case PORT_A:
1453                 return val & PORTA_HOTPLUG_LONG_DETECT;
1454         case PORT_B:
1455                 return val & PORTB_HOTPLUG_LONG_DETECT;
1456         case PORT_C:
1457                 return val & PORTC_HOTPLUG_LONG_DETECT;
1458         case PORT_D:
1459                 return val & PORTD_HOTPLUG_LONG_DETECT;
1460         default:
1461                 return false;
1462         }
1463 }
1464
1465 static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1466 {
1467         switch (port) {
1468         case PORT_A:
1469                 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1470         default:
1471                 return false;
1472         }
1473 }
1474
1475 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1476 {
1477         switch (port) {
1478         case PORT_B:
1479                 return val & PORTB_HOTPLUG_LONG_DETECT;
1480         case PORT_C:
1481                 return val & PORTC_HOTPLUG_LONG_DETECT;
1482         case PORT_D:
1483                 return val & PORTD_HOTPLUG_LONG_DETECT;
1484         default:
1485                 return false;
1486         }
1487 }
1488
1489 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1490 {
1491         switch (port) {
1492         case PORT_B:
1493                 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1494         case PORT_C:
1495                 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1496         case PORT_D:
1497                 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1498         default:
1499                 return false;
1500         }
1501 }
1502
1503 /*
1504  * Get a bit mask of pins that have triggered, and which ones may be long.
1505  * This can be called multiple times with the same masks to accumulate
1506  * hotplug detection results from several registers.
1507  *
1508  * Note that the caller is expected to zero out the masks initially.
1509  */
1510 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1511                              u32 hotplug_trigger, u32 dig_hotplug_reg,
1512                              const u32 hpd[HPD_NUM_PINS],
1513                              bool long_pulse_detect(enum port port, u32 val))
1514 {
1515         enum port port;
1516         int i;
1517
1518         for_each_hpd_pin(i) {
1519                 if ((hpd[i] & hotplug_trigger) == 0)
1520                         continue;
1521
1522                 *pin_mask |= BIT(i);
1523
1524                 if (!intel_hpd_pin_to_port(i, &port))
1525                         continue;
1526
1527                 if (long_pulse_detect(port, dig_hotplug_reg))
1528                         *long_mask |= BIT(i);
1529         }
1530
1531         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1532                          hotplug_trigger, dig_hotplug_reg, *pin_mask);
1533
1534 }
1535
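     /*
      * Illustrative usage of intel_get_hpd_pins() (essentially what
      * spt_irq_handler() below does; the variable names here are only for
      * the example): the caller zeroes both masks once and may then invoke
      * the helper once per hotplug register before reporting the result:
      *
      *	u32 pin_mask = 0, long_mask = 0;
      *
      *	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
      *			   dig_hotplug_reg, hpd_spt,
      *			   spt_port_hotplug_long_detect);
      *	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
      *			   dig_hotplug_reg2, hpd_spt,
      *			   spt_port_hotplug2_long_detect);
      *	if (pin_mask)
      *		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
      */
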
1536 static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1537 {
1538         wake_up_all(&dev_priv->gmbus_wait_queue);
1539 }
1540
1541 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1542 {
1543         wake_up_all(&dev_priv->gmbus_wait_queue);
1544 }
1545
1546 #if defined(CONFIG_DEBUG_FS)
1547 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1548                                          enum pipe pipe,
1549                                          uint32_t crc0, uint32_t crc1,
1550                                          uint32_t crc2, uint32_t crc3,
1551                                          uint32_t crc4)
1552 {
1553         struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1554         struct intel_pipe_crc_entry *entry;
1555         int head, tail;
1556
1557         spin_lock(&pipe_crc->lock);
1558
1559         if (!pipe_crc->entries) {
1560                 spin_unlock(&pipe_crc->lock);
1561                 DRM_DEBUG_KMS("spurious interrupt\n");
1562                 return;
1563         }
1564
1565         head = pipe_crc->head;
1566         tail = pipe_crc->tail;
1567
1568         if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1569                 spin_unlock(&pipe_crc->lock);
1570                 DRM_ERROR("CRC buffer overflowing\n");
1571                 return;
1572         }
1573
1574         entry = &pipe_crc->entries[head];
1575
1576         entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
1577                                                                  pipe);
1578         entry->crc[0] = crc0;
1579         entry->crc[1] = crc1;
1580         entry->crc[2] = crc2;
1581         entry->crc[3] = crc3;
1582         entry->crc[4] = crc4;
1583
1584         head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1585         pipe_crc->head = head;
1586
1587         spin_unlock(&pipe_crc->lock);
1588
1589         wake_up_interruptible(&pipe_crc->wq);
1590 }
1591 #else
1592 static inline void
1593 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1594                              enum pipe pipe,
1595                              uint32_t crc0, uint32_t crc1,
1596                              uint32_t crc2, uint32_t crc3,
1597                              uint32_t crc4) {}
1598 #endif
1599
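     /*
      * Illustration only: the CRC ring buffer above relies on
      * INTEL_PIPE_CRC_ENTRIES_NR being a power of two, so advancing the head
      * with
      *
      *	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
      *
      * wraps automatically (e.g. with 128 entries, 127 + 1 masks back to 0),
      * and CIRC_SPACE() from <linux/circ_buf.h> uses the same head/tail pair
      * to report how many entries can still be written before catching up
      * with the reader.
      */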
1600
1601 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1602                                      enum pipe pipe)
1603 {
1604         display_pipe_crc_irq_handler(dev_priv, pipe,
1605                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1606                                      0, 0, 0, 0);
1607 }
1608
1609 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1610                                      enum pipe pipe)
1611 {
1612         display_pipe_crc_irq_handler(dev_priv, pipe,
1613                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1614                                      I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1615                                      I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1616                                      I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1617                                      I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1618 }
1619
1620 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1621                                       enum pipe pipe)
1622 {
1623         uint32_t res1, res2;
1624
1625         if (INTEL_GEN(dev_priv) >= 3)
1626                 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1627         else
1628                 res1 = 0;
1629
1630         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1631                 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1632         else
1633                 res2 = 0;
1634
1635         display_pipe_crc_irq_handler(dev_priv, pipe,
1636                                      I915_READ(PIPE_CRC_RES_RED(pipe)),
1637                                      I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1638                                      I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1639                                      res1, res2);
1640 }
1641
1642 /* The RPS events need forcewake, so we add them to a work queue and mask their
1643  * IMR bits until the work is done. Other interrupts can be processed without
1644  * the work queue. */
1645 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1646 {
1647         if (pm_iir & dev_priv->pm_rps_events) {
1648                 spin_lock(&dev_priv->irq_lock);
1649                 gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1650                 if (dev_priv->rps.interrupts_enabled) {
1651                         dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1652                         schedule_work(&dev_priv->rps.work);
1653                 }
1654                 spin_unlock(&dev_priv->irq_lock);
1655         }
1656
1657         if (INTEL_INFO(dev_priv)->gen >= 8)
1658                 return;
1659
1660         if (HAS_VEBOX(dev_priv)) {
1661                 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1662                         notify_ring(dev_priv->engine[VECS]);
1663
1664                 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1665                         DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1666         }
1667 }
1668
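     /*
      * Editorial sketch of the top/bottom-half split described above (the
      * exact shape of the worker behind dev_priv->rps.work is an assumption
      * here; see the RPS code elsewhere in this file for the real thing):
      *
      *	hard irq (above):
      *		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
      *		dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
      *		schedule_work(&dev_priv->rps.work);
      *
      *	worker (simplified):
      *		take irq_lock, consume and zero rps.pm_iir,
      *		re-enable the masked RPS interrupts, drop irq_lock.
      */
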
1669 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
1670 {
1671         if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
1672                 /* Sample the log buffer flush related bits & clear them out now
1673                  * itself from the message identity register to minimize the
1674                  * probability of losing a flush interrupt, when there are back
1675                  * to back flush interrupts.
1676                  * There can be a new flush interrupt, for different log buffer
1677                  * type (like for ISR), whilst Host is handling one (for DPC).
1678                  * Since same bit is used in message register for ISR & DPC, it
1679                  * could happen that GuC sets the bit for 2nd interrupt but Host
1680                  * clears out the bit on handling the 1st interrupt.
1681                  */
1682                 u32 msg, flush;
1683
1684                 msg = I915_READ(SOFT_SCRATCH(15));
1685                 flush = msg & (GUC2HOST_MSG_CRASH_DUMP_POSTED |
1686                                GUC2HOST_MSG_FLUSH_LOG_BUFFER);
1687                 if (flush) {
1688                         /* Clear the message bits that are handled */
1689                         I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
1690
1691                         /* Handle flush interrupt in bottom half */
1692                         queue_work(dev_priv->guc.log.flush_wq,
1693                                    &dev_priv->guc.log.flush_work);
1694
1695                         dev_priv->guc.log.flush_interrupt_count++;
1696                 } else {
1697                         /* Leaving the unhandled event bits set will not
1698                          * re-trigger the interrupt, so nothing to do here.
1699                          */
1700                 }
1701         }
1702 }
1703
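     /*
      * Worked example for the write-back above (values purely illustrative):
      * if the sampled "msg" has both flush-related bits plus some unrelated
      * message bit set, then writing "msg & ~flush" clears only the two bits
      * we are about to handle and leaves the unrelated bit in place.  A bit
      * the GuC sets between our read and our write is still overwritten,
      * which is why the register is sampled and cleared as early as possible
      * to keep that window small.
      */
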
1704 static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
1705                                      enum pipe pipe)
1706 {
1707         bool ret;
1708
1709         ret = drm_handle_vblank(&dev_priv->drm, pipe);
1710         if (ret)
1711                 intel_finish_page_flip_mmio(dev_priv, pipe);
1712
1713         return ret;
1714 }
1715
1716 static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1717                                         u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1718 {
1719         int pipe;
1720
1721         spin_lock(&dev_priv->irq_lock);
1722
1723         if (!dev_priv->display_irqs_enabled) {
1724                 spin_unlock(&dev_priv->irq_lock);
1725                 return;
1726         }
1727
1728         for_each_pipe(dev_priv, pipe) {
1729                 i915_reg_t reg;
1730                 u32 mask, iir_bit = 0;
1731
1732                 /*
1733                  * PIPESTAT bits get signalled even when the interrupt is
1734                  * disabled with the mask bits, and some of the status bits do
1735                  * not generate interrupts at all (like the underrun bit). Hence
1736                  * we need to be careful that we only handle what we want to
1737                  * handle.
1738                  */
1739
1740                 /* fifo underruns are filtered in the underrun handler. */
1741                 mask = PIPE_FIFO_UNDERRUN_STATUS;
1742
1743                 switch (pipe) {
1744                 case PIPE_A:
1745                         iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1746                         break;
1747                 case PIPE_B:
1748                         iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1749                         break;
1750                 case PIPE_C:
1751                         iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1752                         break;
1753                 }
1754                 if (iir & iir_bit)
1755                         mask |= dev_priv->pipestat_irq_mask[pipe];
1756
1757                 if (!mask)
1758                         continue;
1759
1760                 reg = PIPESTAT(pipe);
1761                 mask |= PIPESTAT_INT_ENABLE_MASK;
1762                 pipe_stats[pipe] = I915_READ(reg) & mask;
1763
1764                 /*
1765                  * Clear the PIPE*STAT regs before the IIR
1766                  */
1767                 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1768                                         PIPESTAT_INT_STATUS_MASK))
1769                         I915_WRITE(reg, pipe_stats[pipe]);
1770         }
1771         spin_unlock(&dev_priv->irq_lock);
1772 }
1773
1774 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1775                                             u32 pipe_stats[I915_MAX_PIPES])
1776 {
1777         enum pipe pipe;
1778
1779         for_each_pipe(dev_priv, pipe) {
1780                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1781                     intel_pipe_handle_vblank(dev_priv, pipe))
1782                         intel_check_page_flip(dev_priv, pipe);
1783
1784                 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
1785                         intel_finish_page_flip_cs(dev_priv, pipe);
1786
1787                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1788                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1789
1790                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1791                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1792         }
1793
1794         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1795                 gmbus_irq_handler(dev_priv);
1796 }
1797
1798 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1799 {
1800         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1801
1802         if (hotplug_status)
1803                 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1804
1805         return hotplug_status;
1806 }
1807
1808 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1809                                  u32 hotplug_status)
1810 {
1811         u32 pin_mask = 0, long_mask = 0;
1812
1813         if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
1814             IS_CHERRYVIEW(dev_priv)) {
1815                 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1816
1817                 if (hotplug_trigger) {
1818                         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1819                                            hotplug_trigger, hpd_status_g4x,
1820                                            i9xx_port_hotplug_long_detect);
1821
1822                         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1823                 }
1824
1825                 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1826                         dp_aux_irq_handler(dev_priv);
1827         } else {
1828                 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1829
1830                 if (hotplug_trigger) {
1831                         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1832                                            hotplug_trigger, hpd_status_i915,
1833                                            i9xx_port_hotplug_long_detect);
1834                         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1835                 }
1836         }
1837 }
1838
1839 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1840 {
1841         struct drm_device *dev = arg;
1842         struct drm_i915_private *dev_priv = to_i915(dev);
1843         irqreturn_t ret = IRQ_NONE;
1844
1845         if (!intel_irqs_enabled(dev_priv))
1846                 return IRQ_NONE;
1847
1848         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1849         disable_rpm_wakeref_asserts(dev_priv);
1850
1851         do {
1852                 u32 iir, gt_iir, pm_iir;
1853                 u32 pipe_stats[I915_MAX_PIPES] = {};
1854                 u32 hotplug_status = 0;
1855                 u32 ier = 0;
1856
1857                 gt_iir = I915_READ(GTIIR);
1858                 pm_iir = I915_READ(GEN6_PMIIR);
1859                 iir = I915_READ(VLV_IIR);
1860
1861                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1862                         break;
1863
1864                 ret = IRQ_HANDLED;
1865
1866                 /*
1867                  * Theory on interrupt generation, based on empirical evidence:
1868                  *
1869                  * x = ((VLV_IIR & VLV_IER) ||
1870                  *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1871                  *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1872                  *
1873                  * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1874                  * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1875                  * guarantee the CPU interrupt will be raised again even if we
1876                  * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1877                  * bits this time around.
1878                  */
1879                 I915_WRITE(VLV_MASTER_IER, 0);
1880                 ier = I915_READ(VLV_IER);
1881                 I915_WRITE(VLV_IER, 0);
1882
1883                 if (gt_iir)
1884                         I915_WRITE(GTIIR, gt_iir);
1885                 if (pm_iir)
1886                         I915_WRITE(GEN6_PMIIR, pm_iir);
1887
1888                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1889                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1890
1891                 /* Call regardless, as some status bits might not be
1892                  * signalled in iir */
1893                 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1894
1895                 /*
1896                  * VLV_IIR is single buffered, and reflects the level
1897                  * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1898                  */
1899                 if (iir)
1900                         I915_WRITE(VLV_IIR, iir);
1901
1902                 I915_WRITE(VLV_IER, ier);
1903                 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1904                 POSTING_READ(VLV_MASTER_IER);
1905
1906                 if (gt_iir)
1907                         snb_gt_irq_handler(dev_priv, gt_iir);
1908                 if (pm_iir)
1909                         gen6_rps_irq_handler(dev_priv, pm_iir);
1910
1911                 if (hotplug_status)
1912                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1913
1914                 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1915         } while (0);
1916
1917         enable_rpm_wakeref_asserts(dev_priv);
1918
1919         return ret;
1920 }
1921
1922 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1923 {
1924         struct drm_device *dev = arg;
1925         struct drm_i915_private *dev_priv = to_i915(dev);
1926         irqreturn_t ret = IRQ_NONE;
1927
1928         if (!intel_irqs_enabled(dev_priv))
1929                 return IRQ_NONE;
1930
1931         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1932         disable_rpm_wakeref_asserts(dev_priv);
1933
1934         do {
1935                 u32 master_ctl, iir;
1936                 u32 gt_iir[4] = {};
1937                 u32 pipe_stats[I915_MAX_PIPES] = {};
1938                 u32 hotplug_status = 0;
1939                 u32 ier = 0;
1940
1941                 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1942                 iir = I915_READ(VLV_IIR);
1943
1944                 if (master_ctl == 0 && iir == 0)
1945                         break;
1946
1947                 ret = IRQ_HANDLED;
1948
1949                 /*
1950                  * Theory on interrupt generation, based on empirical evidence:
1951                  *
1952                  * x = ((VLV_IIR & VLV_IER) ||
1953                  *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1954                  *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1955                  *
1956                  * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1957                  * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1958                  * guarantee the CPU interrupt will be raised again even if we
1959                  * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1960                  * bits this time around.
1961                  */
1962                 I915_WRITE(GEN8_MASTER_IRQ, 0);
1963                 ier = I915_READ(VLV_IER);
1964                 I915_WRITE(VLV_IER, 0);
1965
1966                 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
1967
1968                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1969                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1970
1971                 /* Call regardless, as some status bits might not be
1972                  * signalled in iir */
1973                 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1974
1975                 /*
1976                  * VLV_IIR is single buffered, and reflects the level
1977                  * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1978                  */
1979                 if (iir)
1980                         I915_WRITE(VLV_IIR, iir);
1981
1982                 I915_WRITE(VLV_IER, ier);
1983                 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1984                 POSTING_READ(GEN8_MASTER_IRQ);
1985
1986                 gen8_gt_irq_handler(dev_priv, gt_iir);
1987
1988                 if (hotplug_status)
1989                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1990
1991                 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1992         } while (0);
1993
1994         enable_rpm_wakeref_asserts(dev_priv);
1995
1996         return ret;
1997 }
1998
1999 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2000                                 u32 hotplug_trigger,
2001                                 const u32 hpd[HPD_NUM_PINS])
2002 {
2003         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2004
2005         /*
2006          * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
2007          * unless we touch the hotplug register, even if hotplug_trigger is
2008          * zero. Not acking leads to "The master control interrupt lied (SDE)!"
2009          * errors.
2010          */
2011         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2012         if (!hotplug_trigger) {
2013                 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
2014                         PORTD_HOTPLUG_STATUS_MASK |
2015                         PORTC_HOTPLUG_STATUS_MASK |
2016                         PORTB_HOTPLUG_STATUS_MASK;
2017                 dig_hotplug_reg &= ~mask;
2018         }
2019
2020         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2021         if (!hotplug_trigger)
2022                 return;
2023
2024         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2025                            dig_hotplug_reg, hpd,
2026                            pch_port_hotplug_long_detect);
2027
2028         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2029 }
2030
2031 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2032 {
2033         int pipe;
2034         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2035
2036         ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
2037
2038         if (pch_iir & SDE_AUDIO_POWER_MASK) {
2039                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2040                                SDE_AUDIO_POWER_SHIFT);
2041                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2042                                  port_name(port));
2043         }
2044
2045         if (pch_iir & SDE_AUX_MASK)
2046                 dp_aux_irq_handler(dev_priv);
2047
2048         if (pch_iir & SDE_GMBUS)
2049                 gmbus_irq_handler(dev_priv);
2050
2051         if (pch_iir & SDE_AUDIO_HDCP_MASK)
2052                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2053
2054         if (pch_iir & SDE_AUDIO_TRANS_MASK)
2055                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2056
2057         if (pch_iir & SDE_POISON)
2058                 DRM_ERROR("PCH poison interrupt\n");
2059
2060         if (pch_iir & SDE_FDI_MASK)
2061                 for_each_pipe(dev_priv, pipe)
2062                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2063                                          pipe_name(pipe),
2064                                          I915_READ(FDI_RX_IIR(pipe)));
2065
2066         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2067                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2068
2069         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2070                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2071
2072         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2073                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2074
2075         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2076                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2077 }
2078
2079 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
2080 {
2081         u32 err_int = I915_READ(GEN7_ERR_INT);
2082         enum pipe pipe;
2083
2084         if (err_int & ERR_INT_POISON)
2085                 DRM_ERROR("Poison interrupt\n");
2086
2087         for_each_pipe(dev_priv, pipe) {
2088                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2089                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2090
2091                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2092                         if (IS_IVYBRIDGE(dev_priv))
2093                                 ivb_pipe_crc_irq_handler(dev_priv, pipe);
2094                         else
2095                                 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2096                 }
2097         }
2098
2099         I915_WRITE(GEN7_ERR_INT, err_int);
2100 }
2101
2102 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2103 {
2104         u32 serr_int = I915_READ(SERR_INT);
2105
2106         if (serr_int & SERR_INT_POISON)
2107                 DRM_ERROR("PCH poison interrupt\n");
2108
2109         if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2110                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2111
2112         if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2113                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2114
2115         if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2116                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
2117
2118         I915_WRITE(SERR_INT, serr_int);
2119 }
2120
2121 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2122 {
2123         int pipe;
2124         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2125
2126         ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
2127
2128         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2129                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2130                                SDE_AUDIO_POWER_SHIFT_CPT);
2131                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2132                                  port_name(port));
2133         }
2134
2135         if (pch_iir & SDE_AUX_MASK_CPT)
2136                 dp_aux_irq_handler(dev_priv);
2137
2138         if (pch_iir & SDE_GMBUS_CPT)
2139                 gmbus_irq_handler(dev_priv);
2140
2141         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2142                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2143
2144         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2145                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2146
2147         if (pch_iir & SDE_FDI_MASK_CPT)
2148                 for_each_pipe(dev_priv, pipe)
2149                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2150                                          pipe_name(pipe),
2151                                          I915_READ(FDI_RX_IIR(pipe)));
2152
2153         if (pch_iir & SDE_ERROR_CPT)
2154                 cpt_serr_int_handler(dev_priv);
2155 }
2156
2157 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2158 {
2159         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2160                 ~SDE_PORTE_HOTPLUG_SPT;
2161         u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2162         u32 pin_mask = 0, long_mask = 0;
2163
2164         if (hotplug_trigger) {
2165                 u32 dig_hotplug_reg;
2166
2167                 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2168                 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2169
2170                 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2171                                    dig_hotplug_reg, hpd_spt,
2172                                    spt_port_hotplug_long_detect);
2173         }
2174
2175         if (hotplug2_trigger) {
2176                 u32 dig_hotplug_reg;
2177
2178                 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2179                 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2180
2181                 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
2182                                    dig_hotplug_reg, hpd_spt,
2183                                    spt_port_hotplug2_long_detect);
2184         }
2185
2186         if (pin_mask)
2187                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2188
2189         if (pch_iir & SDE_GMBUS_CPT)
2190                 gmbus_irq_handler(dev_priv);
2191 }
2192
2193 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2194                                 u32 hotplug_trigger,
2195                                 const u32 hpd[HPD_NUM_PINS])
2196 {
2197         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2198
2199         dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2200         I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2201
2202         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2203                            dig_hotplug_reg, hpd,
2204                            ilk_port_hotplug_long_detect);
2205
2206         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2207 }
2208
2209 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2210                                     u32 de_iir)
2211 {
2212         enum pipe pipe;
2213         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2214
2215         if (hotplug_trigger)
2216                 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2217
2218         if (de_iir & DE_AUX_CHANNEL_A)
2219                 dp_aux_irq_handler(dev_priv);
2220
2221         if (de_iir & DE_GSE)
2222                 intel_opregion_asle_intr(dev_priv);
2223
2224         if (de_iir & DE_POISON)
2225                 DRM_ERROR("Poison interrupt\n");
2226
2227         for_each_pipe(dev_priv, pipe) {
2228                 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2229                     intel_pipe_handle_vblank(dev_priv, pipe))
2230                         intel_check_page_flip(dev_priv, pipe);
2231
2232                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2233                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2234
2235                 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2236                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2237
2238                 /* plane/pipes map 1:1 on ilk+ */
2239                 if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2240                         intel_finish_page_flip_cs(dev_priv, pipe);
2241         }
2242
2243         /* check event from PCH */
2244         if (de_iir & DE_PCH_EVENT) {
2245                 u32 pch_iir = I915_READ(SDEIIR);
2246
2247                 if (HAS_PCH_CPT(dev_priv))
2248                         cpt_irq_handler(dev_priv, pch_iir);
2249                 else
2250                         ibx_irq_handler(dev_priv, pch_iir);
2251
2252                 /* should clear PCH hotplug event before clear CPU irq */
2253                 I915_WRITE(SDEIIR, pch_iir);
2254         }
2255
2256         if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
2257                 ironlake_rps_change_irq_handler(dev_priv);
2258 }
2259
2260 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2261                                     u32 de_iir)
2262 {
2263         enum pipe pipe;
2264         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2265
2266         if (hotplug_trigger)
2267                 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2268
2269         if (de_iir & DE_ERR_INT_IVB)
2270                 ivb_err_int_handler(dev_priv);
2271
2272         if (de_iir & DE_AUX_CHANNEL_A_IVB)
2273                 dp_aux_irq_handler(dev_priv);
2274
2275         if (de_iir & DE_GSE_IVB)
2276                 intel_opregion_asle_intr(dev_priv);
2277
2278         for_each_pipe(dev_priv, pipe) {
2279                 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2280                     intel_pipe_handle_vblank(dev_priv, pipe))
2281                         intel_check_page_flip(dev_priv, pipe);
2282
2283                 /* plane/pipes map 1:1 on ilk+ */
2284                 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2285                         intel_finish_page_flip_cs(dev_priv, pipe);
2286         }
2287
2288         /* check event from PCH */
2289         if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2290                 u32 pch_iir = I915_READ(SDEIIR);
2291
2292                 cpt_irq_handler(dev_priv, pch_iir);
2293
2294                 /* clear PCH hotplug event before clear CPU irq */
2295                 I915_WRITE(SDEIIR, pch_iir);
2296         }
2297 }
2298
2299 /*
2300  * To handle irqs with the minimum potential races with fresh interrupts, we:
2301  * 1 - Disable Master Interrupt Control.
2302  * 2 - Find the source(s) of the interrupt.
2303  * 3 - Clear the Interrupt Identity bits (IIR).
2304  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2305  * 5 - Re-enable Master Interrupt Control.
2306  */
2307 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2308 {
2309         struct drm_device *dev = arg;
2310         struct drm_i915_private *dev_priv = to_i915(dev);
2311         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2312         irqreturn_t ret = IRQ_NONE;
2313
2314         if (!intel_irqs_enabled(dev_priv))
2315                 return IRQ_NONE;
2316
2317         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2318         disable_rpm_wakeref_asserts(dev_priv);
2319
2320         /* disable master interrupt before clearing iir  */
2321         de_ier = I915_READ(DEIER);
2322         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2323         POSTING_READ(DEIER);
2324
2325         /* Disable south interrupts. We'll only write to SDEIIR once, so further
2326          * interrupts will be stored on its back queue, and then we'll be
2327          * able to process them after we restore SDEIER (as soon as we restore
2328          * it, we'll get an interrupt if SDEIIR still has something to process
2329          * due to its back queue). */
2330         if (!HAS_PCH_NOP(dev_priv)) {
2331                 sde_ier = I915_READ(SDEIER);
2332                 I915_WRITE(SDEIER, 0);
2333                 POSTING_READ(SDEIER);
2334         }
2335
2336         /* Find, clear, then process each source of interrupt */
2337
2338         gt_iir = I915_READ(GTIIR);
2339         if (gt_iir) {
2340                 I915_WRITE(GTIIR, gt_iir);
2341                 ret = IRQ_HANDLED;
2342                 if (INTEL_GEN(dev_priv) >= 6)
2343                         snb_gt_irq_handler(dev_priv, gt_iir);
2344                 else
2345                         ilk_gt_irq_handler(dev_priv, gt_iir);
2346         }
2347
2348         de_iir = I915_READ(DEIIR);
2349         if (de_iir) {
2350                 I915_WRITE(DEIIR, de_iir);
2351                 ret = IRQ_HANDLED;
2352                 if (INTEL_GEN(dev_priv) >= 7)
2353                         ivb_display_irq_handler(dev_priv, de_iir);
2354                 else
2355                         ilk_display_irq_handler(dev_priv, de_iir);
2356         }
2357
2358         if (INTEL_GEN(dev_priv) >= 6) {
2359                 u32 pm_iir = I915_READ(GEN6_PMIIR);
2360                 if (pm_iir) {
2361                         I915_WRITE(GEN6_PMIIR, pm_iir);
2362                         ret = IRQ_HANDLED;
2363                         gen6_rps_irq_handler(dev_priv, pm_iir);
2364                 }
2365         }
2366
2367         I915_WRITE(DEIER, de_ier);
2368         POSTING_READ(DEIER);
2369         if (!HAS_PCH_NOP(dev_priv)) {
2370                 I915_WRITE(SDEIER, sde_ier);
2371                 POSTING_READ(SDEIER);
2372         }
2373
2374         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2375         enable_rpm_wakeref_asserts(dev_priv);
2376
2377         return ret;
2378 }
2379
2380 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2381                                 u32 hotplug_trigger,
2382                                 const u32 hpd[HPD_NUM_PINS])
2383 {
2384         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2385
2386         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2387         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2388
2389         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2390                            dig_hotplug_reg, hpd,
2391                            bxt_port_hotplug_long_detect);
2392
2393         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2394 }
2395
2396 static irqreturn_t
2397 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2398 {
2399         irqreturn_t ret = IRQ_NONE;
2400         u32 iir;
2401         enum pipe pipe;
2402
2403         if (master_ctl & GEN8_DE_MISC_IRQ) {
2404                 iir = I915_READ(GEN8_DE_MISC_IIR);
2405                 if (iir) {
2406                         I915_WRITE(GEN8_DE_MISC_IIR, iir);
2407                         ret = IRQ_HANDLED;
2408                         if (iir & GEN8_DE_MISC_GSE)
2409                                 intel_opregion_asle_intr(dev_priv);
2410                         else
2411                                 DRM_ERROR("Unexpected DE Misc interrupt\n");
2412                 } else {
2413                         DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2414                 }
2415         }
2416
2417         if (master_ctl & GEN8_DE_PORT_IRQ) {
2418                 iir = I915_READ(GEN8_DE_PORT_IIR);
2419                 if (iir) {
2420                         u32 tmp_mask;
2421                         bool found = false;
2422
2423                         I915_WRITE(GEN8_DE_PORT_IIR, iir);
2424                         ret = IRQ_HANDLED;
2425
2426                         tmp_mask = GEN8_AUX_CHANNEL_A;
2427                         if (INTEL_INFO(dev_priv)->gen >= 9)
2428                                 tmp_mask |= GEN9_AUX_CHANNEL_B |
2429                                             GEN9_AUX_CHANNEL_C |
2430                                             GEN9_AUX_CHANNEL_D;
2431
2432                         if (iir & tmp_mask) {
2433                                 dp_aux_irq_handler(dev_priv);
2434                                 found = true;
2435                         }
2436
2437                         if (IS_BROXTON(dev_priv)) {
2438                                 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2439                                 if (tmp_mask) {
2440                                         bxt_hpd_irq_handler(dev_priv, tmp_mask,
2441                                                             hpd_bxt);
2442                                         found = true;
2443                                 }
2444                         } else if (IS_BROADWELL(dev_priv)) {
2445                                 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2446                                 if (tmp_mask) {
2447                                         ilk_hpd_irq_handler(dev_priv,
2448                                                             tmp_mask, hpd_bdw);
2449                                         found = true;
2450                                 }
2451                         }
2452
2453                         if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2454                                 gmbus_irq_handler(dev_priv);
2455                                 found = true;
2456                         }
2457
2458                         if (!found)
2459                                 DRM_ERROR("Unexpected DE Port interrupt\n");
2460                 } else {
2461                         DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2462                 }
2463         }
2464
2465         for_each_pipe(dev_priv, pipe) {
2466                 u32 flip_done, fault_errors;
2467
2468                 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2469                         continue;
2470
2471                 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2472                 if (!iir) {
2473                         DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2474                         continue;
2475                 }
2476
2477                 ret = IRQ_HANDLED;
2478                 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2479
2480                 if (iir & GEN8_PIPE_VBLANK &&
2481                     intel_pipe_handle_vblank(dev_priv, pipe))
2482                         intel_check_page_flip(dev_priv, pipe);
2483
2484                 flip_done = iir;
2485                 if (INTEL_INFO(dev_priv)->gen >= 9)
2486                         flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
2487                 else
2488                         flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
2489
2490                 if (flip_done)
2491                         intel_finish_page_flip_cs(dev_priv, pipe);
2492
2493                 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2494                         hsw_pipe_crc_irq_handler(dev_priv, pipe);
2495
2496                 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2497                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2498
2499                 fault_errors = iir;
2500                 if (INTEL_INFO(dev_priv)->gen >= 9)
2501                         fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2502                 else
2503                         fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2504
2505                 if (fault_errors)
2506                         DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2507                                   pipe_name(pipe),
2508                                   fault_errors);
2509         }
2510
2511         if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2512             master_ctl & GEN8_DE_PCH_IRQ) {
2513                 /*
2514                  * FIXME(BDW): Assume for now that the new interrupt handling
2515                  * scheme also closed the SDE interrupt handling race we've seen
2516                  * on older pch-split platforms. But this needs testing.
2517                  */
2518                 iir = I915_READ(SDEIIR);
2519                 if (iir) {
2520                         I915_WRITE(SDEIIR, iir);
2521                         ret = IRQ_HANDLED;
2522
2523                         if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
2524                                 spt_irq_handler(dev_priv, iir);
2525                         else
2526                                 cpt_irq_handler(dev_priv, iir);
2527                 } else {
2528                         /*
2529                          * Like on previous PCH there seems to be something
2530                          * fishy going on with forwarding PCH interrupts.
2531                          */
2532                         DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2533                 }
2534         }
2535
2536         return ret;
2537 }
2538
2539 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2540 {
2541         struct drm_device *dev = arg;
2542         struct drm_i915_private *dev_priv = to_i915(dev);
2543         u32 master_ctl;
2544         u32 gt_iir[4] = {};
2545         irqreturn_t ret;
2546
2547         if (!intel_irqs_enabled(dev_priv))
2548                 return IRQ_NONE;
2549
2550         master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2551         master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2552         if (!master_ctl)
2553                 return IRQ_NONE;
2554
2555         I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2556
2557         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2558         disable_rpm_wakeref_asserts(dev_priv);
2559
2560         /* Find, clear, then process each source of interrupt */
2561         ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2562         gen8_gt_irq_handler(dev_priv, gt_iir);
2563         ret |= gen8_de_irq_handler(dev_priv, master_ctl);
2564
2565         I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2566         POSTING_READ_FW(GEN8_MASTER_IRQ);
2567
2568         enable_rpm_wakeref_asserts(dev_priv);
2569
2570         return ret;
2571 }
2572
2573 static void i915_error_wake_up(struct drm_i915_private *dev_priv)
2574 {
2575         /*
2576          * Notify all waiters for GPU completion events that reset state has
2577          * been changed, and that they need to restart their wait after
2578          * checking for potential errors (and bail out to drop locks if there is
2579          * a gpu reset pending so that i915_error_work_func can acquire them).
2580          */
2581
2582         /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2583         wake_up_all(&dev_priv->gpu_error.wait_queue);
2584
2585         /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2586         wake_up_all(&dev_priv->pending_flip_queue);
2587 }
2588
2589 /**
2590  * i915_reset_and_wakeup - do process context error handling work
2591  * @dev_priv: i915 device private
2592  *
2593  * Fire an error uevent so userspace can see that a hang or error
2594  * was detected.
2595  */
2596 static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
2597 {
2598         struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
2599         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2600         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2601         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2602
2603         kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
2604
2605         DRM_DEBUG_DRIVER("resetting chip\n");
2606         kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
2607
2608         /*
2609          * In most cases it's guaranteed that we get here with an RPM
2610          * reference held, for example because there is a pending GPU
2611          * request that won't finish until the reset is done. This
2612          * isn't the case at least when we get here by doing a
2613          * simulated reset via debugfs, so get an RPM reference.
2614          */
2615         intel_runtime_pm_get(dev_priv);
2616         intel_prepare_reset(dev_priv);
2617
2618         do {
2619                 /*
2620                  * All state reset _must_ be completed before we update the
2621                  * reset counter, for otherwise waiters might miss the reset
2622                  * pending state and not properly drop locks, resulting in
2623                  * deadlocks with the reset work.
2624                  */
2625                 if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
2626                         i915_reset(dev_priv);
2627                         mutex_unlock(&dev_priv->drm.struct_mutex);
2628                 }
2629
2630                 /* We need to wait for anyone holding the lock to wakeup */
2631         } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
2632                                      I915_RESET_IN_PROGRESS,
2633                                      TASK_UNINTERRUPTIBLE,
2634                                      HZ));
2635
2636         intel_finish_reset(dev_priv);
2637         intel_runtime_pm_put(dev_priv);
2638
2639         if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
2640                 kobject_uevent_env(kobj,
2641                                    KOBJ_CHANGE, reset_done_event);
2642
2643         /*
2644          * Note: The wake_up also serves as a memory barrier so that
2645          * waiters see the updated value of the dev_priv->gpu_error.
2646          */
2647         wake_up_all(&dev_priv->gpu_error.reset_queue);
2648 }
2649
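     /*
      * For reference, the uevent sequence emitted above is, in order:
      * I915_ERROR_UEVENT=1 when the error is reported, I915_RESET_UEVENT=1
      * just before the reset is attempted, and I915_ERROR_UEVENT=0 once the
      * reset completed without wedging the GPU, so a userspace listener can
      * bracket its error collection around the reset.
      */
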
2650 static inline void
2651 i915_err_print_instdone(struct drm_i915_private *dev_priv,
2652                         struct intel_instdone *instdone)
2653 {
2654         int slice;
2655         int subslice;
2656
2657         pr_err("  INSTDONE: 0x%08x\n", instdone->instdone);
2658
2659         if (INTEL_GEN(dev_priv) <= 3)
2660                 return;
2661
2662         pr_err("  SC_INSTDONE: 0x%08x\n", instdone->slice_common);
2663
2664         if (INTEL_GEN(dev_priv) <= 6)
2665                 return;
2666
2667         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
2668                 pr_err("  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
2669                        slice, subslice, instdone->sampler[slice][subslice]);
2670
2671         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
2672                 pr_err("  ROW_INSTDONE[%d][%d]: 0x%08x\n",
2673                        slice, subslice, instdone->row[slice][subslice]);
2674 }
2675
2676 static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
2677 {
2678         u32 eir;
2679
2680         if (!IS_GEN2(dev_priv))
2681                 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
2682
2683         if (INTEL_GEN(dev_priv) < 4)
2684                 I915_WRITE(IPEIR, I915_READ(IPEIR));
2685         else
2686                 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
2687
2688         I915_WRITE(EIR, I915_READ(EIR));
2689         eir = I915_READ(EIR);
2690         if (eir) {
2691                 /*
2692                  * some errors might have become stuck,
2693                  * mask them.
2694                  */
2695                 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
2696                 I915_WRITE(EMR, I915_READ(EMR) | eir);
2697                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2698         }
2699 }
2700
2701 /**
2702  * i915_handle_error - handle a gpu error
2703  * @dev_priv: i915 device private
2704  * @engine_mask: mask representing engines that are hung
2705  * @fmt: Error message format string
2706  *
2707  * Do some basic checking of register state at error time and dump it to the
2708  * syslog.  Also call i915_capture_error_state() to make sure we get a record
2709  * and make it available in debugfs.  Fire a uevent so userspace knows
2710  * something bad happened (should trigger collection of a ring dump etc.).
2711  */
2712 void i915_handle_error(struct drm_i915_private *dev_priv,
2713                        u32 engine_mask,
2714                        const char *fmt, ...)
2715 {
2716         va_list args;
2717         char error_msg[80];
2718
2719         va_start(args, fmt);
2720         vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2721         va_end(args);
2722
2723         i915_capture_error_state(dev_priv, engine_mask, error_msg);
2724         i915_clear_error_registers(dev_priv);
2725
2726         if (!engine_mask)
2727                 return;
2728
2729         if (test_and_set_bit(I915_RESET_IN_PROGRESS,
2730                              &dev_priv->gpu_error.flags))
2731                 return;
2732
2733         /*
2734          * Wakeup waiting processes so that the reset function
2735          * i915_reset_and_wakeup doesn't deadlock trying to grab
2736          * various locks. By bumping the reset counter first, the woken
2737          * processes will see a reset in progress and back off,
2738          * releasing their locks and then wait for the reset completion.
2739          * We must do this for _all_ gpu waiters that might hold locks
2740          * that the reset work needs to acquire.
2741          *
2742          * Note: The wake_up also provides a memory barrier to ensure that the
2743          * waiters see the updated value of the reset flags.
2744          */
2745         i915_error_wake_up(dev_priv);
2746
2747         i915_reset_and_wakeup(dev_priv);
2748 }
2749
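     /*
      * Illustrative call (not taken from the driver): passing an empty engine
      * mask captures and clears the error state without starting a reset,
      * e.g.
      *
      *	i915_handle_error(dev_priv, 0, "missed interrupt");
      *
      * whereas a non-zero engine_mask additionally kicks off the reset path
      * above.
      */
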
2750 /* Called from drm generic code, passed 'crtc' which
2751  * we use as a pipe index
2752  */
2753 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
2754 {
2755         struct drm_i915_private *dev_priv = to_i915(dev);
2756         unsigned long irqflags;
2757
2758         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2759         i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2760         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2761
2762         return 0;
2763 }
2764
2765 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
2766 {
2767         struct drm_i915_private *dev_priv = to_i915(dev);
2768         unsigned long irqflags;
2769
2770         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2771         i915_enable_pipestat(dev_priv, pipe,
2772                              PIPE_START_VBLANK_INTERRUPT_STATUS);
2773         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2774
2775         return 0;
2776 }
2777
2778 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2779 {
2780         struct drm_i915_private *dev_priv = to_i915(dev);
2781         unsigned long irqflags;
2782         uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
2783                 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2784
2785         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2786         ilk_enable_display_irq(dev_priv, bit);
2787         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2788
2789         return 0;
2790 }
2791
2792 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2793 {
2794         struct drm_i915_private *dev_priv = to_i915(dev);
2795         unsigned long irqflags;
2796
2797         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2798         bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2799         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2800
2801         return 0;
2802 }
2803
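     /*
      * The enable/disable vblank helpers in this block are installed
      * per-platform as the DRM vblank hooks from intel_irq_init() further
      * down in this file; a rough sketch of that wiring, for illustration
      * only:
      *
      *	dev->driver->enable_vblank = gen8_enable_vblank;
      *	dev->driver->disable_vblank = gen8_disable_vblank;
      */
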
2804 /* Called from drm generic code, passed 'crtc' which
2805  * we use as a pipe index
2806  */
2807 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
2808 {
2809         struct drm_i915_private *dev_priv = to_i915(dev);
2810         unsigned long irqflags;
2811
2812         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2813         i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2814         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2815 }
2816
2817 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
2818 {
2819         struct drm_i915_private *dev_priv = to_i915(dev);
2820         unsigned long irqflags;
2821
2822         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2823         i915_disable_pipestat(dev_priv, pipe,
2824                               PIPE_START_VBLANK_INTERRUPT_STATUS);
2825         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2826 }
2827
2828 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2829 {
2830         struct drm_i915_private *dev_priv = to_i915(dev);
2831         unsigned long irqflags;
2832         uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
2833                 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2834
2835         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2836         ilk_disable_display_irq(dev_priv, bit);
2837         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2838 }
2839
2840 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2841 {
2842         struct drm_i915_private *dev_priv = to_i915(dev);
2843         unsigned long irqflags;
2844
2845         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2846         bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2847         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2848 }
2849
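/*
 * Check whether the IPEHR value captured for a stuck engine is a semaphore
 * wait command: on gen8+ the opcode bits (ipehr >> 23) must equal 0x1c
 * (MI_SEMAPHORE_WAIT), on earlier gens it must be MI_SEMAPHORE_MBOX with
 * COMPARE and REGISTER set, ignoring the sync-target bits.
 */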
2850 static bool
2851 ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
2852 {
2853         if (INTEL_GEN(engine->i915) >= 8) {
2854                 return (ipehr >> 23) == 0x1c;
2855         } else {
2856                 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2857                 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2858                                  MI_SEMAPHORE_REGISTER);
2859         }
2860 }
2861
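/*
 * Given a semaphore wait decoded from IPEHR, find the engine that is
 * expected to signal it: on gen8+ by matching the semaphore GGTT offset,
 * on earlier gens by matching the MI_SEMAPHORE_SYNC bits against each
 * engine's mbox wait value. Returns ERR_PTR(-ENODEV) if no engine matches.
 */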
2862 static struct intel_engine_cs *
2863 semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
2864                                  u64 offset)
2865 {
2866         struct drm_i915_private *dev_priv = engine->i915;
2867         struct intel_engine_cs *signaller;
2868         enum intel_engine_id id;
2869
2870         if (INTEL_GEN(dev_priv) >= 8) {
2871                 for_each_engine(signaller, dev_priv, id) {
2872                         if (engine == signaller)
2873                                 continue;
2874
2875                         if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
2876                                 return signaller;
2877                 }
2878         } else {
2879                 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2880
2881                 for_each_engine(signaller, dev_priv, id) {
2882                         if (engine == signaller)
2883                                 continue;
2884
2885                         if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
2886                                 return signaller;
2887                 }
2888         }
2889
2890         DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
2891                          engine->name, ipehr, offset);
2892
2893         return ERR_PTR(-ENODEV);
2894 }
2895
2896 static struct intel_engine_cs *
2897 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2898 {
2899         struct drm_i915_private *dev_priv = engine->i915;
2900         void __iomem *vaddr;
2901         u32 cmd, ipehr, head;
2902         u64 offset = 0;
2903         int i, backwards;
2904
2905         /*
2906          * This function does not support execlist mode - any attempt to
2907          * proceed further into this function will result in a kernel panic
2908          * when dereferencing ring->buffer, which is not set up in execlist
2909          * mode.
2910          *
2911          * The correct way of doing it would be to derive the currently
2912          * executing ring buffer from the current context, which is derived
2913          * from the currently running request. Unfortunately, to get the
2914          * current request we would have to grab the struct_mutex before doing
2915          * anything else, which would be ill-advised since some other thread
2916          * might have grabbed it already and managed to hang itself, causing
2917          * the hang checker to deadlock.
2918          *
2919          * Therefore, this function does not support execlist mode in its
2920          * current form. Just return NULL and move on.
2921          */
2922         if (engine->buffer == NULL)
2923                 return NULL;
2924
2925         ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
2926         if (!ipehr_is_semaphore_wait(engine, ipehr))
2927                 return NULL;
2928
2929         /*
2930          * HEAD is likely pointing to the dword after the actual command,
2931          * so scan backwards until we find the MBOX. But limit it to just 3
2932          * or 4 dwords depending on the semaphore wait command size.
2933          * Note that we don't care about ACTHD here since that might
2934          * point at a batch, and semaphores are always emitted into the
2935          * ringbuffer itself.
2936          */
2937         head = I915_READ_HEAD(engine) & HEAD_ADDR;
2938         backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
2939         vaddr = (void __iomem *)engine->buffer->vaddr;
2940
2941         for (i = backwards; i; --i) {
2942                 /*
2943                  * Be paranoid and presume the hw has gone off into the wild -
2944                  * our ring is smaller than what the hardware (and hence
2945                  * HEAD_ADDR) allows. Also handles wrap-around.
2946                  */
2947                 head &= engine->buffer->size - 1;
2948
2949                 /* This here seems to blow up */
2950                 cmd = ioread32(vaddr + head);
2951                 if (cmd == ipehr)
2952                         break;
2953
2954                 head -= 4;
2955         }
2956
2957         if (!i)
2958                 return NULL;
2959
2960         *seqno = ioread32(vaddr + head + 4) + 1;
2961         if (INTEL_GEN(dev_priv) >= 8) {
2962                 offset = ioread32(vaddr + head + 12);
2963                 offset <<= 32;
2964                 offset |= ioread32(vaddr + head + 8);
2965         }
2966         return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
2967 }
2968
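/*
 * Check whether the semaphore that the engine is waiting on has already been
 * signalled. Returns 1 if the signalling engine has passed the wait seqno,
 * -1 if the wait looks unresolvable or deadlocked, and 0 otherwise.
 */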
2969 static int semaphore_passed(struct intel_engine_cs *engine)
2970 {
2971         struct drm_i915_private *dev_priv = engine->i915;
2972         struct intel_engine_cs *signaller;
2973         u32 seqno;
2974
2975         engine->hangcheck.deadlock++;
2976
2977         signaller = semaphore_waits_for(engine, &seqno);
2978         if (signaller == NULL)
2979                 return -1;
2980
2981         if (IS_ERR(signaller))
2982                 return 0;
2983
2984         /* Prevent pathological recursion due to driver bugs */
2985         if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
2986                 return -1;
2987
2988         if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
2989                 return 1;
2990
2991         /* cursory check for an unkickable deadlock */
2992         if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2993             semaphore_passed(signaller) < 0)
2994                 return -1;
2995
2996         return 0;
2997 }
2998
2999 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
3000 {
3001         struct intel_engine_cs *engine;
3002         enum intel_engine_id id;
3003
3004         for_each_engine(engine, dev_priv, id)
3005                 engine->hangcheck.deadlock = 0;
3006 }
3007
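/*
 * Accumulate the set bits of the current INSTDONE sample into *old_instdone
 * and return true if the sample added nothing new, i.e. no fresh
 * undone -> done transition was observed for this unit.
 */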
3008 static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
3009 {
3010         u32 tmp = current_instdone | *old_instdone;
3011         bool unchanged;
3012
3013         unchanged = tmp == *old_instdone;
3014         *old_instdone |= tmp;
3015
3016         return unchanged;
3017 }
3018
3019 static bool subunits_stuck(struct intel_engine_cs *engine)
3020 {
3021         struct drm_i915_private *dev_priv = engine->i915;
3022         struct intel_instdone instdone;
3023         struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
3024         bool stuck;
3025         int slice;
3026         int subslice;
3027
3028         if (engine->id != RCS)
3029                 return true;
3030
3031         intel_engine_get_instdone(engine, &instdone);
3032
3033         /* There might be unstable subunit states even when the
3034          * actual head is not moving. Filter out the unstable ones by
3035          * accumulating the undone -> done transitions and only
3036          * consider those as progress.
3037          */
3038         stuck = instdone_unchanged(instdone.instdone,
3039                                    &accu_instdone->instdone);
3040         stuck &= instdone_unchanged(instdone.slice_common,
3041                                     &accu_instdone->slice_common);
3042
3043         for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
3044                 stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
3045                                             &accu_instdone->sampler[slice][subslice]);
3046                 stuck &= instdone_unchanged(instdone.row[slice][subslice],
3047                                             &accu_instdone->row[slice][subslice]);
3048         }
3049
3050         return stuck;
3051 }
3052
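/*
 * Classify an engine whose seqno has stopped advancing: any ACTHD movement
 * or subunit progress still counts as ACTIVE, otherwise the head is
 * considered HUNG.
 */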
3053 static enum intel_engine_hangcheck_action
3054 head_stuck(struct intel_engine_cs *engine, u64 acthd)
3055 {
3056         if (acthd != engine->hangcheck.acthd) {
3057
3058                 /* Clear subunit states on head movement */
3059                 memset(&engine->hangcheck.instdone, 0,
3060                        sizeof(engine->hangcheck.instdone));
3061
3062                 return HANGCHECK_ACTIVE;
3063         }
3064
3065         if (!subunits_stuck(engine))
3066                 return HANGCHECK_ACTIVE;
3067
3068         return HANGCHECK_HUNG;
3069 }
3070
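/*
 * Decide how badly an engine with a non-advancing seqno is stuck: kick it
 * (KICK) if it is merely sitting in WAIT_FOR_EVENT or on an already
 * signalled semaphore, report WAIT for a legitimate semaphore wait, and
 * HUNG otherwise.
 */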
3071 static enum intel_engine_hangcheck_action
3072 engine_stuck(struct intel_engine_cs *engine, u64 acthd)
3073 {
3074         struct drm_i915_private *dev_priv = engine->i915;
3075         enum intel_engine_hangcheck_action ha;
3076         u32 tmp;
3077
3078         ha = head_stuck(engine, acthd);
3079         if (ha != HANGCHECK_HUNG)
3080                 return ha;
3081
3082         if (IS_GEN2(dev_priv))
3083                 return HANGCHECK_HUNG;
3084
3085         /* Is the chip hanging on a WAIT_FOR_EVENT?
3086          * If so we can simply poke the RB_WAIT bit
3087          * and break the hang. This should work on
3088          * all but the second generation chipsets.
3089          */
3090         tmp = I915_READ_CTL(engine);
3091         if (tmp & RING_WAIT) {
3092                 i915_handle_error(dev_priv, 0,
3093                                   "Kicking stuck wait on %s",
3094                                   engine->name);
3095                 I915_WRITE_CTL(engine, tmp);
3096                 return HANGCHECK_KICK;
3097         }
3098
3099         if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3100                 switch (semaphore_passed(engine)) {
3101                 default:
3102                         return HANGCHECK_HUNG;
3103                 case 1:
3104                         i915_handle_error(dev_priv, 0,
3105                                           "Kicking stuck semaphore on %s",
3106                                           engine->name);
3107                         I915_WRITE_CTL(engine, tmp);
3108                         return HANGCHECK_KICK;
3109                 case 0:
3110                         return HANGCHECK_WAIT;
3111                 }
3112         }
3113
3114         return HANGCHECK_HUNG;
3115 }
3116
3117 /*
3118  * This is called when the chip hasn't reported back with completed
3119  * batchbuffers in a long time. We keep track of per-ring seqno progress
3120  * and if there is no progress, the hangcheck score for that ring is
3121  * increased. Further, acthd is inspected to see if the ring is stuck. If
3122  * it is, we kick the ring. If we see no progress on three subsequent
3123  * calls we assume the chip is wedged and try to fix it by resetting it.
3124  */
3125 static void i915_hangcheck_elapsed(struct work_struct *work)
3126 {
3127         struct drm_i915_private *dev_priv =
3128                 container_of(work, typeof(*dev_priv),
3129                              gpu_error.hangcheck_work.work);
3130         struct intel_engine_cs *engine;
3131         enum intel_engine_id id;
3132         unsigned int hung = 0, stuck = 0;
3133         int busy_count = 0;
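/*
 * Score adjustments applied below: a small bump while an engine is busy but
 * not progressing, larger bumps when it has to be kicked or is hung, and a
 * decay once its seqno advances again.
 */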
3134 #define BUSY 1
3135 #define KICK 5
3136 #define HUNG 20
3137 #define ACTIVE_DECAY 15
3138
3139         if (!i915.enable_hangcheck)
3140                 return;
3141
3142         if (!READ_ONCE(dev_priv->gt.awake))
3143                 return;
3144
3145         /* As enabling the GPU requires fairly extensive mmio access,
3146          * periodically arm the mmio checker to see if we are triggering
3147          * any invalid access.
3148          */
3149         intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
3150
3151         for_each_engine(engine, dev_priv, id) {
3152                 bool busy = intel_engine_has_waiter(engine);
3153                 u64 acthd;
3154                 u32 seqno;
3155                 u32 submit;
3156
3157                 semaphore_clear_deadlocks(dev_priv);
3158
3159                 /* We don't strictly need an irq-barrier here, as we are not
3160                  * serving an interrupt request, but be paranoid in case the
3161                  * barrier has side-effects (such as preventing a broken
3162                  * cacheline snoop) and so be sure that we can see the seqno
3163                  * advance. If the seqno should stick, due to a stale
3164                  * cacheline, we would erroneously declare the GPU hung.
3165                  */
3166                 if (engine->irq_seqno_barrier)
3167                         engine->irq_seqno_barrier(engine);
3168
3169                 acthd = intel_engine_get_active_head(engine);
3170                 seqno = intel_engine_get_seqno(engine);
3171                 submit = READ_ONCE(engine->last_submitted_seqno);
3172
3173                 if (engine->hangcheck.seqno == seqno) {
3174                         if (i915_seqno_passed(seqno, submit)) {
3175                                 engine->hangcheck.action = HANGCHECK_IDLE;
3176                         } else {
3177                                 /* We always increment the hangcheck score
3178                                  * if the engine is busy and still processing
3179                                  * the same request, so that no single request
3180                                  * can run indefinitely (such as a chain of
3181                                  * batches). The only time we do not increment
3182                                  * the hangcheck score on this ring is when this
3183                                  * engine is in a legitimate wait for another
3184                                  * engine. In that case the waiting engine is a
3185                                  * victim and we want to be sure we catch the
3186                                  * right culprit. Then, every time we kick
3187                                  * the ring, we add a small increment to the
3188                                  * score so that we can catch a batch that is
3189                                  * being repeatedly kicked and so responsible
3190                                  * for stalling the machine.
3191                                  */
3192                                 engine->hangcheck.action =
3193                                         engine_stuck(engine, acthd);
3194
3195                                 switch (engine->hangcheck.action) {
3196                                 case HANGCHECK_IDLE:
3197                                 case HANGCHECK_WAIT:
3198                                         break;
3199                                 case HANGCHECK_ACTIVE:
3200                                         engine->hangcheck.score += BUSY;
3201                                         break;
3202                                 case HANGCHECK_KICK:
3203                                         engine->hangcheck.score += KICK;
3204                                         break;
3205                                 case HANGCHECK_HUNG:
3206                                         engine->hangcheck.score += HUNG;
3207                                         break;
3208                                 }
3209                         }
3210
3211                         if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3212                                 hung |= intel_engine_flag(engine);
3213                                 if (engine->hangcheck.action != HANGCHECK_HUNG)
3214                                         stuck |= intel_engine_flag(engine);
3215                         }
3216                 } else {
3217                         engine->hangcheck.action = HANGCHECK_ACTIVE;
3218
3219                         /* Gradually reduce the count so that we catch DoS
3220                          * attempts across multiple batches.
3221                          */
3222                         if (engine->hangcheck.score > 0)
3223                                 engine->hangcheck.score -= ACTIVE_DECAY;
3224                         if (engine->hangcheck.score < 0)
3225                                 engine->hangcheck.score = 0;
3226
3227                         /* Clear head and subunit states on seqno movement */
3228                         acthd = 0;
3229
3230                         memset(&engine->hangcheck.instdone, 0,
3231                                sizeof(engine->hangcheck.instdone));
3232                 }
3233
3234                 engine->hangcheck.seqno = seqno;
3235                 engine->hangcheck.acthd = acthd;
3236                 busy_count += busy;
3237         }
3238
3239         if (hung) {
3240                 char msg[80];
3241                 unsigned int tmp;
3242                 int len;
3243
3244                 /* If some rings hung but others were still busy, only
3245                  * blame the hanging rings in the synopsis.
3246                  */
3247                 if (stuck != hung)
3248                         hung &= ~stuck;
3249                 len = scnprintf(msg, sizeof(msg),
3250                                 "%s on ", stuck == hung ? "No progress" : "Hang");
3251                 for_each_engine_masked(engine, dev_priv, hung, tmp)
3252                         len += scnprintf(msg + len, sizeof(msg) - len,
3253                                          "%s, ", engine->name);
3254                 msg[len-2] = '\0';
3255
3256                 return i915_handle_error(dev_priv, hung, msg);
3257         }
3258
3259         /* Reset timer in case GPU hangs without another request being added */
3260         if (busy_count)
3261                 i915_queue_hangcheck(dev_priv);
3262 }
3263
3264 static void ibx_irq_reset(struct drm_device *dev)
3265 {
3266         struct drm_i915_private *dev_priv = to_i915(dev);
3267
3268         if (HAS_PCH_NOP(dev_priv))
3269                 return;
3270
3271         GEN5_IRQ_RESET(SDE);
3272
3273         if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3274                 I915_WRITE(SERR_INT, 0xffffffff);
3275 }
3276
3277 /*
3278  * SDEIER is also touched by the interrupt handler to work around missed PCH
3279  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3280  * instead we unconditionally enable all PCH interrupt sources here, but then
3281  * only unmask them as needed with SDEIMR.
3282  *
3283  * This function needs to be called before interrupts are enabled.
3284  */
3285 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3286 {
3287         struct drm_i915_private *dev_priv = to_i915(dev);
3288
3289         if (HAS_PCH_NOP(dev_priv))
3290                 return;
3291
3292         WARN_ON(I915_READ(SDEIER) != 0);
3293         I915_WRITE(SDEIER, 0xffffffff);
3294         POSTING_READ(SDEIER);
3295 }
3296
3297 static void gen5_gt_irq_reset(struct drm_device *dev)
3298 {
3299         struct drm_i915_private *dev_priv = to_i915(dev);
3300
3301         GEN5_IRQ_RESET(GT);
3302         if (INTEL_INFO(dev)->gen >= 6)
3303                 GEN5_IRQ_RESET(GEN6_PM);
3304 }
3305
3306 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3307 {
3308         enum pipe pipe;
3309
3310         if (IS_CHERRYVIEW(dev_priv))
3311                 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3312         else
3313                 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3314
3315         i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3316         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3317
3318         for_each_pipe(dev_priv, pipe) {
3319                 I915_WRITE(PIPESTAT(pipe),
3320                            PIPE_FIFO_UNDERRUN_STATUS |
3321                            PIPESTAT_INT_STATUS_MASK);
3322                 dev_priv->pipestat_irq_mask[pipe] = 0;
3323         }
3324
3325         GEN5_IRQ_RESET(VLV_);
3326         dev_priv->irq_mask = ~0;
3327 }
3328
3329 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3330 {
3331         u32 pipestat_mask;
3332         u32 enable_mask;
3333         enum pipe pipe;
3334
3335         pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3336                         PIPE_CRC_DONE_INTERRUPT_STATUS;
3337
3338         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3339         for_each_pipe(dev_priv, pipe)
3340                 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3341
3342         enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3343                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3344                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3345         if (IS_CHERRYVIEW(dev_priv))
3346                 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3347
3348         WARN_ON(dev_priv->irq_mask != ~0);
3349
3350         dev_priv->irq_mask = ~enable_mask;
3351
3352         GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
3353 }
3354
3355 /* drm_dma.h hooks */
3356
3357 static void ironlake_irq_reset(struct drm_device *dev)
3358 {
3359         struct drm_i915_private *dev_priv = to_i915(dev);
3360
3361         I915_WRITE(HWSTAM, 0xffffffff);
3362
3363         GEN5_IRQ_RESET(DE);
3364         if (IS_GEN7(dev_priv))
3365                 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3366
3367         gen5_gt_irq_reset(dev);
3368
3369         ibx_irq_reset(dev);
3370 }
3371
3372 static void valleyview_irq_preinstall(struct drm_device *dev)
3373 {
3374         struct drm_i915_private *dev_priv = to_i915(dev);
3375
3376         I915_WRITE(VLV_MASTER_IER, 0);
3377         POSTING_READ(VLV_MASTER_IER);
3378
3379         gen5_gt_irq_reset(dev);
3380
3381         spin_lock_irq(&dev_priv->irq_lock);
3382         if (dev_priv->display_irqs_enabled)
3383                 vlv_display_irq_reset(dev_priv);
3384         spin_unlock_irq(&dev_priv->irq_lock);
3385 }
3386
3387 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3388 {
3389         GEN8_IRQ_RESET_NDX(GT, 0);
3390         GEN8_IRQ_RESET_NDX(GT, 1);
3391         GEN8_IRQ_RESET_NDX(GT, 2);
3392         GEN8_IRQ_RESET_NDX(GT, 3);
3393 }
3394
3395 static void gen8_irq_reset(struct drm_device *dev)
3396 {
3397         struct drm_i915_private *dev_priv = to_i915(dev);
3398         int pipe;
3399
3400         I915_WRITE(GEN8_MASTER_IRQ, 0);
3401         POSTING_READ(GEN8_MASTER_IRQ);
3402
3403         gen8_gt_irq_reset(dev_priv);
3404
3405         for_each_pipe(dev_priv, pipe)
3406                 if (intel_display_power_is_enabled(dev_priv,
3407                                                    POWER_DOMAIN_PIPE(pipe)))
3408                         GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3409
3410         GEN5_IRQ_RESET(GEN8_DE_PORT_);
3411         GEN5_IRQ_RESET(GEN8_DE_MISC_);
3412         GEN5_IRQ_RESET(GEN8_PCU_);
3413
3414         if (HAS_PCH_SPLIT(dev_priv))
3415                 ibx_irq_reset(dev);
3416 }
3417
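/**
 * gen8_irq_power_well_post_enable - reprogram pipe interrupts after a power well is enabled
 * @dev_priv: i915 device instance
 * @pipe_mask: mask of pipes to reprogram
 *
 * Re-initialize the DE pipe interrupt registers for the pipes in @pipe_mask
 * from the saved interrupt masks, with vblank and FIFO underrun interrupts
 * additionally enabled.
 */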
3418 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3419                                      unsigned int pipe_mask)
3420 {
3421         uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3422         enum pipe pipe;
3423
3424         spin_lock_irq(&dev_priv->irq_lock);
3425         for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3426                 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3427                                   dev_priv->de_irq_mask[pipe],
3428                                   ~dev_priv->de_irq_mask[pipe] | extra_ier);
3429         spin_unlock_irq(&dev_priv->irq_lock);
3430 }
3431
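/**
 * gen8_irq_power_well_pre_disable - reset pipe interrupts before a power well is disabled
 * @dev_priv: i915 device instance
 * @pipe_mask: mask of pipes to reset
 *
 * Reset the DE pipe interrupt registers for the pipes in @pipe_mask and wait
 * for any pending display interrupt processing to complete.
 */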
3432 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3433                                      unsigned int pipe_mask)
3434 {
3435         enum pipe pipe;
3436
3437         spin_lock_irq(&dev_priv->irq_lock);
3438         for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3439                 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3440         spin_unlock_irq(&dev_priv->irq_lock);
3441
3442         /* make sure we're done processing display irqs */
3443         synchronize_irq(dev_priv->drm.irq);
3444 }
3445
3446 static void cherryview_irq_preinstall(struct drm_device *dev)
3447 {
3448         struct drm_i915_private *dev_priv = to_i915(dev);
3449
3450         I915_WRITE(GEN8_MASTER_IRQ, 0);
3451         POSTING_READ(GEN8_MASTER_IRQ);
3452
3453         gen8_gt_irq_reset(dev_priv);
3454
3455         GEN5_IRQ_RESET(GEN8_PCU_);
3456
3457         spin_lock_irq(&dev_priv->irq_lock);
3458         if (dev_priv->display_irqs_enabled)
3459                 vlv_display_irq_reset(dev_priv);
3460         spin_unlock_irq(&dev_priv->irq_lock);
3461 }
3462
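/*
 * Build the mask of hotplug interrupt bits, using the platform-specific hpd
 * table, for every encoder whose HPD pin is currently enabled.
 */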
3463 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3464                                   const u32 hpd[HPD_NUM_PINS])
3465 {
3466         struct intel_encoder *encoder;
3467         u32 enabled_irqs = 0;
3468
3469         for_each_intel_encoder(&dev_priv->drm, encoder)
3470                 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3471                         enabled_irqs |= hpd[encoder->hpd_pin];
3472
3473         return enabled_irqs;
3474 }
3475
3476 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3477 {
3478         u32 hotplug_irqs, hotplug, enabled_irqs;
3479
3480         if (HAS_PCH_IBX(dev_priv)) {
3481                 hotplug_irqs = SDE_HOTPLUG_MASK;
3482                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3483         } else {
3484                 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3485                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3486         }
3487
3488         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3489
3490         /*
3491          * Enable digital hotplug on the PCH, and configure the DP short pulse
3492          * duration to 2ms (which is the minimum in the Display Port spec).
3493          * The pulse duration bits are reserved on LPT+.
3494          */
3495         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3496         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3497         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3498         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3499         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3500         /*
3501          * When CPU and PCH are on the same package, port A
3502          * HPD must be enabled in both north and south.
3503          */
3504         if (HAS_PCH_LPT_LP(dev_priv))
3505                 hotplug |= PORTA_HOTPLUG_ENABLE;
3506         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3507 }
3508
3509 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3510 {
3511         u32 hotplug_irqs, hotplug, enabled_irqs;
3512
3513         hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3514         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3515
3516         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3517
3518         /* Enable digital hotplug on the PCH */
3519         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3520         hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3521                 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3522         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3523
3524         hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3525         hotplug |= PORTE_HOTPLUG_ENABLE;
3526         I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3527 }
3528
3529 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3530 {
3531         u32 hotplug_irqs, hotplug, enabled_irqs;
3532
3533         if (INTEL_GEN(dev_priv) >= 8) {
3534                 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3535                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3536
3537                 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3538         } else if (INTEL_GEN(dev_priv) >= 7) {
3539                 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3540                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3541
3542                 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3543         } else {
3544                 hotplug_irqs = DE_DP_A_HOTPLUG;
3545                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3546
3547                 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3548         }
3549
3550         /*
3551          * Enable digital hotplug on the CPU, and configure the DP short pulse
3552          * duration to 2ms (which is the minimum in the Display Port spec).
3553          * The pulse duration bits are reserved on HSW+.
3554          */
3555         hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3556         hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3557         hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3558         I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3559
3560         ibx_hpd_irq_setup(dev_priv);
3561 }
3562
3563 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3564 {
3565         u32 hotplug_irqs, hotplug, enabled_irqs;
3566
3567         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3568         hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3569
3570         bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3571
3572         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3573         hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3574                 PORTA_HOTPLUG_ENABLE;
3575
3576         DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3577                       hotplug, enabled_irqs);
3578         hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3579
3580         /*
3581          * For BXT the invert bit has to be set based on the AOB design
3582          * for the HPD detection logic; update it based on the VBT fields.
3583          */
3584
3585         if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3586             intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3587                 hotplug |= BXT_DDIA_HPD_INVERT;
3588         if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3589             intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3590                 hotplug |= BXT_DDIB_HPD_INVERT;
3591         if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3592             intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3593                 hotplug |= BXT_DDIC_HPD_INVERT;
3594
3595         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3596 }
3597
3598 static void ibx_irq_postinstall(struct drm_device *dev)
3599 {
3600         struct drm_i915_private *dev_priv = to_i915(dev);
3601         u32 mask;
3602
3603         if (HAS_PCH_NOP(dev_priv))
3604                 return;
3605
3606         if (HAS_PCH_IBX(dev_priv))
3607                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3608         else
3609                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3610
3611         gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3612         I915_WRITE(SDEIMR, ~mask);
3613 }
3614
3615 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3616 {
3617         struct drm_i915_private *dev_priv = to_i915(dev);
3618         u32 pm_irqs, gt_irqs;
3619
3620         pm_irqs = gt_irqs = 0;
3621
3622         dev_priv->gt_irq_mask = ~0;
3623         if (HAS_L3_DPF(dev_priv)) {
3624                 /* L3 parity interrupt is always unmasked. */
3625                 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
3626                 gt_irqs |= GT_PARITY_ERROR(dev_priv);
3627         }
3628
3629         gt_irqs |= GT_RENDER_USER_INTERRUPT;
3630         if (IS_GEN5(dev_priv)) {
3631                 gt_irqs |= ILK_BSD_USER_INTERRUPT;
3632         } else {
3633                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3634         }
3635
3636         GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3637
3638         if (INTEL_INFO(dev)->gen >= 6) {
3639                 /*
3640                  * RPS interrupts will get enabled/disabled on demand when RPS
3641                  * itself is enabled/disabled.
3642                  */
3643                 if (HAS_VEBOX(dev_priv)) {
3644                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3645                         dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
3646                 }
3647
3648                 dev_priv->pm_imr = 0xffffffff;
3649                 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
3650         }
3651 }
3652
3653 static int ironlake_irq_postinstall(struct drm_device *dev)
3654 {
3655         struct drm_i915_private *dev_priv = to_i915(dev);
3656         u32 display_mask, extra_mask;
3657
3658         if (INTEL_INFO(dev)->gen >= 7) {
3659                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3660                                 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3661                                 DE_PLANEB_FLIP_DONE_IVB |
3662                                 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3663                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3664                               DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3665                               DE_DP_A_HOTPLUG_IVB);
3666         } else {
3667                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3668                                 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3669                                 DE_AUX_CHANNEL_A |
3670                                 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3671                                 DE_POISON);
3672                 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3673                               DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3674                               DE_DP_A_HOTPLUG);
3675         }
3676
3677         dev_priv->irq_mask = ~display_mask;
3678
3679         I915_WRITE(HWSTAM, 0xeffe);
3680
3681         ibx_irq_pre_postinstall(dev);
3682
3683         GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3684
3685         gen5_gt_irq_postinstall(dev);
3686
3687         ibx_irq_postinstall(dev);
3688
3689         if (IS_IRONLAKE_M(dev_priv)) {
3690                 /* Enable PCU event interrupts
3691                  *
3692                  * spinlocking not required here for correctness since interrupt
3693                  * setup is guaranteed to run in single-threaded context. But we
3694                  * need it to make the assert_spin_locked happy. */
3695                 spin_lock_irq(&dev_priv->irq_lock);
3696                 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3697                 spin_unlock_irq(&dev_priv->irq_lock);
3698         }
3699
3700         return 0;
3701 }
3702
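/*
 * Mark VLV/CHV display interrupts as wanted and, if the interrupt handler is
 * already installed, reset and program the display interrupt registers.
 * Caller must hold dev_priv->irq_lock.
 */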
3703 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3704 {
3705         assert_spin_locked(&dev_priv->irq_lock);
3706
3707         if (dev_priv->display_irqs_enabled)
3708                 return;
3709
3710         dev_priv->display_irqs_enabled = true;
3711
3712         if (intel_irqs_enabled(dev_priv)) {
3713                 vlv_display_irq_reset(dev_priv);
3714                 vlv_display_irq_postinstall(dev_priv);
3715         }
3716 }
3717
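/*
 * Mark VLV/CHV display interrupts as unwanted and, if the interrupt handler
 * is already installed, reset the display interrupt registers.
 * Caller must hold dev_priv->irq_lock.
 */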
3718 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3719 {
3720         assert_spin_locked(&dev_priv->irq_lock);
3721
3722         if (!dev_priv->display_irqs_enabled)
3723                 return;
3724
3725         dev_priv->display_irqs_enabled = false;
3726
3727         if (intel_irqs_enabled(dev_priv))
3728                 vlv_display_irq_reset(dev_priv);
3729 }
3730
3731
3732 static int valleyview_irq_postinstall(struct drm_device *dev)
3733 {
3734         struct drm_i915_private *dev_priv = to_i915(dev);
3735
3736         gen5_gt_irq_postinstall(dev);
3737
3738         spin_lock_irq(&dev_priv->irq_lock);
3739         if (dev_priv->display_irqs_enabled)
3740                 vlv_display_irq_postinstall(dev_priv);
3741         spin_unlock_irq(&dev_priv->irq_lock);
3742
3743         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3744         POSTING_READ(VLV_MASTER_IER);
3745
3746         return 0;
3747 }
3748
3749 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3750 {
3751         /* These are interrupts we'll toggle with the ring mask register */
3752         uint32_t gt_interrupts[] = {
3753                 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3754                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3755                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3756                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3757                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3758                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3759                         GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3760                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3761                 0,
3762                 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3763                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3764                 };
3765
3766         if (HAS_L3_DPF(dev_priv))
3767                 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
3768
3769         dev_priv->pm_ier = 0x0;
3770         dev_priv->pm_imr = ~dev_priv->pm_ier;
3771         GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3772         GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3773         /*
3774          * RPS interrupts will get enabled/disabled on demand when RPS itself
3775          * is enabled/disabled. The same will be the case for GuC interrupts.
3776          */
3777         GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
3778         GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3779 }
3780
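/*
 * Program the gen8+ display engine interrupt registers: per-pipe interrupts
 * (only for pipes whose power domain is currently enabled), port interrupts
 * (AUX channels plus hotplug/GMBUS where applicable) and misc (GSE).
 */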
3781 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3782 {
3783         uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3784         uint32_t de_pipe_enables;
3785         u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3786         u32 de_port_enables;
3787         u32 de_misc_masked = GEN8_DE_MISC_GSE;
3788         enum pipe pipe;
3789
3790         if (INTEL_INFO(dev_priv)->gen >= 9) {
3791                 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3792                                   GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3793                 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3794                                   GEN9_AUX_CHANNEL_D;
3795                 if (IS_BROXTON(dev_priv))
3796                         de_port_masked |= BXT_DE_PORT_GMBUS;
3797         } else {
3798                 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3799                                   GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3800         }
3801
3802         de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3803                                            GEN8_PIPE_FIFO_UNDERRUN;
3804
3805         de_port_enables = de_port_masked;
3806         if (IS_BROXTON(dev_priv))
3807                 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3808         else if (IS_BROADWELL(dev_priv))
3809                 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3810
3811         dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3812         dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3813         dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3814
3815         for_each_pipe(dev_priv, pipe)
3816                 if (intel_display_power_is_enabled(dev_priv,
3817                                 POWER_DOMAIN_PIPE(pipe)))
3818                         GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3819                                           dev_priv->de_irq_mask[pipe],
3820                                           de_pipe_enables);
3821
3822         GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3823         GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3824 }
3825
3826 static int gen8_irq_postinstall(struct drm_device *dev)
3827 {
3828         struct drm_i915_private *dev_priv = to_i915(dev);
3829
3830         if (HAS_PCH_SPLIT(dev_priv))
3831                 ibx_irq_pre_postinstall(dev);
3832
3833         gen8_gt_irq_postinstall(dev_priv);
3834         gen8_de_irq_postinstall(dev_priv);
3835
3836         if (HAS_PCH_SPLIT(dev_priv))
3837                 ibx_irq_postinstall(dev);
3838
3839         I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3840         POSTING_READ(GEN8_MASTER_IRQ);
3841
3842         return 0;
3843 }
3844
3845 static int cherryview_irq_postinstall(struct drm_device *dev)
3846 {
3847         struct drm_i915_private *dev_priv = to_i915(dev);
3848
3849         gen8_gt_irq_postinstall(dev_priv);
3850
3851         spin_lock_irq(&dev_priv->irq_lock);
3852         if (dev_priv->display_irqs_enabled)
3853                 vlv_display_irq_postinstall(dev_priv);
3854         spin_unlock_irq(&dev_priv->irq_lock);
3855
3856         I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3857         POSTING_READ(GEN8_MASTER_IRQ);
3858
3859         return 0;
3860 }
3861
3862 static void gen8_irq_uninstall(struct drm_device *dev)
3863 {
3864         struct drm_i915_private *dev_priv = to_i915(dev);
3865
3866         if (!dev_priv)
3867                 return;
3868
3869         gen8_irq_reset(dev);
3870 }
3871
3872 static void valleyview_irq_uninstall(struct drm_device *dev)
3873 {
3874         struct drm_i915_private *dev_priv = to_i915(dev);
3875
3876         if (!dev_priv)
3877                 return;
3878
3879         I915_WRITE(VLV_MASTER_IER, 0);
3880         POSTING_READ(VLV_MASTER_IER);
3881
3882         gen5_gt_irq_reset(dev);
3883
3884         I915_WRITE(HWSTAM, 0xffffffff);
3885
3886         spin_lock_irq(&dev_priv->irq_lock);
3887         if (dev_priv->display_irqs_enabled)
3888                 vlv_display_irq_reset(dev_priv);
3889         spin_unlock_irq(&dev_priv->irq_lock);
3890 }
3891
3892 static void cherryview_irq_uninstall(struct drm_device *dev)
3893 {
3894         struct drm_i915_private *dev_priv = to_i915(dev);
3895
3896         if (!dev_priv)
3897                 return;
3898
3899         I915_WRITE(GEN8_MASTER_IRQ, 0);
3900         POSTING_READ(GEN8_MASTER_IRQ);
3901
3902         gen8_gt_irq_reset(dev_priv);
3903
3904         GEN5_IRQ_RESET(GEN8_PCU_);
3905
3906         spin_lock_irq(&dev_priv->irq_lock);
3907         if (dev_priv->display_irqs_enabled)
3908                 vlv_display_irq_reset(dev_priv);
3909         spin_unlock_irq(&dev_priv->irq_lock);
3910 }
3911
3912 static void ironlake_irq_uninstall(struct drm_device *dev)
3913 {
3914         struct drm_i915_private *dev_priv = to_i915(dev);
3915
3916         if (!dev_priv)
3917                 return;
3918
3919         ironlake_irq_reset(dev);
3920 }
3921
3922 static void i8xx_irq_preinstall(struct drm_device * dev)
3923 {
3924         struct drm_i915_private *dev_priv = to_i915(dev);
3925         int pipe;
3926
3927         for_each_pipe(dev_priv, pipe)
3928                 I915_WRITE(PIPESTAT(pipe), 0);
3929         I915_WRITE16(IMR, 0xffff);
3930         I915_WRITE16(IER, 0x0);
3931         POSTING_READ16(IER);
3932 }
3933
3934 static int i8xx_irq_postinstall(struct drm_device *dev)
3935 {
3936         struct drm_i915_private *dev_priv = to_i915(dev);
3937
3938         I915_WRITE16(EMR,
3939                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3940
3941         /* Unmask the interrupts that we always want on. */
3942         dev_priv->irq_mask =
3943                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3944                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3945                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3946                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3947         I915_WRITE16(IMR, dev_priv->irq_mask);
3948
3949         I915_WRITE16(IER,
3950                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3951                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3952                      I915_USER_INTERRUPT);
3953         POSTING_READ16(IER);
3954
3955         /* Interrupt setup is already guaranteed to be single-threaded, this is
3956          * just to make the assert_spin_locked check happy. */
3957         spin_lock_irq(&dev_priv->irq_lock);
3958         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3959         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3960         spin_unlock_irq(&dev_priv->irq_lock);
3961
3962         return 0;
3963 }
3964
3965 /*
3966  * Returns true when a page flip has completed.
3967  */
3968 static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
3969                                int plane, int pipe, u32 iir)
3970 {
3971         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3972
3973         if (!intel_pipe_handle_vblank(dev_priv, pipe))
3974                 return false;
3975
3976         if ((iir & flip_pending) == 0)
3977                 goto check_page_flip;
3978
3979         /* We detect FlipDone by looking for the change in PendingFlip from '1'
3980          * to '0' on the following vblank, i.e. IIR has the PendingFlip
3981          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3982          * the flip is completed (no longer pending). Since this doesn't raise
3983          * an interrupt per se, we watch for the change at vblank.
3984          */
3985         if (I915_READ16(ISR) & flip_pending)
3986                 goto check_page_flip;
3987
3988         intel_finish_page_flip_cs(dev_priv, pipe);
3989         return true;
3990
3991 check_page_flip:
3992         intel_check_page_flip(dev_priv, pipe);
3993         return false;
3994 }
3995
3996 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3997 {
3998         struct drm_device *dev = arg;
3999         struct drm_i915_private *dev_priv = to_i915(dev);
4000         u16 iir, new_iir;
4001         u32 pipe_stats[2];
4002         int pipe;
4003         u16 flip_mask =
4004                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4005                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4006         irqreturn_t ret;
4007
4008         if (!intel_irqs_enabled(dev_priv))
4009                 return IRQ_NONE;
4010
4011         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4012         disable_rpm_wakeref_asserts(dev_priv);
4013
4014         ret = IRQ_NONE;
4015         iir = I915_READ16(IIR);
4016         if (iir == 0)
4017                 goto out;
4018
4019         while (iir & ~flip_mask) {
4020                 /* Can't rely on pipestat interrupt bit in iir as it might
4021                  * have been cleared after the pipestat interrupt was received.
4022                  * It doesn't set the bit in iir again, but it still produces
4023                  * interrupts (for non-MSI).
4024                  */
4025                 spin_lock(&dev_priv->irq_lock);
4026                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4027                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4028
4029                 for_each_pipe(dev_priv, pipe) {
4030                         i915_reg_t reg = PIPESTAT(pipe);
4031                         pipe_stats[pipe] = I915_READ(reg);
4032
4033                         /*
4034                          * Clear the PIPE*STAT regs before the IIR
4035                          */
4036                         if (pipe_stats[pipe] & 0x8000ffff)
4037                                 I915_WRITE(reg, pipe_stats[pipe]);
4038                 }
4039                 spin_unlock(&dev_priv->irq_lock);
4040
4041                 I915_WRITE16(IIR, iir & ~flip_mask);
4042                 new_iir = I915_READ16(IIR); /* Flush posted writes */
4043
4044                 if (iir & I915_USER_INTERRUPT)
4045                         notify_ring(dev_priv->engine[RCS]);
4046
4047                 for_each_pipe(dev_priv, pipe) {
4048                         int plane = pipe;
4049                         if (HAS_FBC(dev_priv))
4050                                 plane = !plane;
4051
4052                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4053                             i8xx_handle_vblank(dev_priv, plane, pipe, iir))
4054                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4055
4056                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4057                                 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4058
4059                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4060                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4061                                                                     pipe);
4062                 }
4063
4064                 iir = new_iir;
4065         }
4066         ret = IRQ_HANDLED;
4067
4068 out:
4069         enable_rpm_wakeref_asserts(dev_priv);
4070
4071         return ret;
4072 }
4073
4074 static void i8xx_irq_uninstall(struct drm_device * dev)
4075 {
4076         struct drm_i915_private *dev_priv = to_i915(dev);
4077         int pipe;
4078
4079         for_each_pipe(dev_priv, pipe) {
4080                 /* Clear enable bits; then clear status bits */
4081                 I915_WRITE(PIPESTAT(pipe), 0);
4082                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4083         }
4084         I915_WRITE16(IMR, 0xffff);
4085         I915_WRITE16(IER, 0x0);
4086         I915_WRITE16(IIR, I915_READ16(IIR));
4087 }
4088
4089 static void i915_irq_preinstall(struct drm_device * dev)
4090 {
4091         struct drm_i915_private *dev_priv = to_i915(dev);
4092         int pipe;
4093
4094         if (I915_HAS_HOTPLUG(dev)) {
4095                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4096                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4097         }
4098
4099         I915_WRITE16(HWSTAM, 0xeffe);
4100         for_each_pipe(dev_priv, pipe)
4101                 I915_WRITE(PIPESTAT(pipe), 0);
4102         I915_WRITE(IMR, 0xffffffff);
4103         I915_WRITE(IER, 0x0);
4104         POSTING_READ(IER);
4105 }
4106
4107 static int i915_irq_postinstall(struct drm_device *dev)
4108 {
4109         struct drm_i915_private *dev_priv = to_i915(dev);
4110         u32 enable_mask;
4111
4112         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4113
4114         /* Unmask the interrupts that we always want on. */
4115         dev_priv->irq_mask =
4116                 ~(I915_ASLE_INTERRUPT |
4117                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4118                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4119                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4120                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4121
4122         enable_mask =
4123                 I915_ASLE_INTERRUPT |
4124                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4125                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4126                 I915_USER_INTERRUPT;
4127
4128         if (I915_HAS_HOTPLUG(dev)) {
4129                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4130                 POSTING_READ(PORT_HOTPLUG_EN);
4131
4132                 /* Enable in IER... */
4133                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4134                 /* and unmask in IMR */
4135                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4136         }
4137
4138         I915_WRITE(IMR, dev_priv->irq_mask);
4139         I915_WRITE(IER, enable_mask);
4140         POSTING_READ(IER);
4141
4142         i915_enable_asle_pipestat(dev_priv);
4143
4144         /* Interrupt setup is already guaranteed to be single-threaded, this is
4145          * just to make the assert_spin_locked check happy. */
4146         spin_lock_irq(&dev_priv->irq_lock);
4147         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4148         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4149         spin_unlock_irq(&dev_priv->irq_lock);
4150
4151         return 0;
4152 }
4153
4154 /*
4155  * Returns true when a page flip has completed.
4156  */
4157 static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
4158                                int plane, int pipe, u32 iir)
4159 {
4160         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4161
4162         if (!intel_pipe_handle_vblank(dev_priv, pipe))
4163                 return false;
4164
4165         if ((iir & flip_pending) == 0)
4166                 goto check_page_flip;
4167
4168         /* We detect FlipDone by looking for the change in PendingFlip from '1'
4169          * to '0' on the following vblank, i.e. IIR has the PendingFlip
4170          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4171          * the flip is completed (no longer pending). Since this doesn't raise
4172          * an interrupt per se, we watch for the change at vblank.
4173          */
4174         if (I915_READ(ISR) & flip_pending)
4175                 goto check_page_flip;
4176
4177         intel_finish_page_flip_cs(dev_priv, pipe);
4178         return true;
4179
4180 check_page_flip:
4181         intel_check_page_flip(dev_priv, pipe);
4182         return false;
4183 }
4184
4185 static irqreturn_t i915_irq_handler(int irq, void *arg)
4186 {
4187         struct drm_device *dev = arg;
4188         struct drm_i915_private *dev_priv = to_i915(dev);
4189         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4190         u32 flip_mask =
4191                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4192                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4193         int pipe, ret = IRQ_NONE;
4194
4195         if (!intel_irqs_enabled(dev_priv))
4196                 return IRQ_NONE;
4197
4198         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4199         disable_rpm_wakeref_asserts(dev_priv);
4200
4201         iir = I915_READ(IIR);
4202         do {
4203                 bool irq_received = (iir & ~flip_mask) != 0;
4204                 bool blc_event = false;
4205
4206                 /* Can't rely on pipestat interrupt bit in iir as it might
4207                  * have been cleared after the pipestat interrupt was received.
4208                  * It doesn't set the bit in iir again, but it still produces
4209                  * interrupts (for non-MSI).
4210                  */
4211                 spin_lock(&dev_priv->irq_lock);
4212                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4213                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4214
4215                 for_each_pipe(dev_priv, pipe) {
4216                         i915_reg_t reg = PIPESTAT(pipe);
4217                         pipe_stats[pipe] = I915_READ(reg);
4218
4219                         /* Clear the PIPE*STAT regs before the IIR */
4220                         if (pipe_stats[pipe] & 0x8000ffff) {
4221                                 I915_WRITE(reg, pipe_stats[pipe]);
4222                                 irq_received = true;
4223                         }
4224                 }
4225                 spin_unlock(&dev_priv->irq_lock);
4226
4227                 if (!irq_received)
4228                         break;
4229
4230                 /* Consume port.  Then clear IIR or we'll miss events */
4231                 if (I915_HAS_HOTPLUG(dev_priv) &&
4232                     iir & I915_DISPLAY_PORT_INTERRUPT) {
4233                         u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4234                         if (hotplug_status)
4235                                 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4236                 }
4237
4238                 I915_WRITE(IIR, iir & ~flip_mask);
4239                 new_iir = I915_READ(IIR); /* Flush posted writes */
4240
4241                 if (iir & I915_USER_INTERRUPT)
4242                         notify_ring(dev_priv->engine[RCS]);
4243
4244                 for_each_pipe(dev_priv, pipe) {
4245                         int plane = pipe;
4246                         if (HAS_FBC(dev_priv))
4247                                 plane = !plane;
4248
4249                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4250                             i915_handle_vblank(dev_priv, plane, pipe, iir))
4251                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4252
4253                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4254                                 blc_event = true;
4255
4256                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4257                                 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4258
4259                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4260                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4261                                                                     pipe);
4262                 }
4263
4264                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4265                         intel_opregion_asle_intr(dev_priv);
4266
4267                 /* With MSI, interrupts are only generated when iir
4268                  * transitions from zero to nonzero.  If another bit got
4269                  * set while we were handling the existing iir bits, then
4270                  * we would never get another interrupt.
4271                  *
4272                  * This is fine on non-MSI as well, as if we hit this path
4273                  * we avoid exiting the interrupt handler only to generate
4274                  * another one.
4275                  *
4276                  * Note that for MSI this could cause a stray interrupt report
4277                  * if an interrupt landed in the time between writing IIR and
4278                  * the posting read.  This should be rare enough to never
4279                  * trigger the 99% of 100,000 interrupts test for disabling
4280                  * stray interrupts.
4281                  */
4282                 ret = IRQ_HANDLED;
4283                 iir = new_iir;
4284         } while (iir & ~flip_mask);
4285
4286         enable_rpm_wakeref_asserts(dev_priv);
4287
4288         return ret;
4289 }
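/*
 * A sketch of the loop structure used by i915_irq_handler() above (and by
 * i965_irq_handler() below); this is an illustration of the read/ack/re-read
 * pattern that the MSI comment relies on, not additional driver code:
 *
 *      iir = I915_READ(IIR);
 *      do {
 *              ... ack PIPESTAT and hotplug sources ...
 *              I915_WRITE(IIR, iir & ~flip_mask);
 *              new_iir = I915_READ(IIR);
 *              ... handle the sources recorded in iir ...
 *              iir = new_iir;
 *      } while (iir & ~flip_mask);
 *
 * Because IIR is re-read after the ack, a bit that becomes set while the
 * current batch is being handled is processed on the next loop iteration
 * instead of being lost, which matters on MSI where only a 0->1 transition
 * of IIR raises a new interrupt.
 */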
4290
4291 static void i915_irq_uninstall(struct drm_device * dev)
4292 {
4293         struct drm_i915_private *dev_priv = to_i915(dev);
4294         int pipe;
4295
4296         if (I915_HAS_HOTPLUG(dev)) {
4297                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4298                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4299         }
4300
4301         I915_WRITE16(HWSTAM, 0xffff);
4302         for_each_pipe(dev_priv, pipe) {
4303                 /* Clear enable bits; then clear status bits */
4304                 I915_WRITE(PIPESTAT(pipe), 0);
4305                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4306         }
4307         I915_WRITE(IMR, 0xffffffff);
4308         I915_WRITE(IER, 0x0);
4309
4310         I915_WRITE(IIR, I915_READ(IIR));
4311 }
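/*
 * Note on the PIPESTAT double write in i915_irq_uninstall() above (an
 * illustration derived from the inline comment, hedged rather than taken
 * from bspec): the first write of 0 drops the enable bits in the upper
 * half of the register, and the second write echoes the current value
 * back, which clears the remaining status bits since those are
 * write-one-to-clear.
 */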
4312
4313 static void i965_irq_preinstall(struct drm_device * dev)
4314 {
4315         struct drm_i915_private *dev_priv = to_i915(dev);
4316         int pipe;
4317
4318         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4319         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4320
4321         I915_WRITE(HWSTAM, 0xeffe);
4322         for_each_pipe(dev_priv, pipe)
4323                 I915_WRITE(PIPESTAT(pipe), 0);
4324         I915_WRITE(IMR, 0xffffffff);
4325         I915_WRITE(IER, 0x0);
4326         POSTING_READ(IER);
4327 }
4328
4329 static int i965_irq_postinstall(struct drm_device *dev)
4330 {
4331         struct drm_i915_private *dev_priv = to_i915(dev);
4332         u32 enable_mask;
4333         u32 error_mask;
4334
4335         /* Unmask the interrupts that we always want on. */
4336         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4337                                I915_DISPLAY_PORT_INTERRUPT |
4338                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4339                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4340                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4341                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4342                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4343
4344         enable_mask = ~dev_priv->irq_mask;
4345         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4346                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4347         enable_mask |= I915_USER_INTERRUPT;
4348
4349         if (IS_G4X(dev_priv))
4350                 enable_mask |= I915_BSD_USER_INTERRUPT;
4351
4352         /* Interrupt setup is already guaranteed to be single-threaded; this is
4353          * just to make the assert_spin_locked check happy. */
4354         spin_lock_irq(&dev_priv->irq_lock);
4355         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4356         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4357         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4358         spin_unlock_irq(&dev_priv->irq_lock);
4359
4360         /*
4361          * Enable some error detection, note the instruction error mask
4362          * bit is reserved, so we leave it masked.
4363          */
4364         if (IS_G4X(dev_priv)) {
4365                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4366                                GM45_ERROR_MEM_PRIV |
4367                                GM45_ERROR_CP_PRIV |
4368                                I915_ERROR_MEMORY_REFRESH);
4369         } else {
4370                 error_mask = ~(I915_ERROR_PAGE_TABLE |
4371                                I915_ERROR_MEMORY_REFRESH);
4372         }
4373         I915_WRITE(EMR, error_mask);
4374
4375         I915_WRITE(IMR, dev_priv->irq_mask);
4376         I915_WRITE(IER, enable_mask);
4377         POSTING_READ(IER);
4378
4379         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4380         POSTING_READ(PORT_HOTPLUG_EN);
4381
4382         i915_enable_asle_pipestat(dev_priv);
4383
4384         return 0;
4385 }
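/*
 * Worked reading of the mask arithmetic in i965_irq_postinstall() above,
 * derived purely from the code (illustration only): IMR leaves ASLE,
 * display port, pipe A/B events, plane A/B flip pending and command
 * parser errors unmasked, while IER enables that same set minus the flip
 * pending sources plus the render user interrupt (and the BSD user
 * interrupt on G4X).
 */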
4386
4387 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4388 {
4389         u32 hotplug_en;
4390
4391         assert_spin_locked(&dev_priv->irq_lock);
4392
4393         /* Note HDMI and DP share hotplug bits */
4394         /* enable bits are the same for all generations */
4395         hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4396         /* Programming the CRT detection parameters tends to
4397          * generate a spurious hotplug event about three seconds
4398          * later.  So just do it once.
4399          */
4400         if (IS_G4X(dev_priv))
4401                 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4402         hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4403
4404         /* Ignore TV since it's buggy */
4405         i915_hotplug_interrupt_update_locked(dev_priv,
4406                                              HOTPLUG_INT_EN_MASK |
4407                                              CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4408                                              CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4409                                              hotplug_en);
4410 }
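/*
 * Illustrative caller pattern for the hook above (a sketch assuming the
 * hotplug code reaches it through the display vtable filled in by
 * intel_irq_init(); not a verbatim copy of that code). The irq_lock must
 * already be held, which is what the assert_spin_locked() checks:
 *
 *      spin_lock_irq(&dev_priv->irq_lock);
 *      if (dev_priv->display.hpd_irq_setup)
 *              dev_priv->display.hpd_irq_setup(dev_priv);
 *      spin_unlock_irq(&dev_priv->irq_lock);
 */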
4411
4412 static irqreturn_t i965_irq_handler(int irq, void *arg)
4413 {
4414         struct drm_device *dev = arg;
4415         struct drm_i915_private *dev_priv = to_i915(dev);
4416         u32 iir, new_iir;
4417         u32 pipe_stats[I915_MAX_PIPES];
4418         int ret = IRQ_NONE, pipe;
4419         u32 flip_mask =
4420                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4421                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4422
4423         if (!intel_irqs_enabled(dev_priv))
4424                 return IRQ_NONE;
4425
4426         /* IRQs are synced during runtime_suspend; we don't require a wakeref */
4427         disable_rpm_wakeref_asserts(dev_priv);
4428
4429         iir = I915_READ(IIR);
4430
4431         for (;;) {
4432                 bool irq_received = (iir & ~flip_mask) != 0;
4433                 bool blc_event = false;
4434
4435                 /* Can't rely on pipestat interrupt bit in iir as it might
4436                  * have been cleared after the pipestat interrupt was received.
4437                  * It doesn't set the bit in iir again, but it still produces
4438                  * interrupts (for non-MSI).
4439                  */
4440                 spin_lock(&dev_priv->irq_lock);
4441                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4442                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4443
4444                 for_each_pipe(dev_priv, pipe) {
4445                         i915_reg_t reg = PIPESTAT(pipe);
4446                         pipe_stats[pipe] = I915_READ(reg);
4447
4448                         /*
4449                          * Clear the PIPE*STAT regs before the IIR
4450                          */
4451                         if (pipe_stats[pipe] & 0x8000ffff) {
4452                                 I915_WRITE(reg, pipe_stats[pipe]);
4453                                 irq_received = true;
4454                         }
4455                 }
4456                 spin_unlock(&dev_priv->irq_lock);
4457
4458                 if (!irq_received)
4459                         break;
4460
4461                 ret = IRQ_HANDLED;
4462
4463                 /* Consume port.  Then clear IIR or we'll miss events */
4464                 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
4465                         u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4466                         if (hotplug_status)
4467                                 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4468                 }
4469
4470                 I915_WRITE(IIR, iir & ~flip_mask);
4471                 new_iir = I915_READ(IIR); /* Flush posted writes */
4472
4473                 if (iir & I915_USER_INTERRUPT)
4474                         notify_ring(dev_priv->engine[RCS]);
4475                 if (iir & I915_BSD_USER_INTERRUPT)
4476                         notify_ring(dev_priv->engine[VCS]);
4477
4478                 for_each_pipe(dev_priv, pipe) {
4479                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4480                             i915_handle_vblank(dev_priv, pipe, pipe, iir))
4481                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4482
4483                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4484                                 blc_event = true;
4485
4486                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4487                                 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4488
4489                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4490                                 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4491                 }
4492
4493                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4494                         intel_opregion_asle_intr(dev_priv);
4495
4496                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4497                         gmbus_irq_handler(dev_priv);
4498
4499                 /* With MSI, interrupts are only generated when iir
4500                  * transitions from zero to nonzero.  If another bit got
4501                  * set while we were handling the existing iir bits, then
4502                  * we would never get another interrupt.
4503                  *
4504                  * This is fine on non-MSI as well, as if we hit this path
4505                  * we avoid exiting the interrupt handler only to generate
4506                  * another one.
4507                  *
4508                  * Note that for MSI this could cause a stray interrupt report
4509                  * if an interrupt landed in the time between writing IIR and
4510                  * the posting read.  This should be rare enough to never
4511                  * trigger the 99% of 100,000 interrupts test for disabling
4512                  * stray interrupts.
4513                  */
4514                 iir = new_iir;
4515         }
4516
4517         enable_rpm_wakeref_asserts(dev_priv);
4518
4519         return ret;
4520 }
4521
4522 static void i965_irq_uninstall(struct drm_device * dev)
4523 {
4524         struct drm_i915_private *dev_priv = to_i915(dev);
4525         int pipe;
4526
4527         if (!dev_priv)
4528                 return;
4529
4530         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4531         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4532
4533         I915_WRITE(HWSTAM, 0xffffffff);
4534         for_each_pipe(dev_priv, pipe)
4535                 I915_WRITE(PIPESTAT(pipe), 0);
4536         I915_WRITE(IMR, 0xffffffff);
4537         I915_WRITE(IER, 0x0);
4538
4539         for_each_pipe(dev_priv, pipe)
4540                 I915_WRITE(PIPESTAT(pipe),
4541                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4542         I915_WRITE(IIR, I915_READ(IIR));
4543 }
4544
4545 /**
4546  * intel_irq_init - initializes irq support
4547  * @dev_priv: i915 device instance
4548  *
4549  * This function initializes all the irq support, including work items, timers,
4550  * and all the vtables. It does not set up the interrupt itself, though.
4551  */
4552 void intel_irq_init(struct drm_i915_private *dev_priv)
4553 {
4554         struct drm_device *dev = &dev_priv->drm;
4555
4556         intel_hpd_init_work(dev_priv);
4557
4558         INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4559         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4560
4561         if (HAS_GUC_SCHED(dev))
4562                 dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
4563
4564         /* Let's track the enabled rps events */
4565         if (IS_VALLEYVIEW(dev_priv))
4566                 /* WaGsvRC0ResidencyMethod:vlv */
4567                 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4568         else
4569                 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4570
4571         dev_priv->rps.pm_intr_keep = 0;
4572
4573         /*
4574          * SNB and IVB can hard hang (and VLV and CHV may as well) on a
4575          * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
4576          *
4577          * TODO: verify if this can be reproduced on VLV,CHV.
4578          */
4579         if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
4580                 dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
4581
4582         if (INTEL_INFO(dev_priv)->gen >= 8)
4583                 dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
4584
4585         INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4586                           i915_hangcheck_elapsed);
4587
4588         if (IS_GEN2(dev_priv)) {
4589                 /* Gen2 doesn't have a hardware frame counter */
4590                 dev->max_vblank_count = 0;
4591                 dev->driver->get_vblank_counter = drm_vblank_no_hw_counter;
4592         } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4593                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4594                 dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4595         } else {
4596                 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4597                 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4598         }
4599
4600         /*
4601          * Opt out of the vblank disable timer on everything except gen2.
4602          * Gen2 doesn't have a hardware frame counter and so depends on
4603          * vblank interrupts to produce sane vblank sequence numbers.
4604          */
4605         if (!IS_GEN2(dev_priv))
4606                 dev->vblank_disable_immediate = true;
4607
4608         dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4609         dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4610
4611         if (IS_CHERRYVIEW(dev_priv)) {
4612                 dev->driver->irq_handler = cherryview_irq_handler;
4613                 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4614                 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4615                 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4616                 dev->driver->enable_vblank = i965_enable_vblank;
4617                 dev->driver->disable_vblank = i965_disable_vblank;
4618                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4619         } else if (IS_VALLEYVIEW(dev_priv)) {
4620                 dev->driver->irq_handler = valleyview_irq_handler;
4621                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4622                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4623                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4624                 dev->driver->enable_vblank = i965_enable_vblank;
4625                 dev->driver->disable_vblank = i965_disable_vblank;
4626                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4627         } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4628                 dev->driver->irq_handler = gen8_irq_handler;
4629                 dev->driver->irq_preinstall = gen8_irq_reset;
4630                 dev->driver->irq_postinstall = gen8_irq_postinstall;
4631                 dev->driver->irq_uninstall = gen8_irq_uninstall;
4632                 dev->driver->enable_vblank = gen8_enable_vblank;
4633                 dev->driver->disable_vblank = gen8_disable_vblank;
4634                 if (IS_BROXTON(dev_priv))
4635                         dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4636                 else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
4637                         dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4638                 else
4639                         dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4640         } else if (HAS_PCH_SPLIT(dev_priv)) {
4641                 dev->driver->irq_handler = ironlake_irq_handler;
4642                 dev->driver->irq_preinstall = ironlake_irq_reset;
4643                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4644                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4645                 dev->driver->enable_vblank = ironlake_enable_vblank;
4646                 dev->driver->disable_vblank = ironlake_disable_vblank;
4647                 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4648         } else {
4649                 if (IS_GEN2(dev_priv)) {
4650                         dev->driver->irq_preinstall = i8xx_irq_preinstall;
4651                         dev->driver->irq_postinstall = i8xx_irq_postinstall;
4652                         dev->driver->irq_handler = i8xx_irq_handler;
4653                         dev->driver->irq_uninstall = i8xx_irq_uninstall;
4654                         dev->driver->enable_vblank = i8xx_enable_vblank;
4655                         dev->driver->disable_vblank = i8xx_disable_vblank;
4656                 } else if (IS_GEN3(dev_priv)) {
4657                         dev->driver->irq_preinstall = i915_irq_preinstall;
4658                         dev->driver->irq_postinstall = i915_irq_postinstall;
4659                         dev->driver->irq_uninstall = i915_irq_uninstall;
4660                         dev->driver->irq_handler = i915_irq_handler;
4661                         dev->driver->enable_vblank = i8xx_enable_vblank;
4662                         dev->driver->disable_vblank = i8xx_disable_vblank;
4663                 } else {
4664                         dev->driver->irq_preinstall = i965_irq_preinstall;
4665                         dev->driver->irq_postinstall = i965_irq_postinstall;
4666                         dev->driver->irq_uninstall = i965_irq_uninstall;
4667                         dev->driver->irq_handler = i965_irq_handler;
4668                         dev->driver->enable_vblank = i965_enable_vblank;
4669                         dev->driver->disable_vblank = i965_disable_vblank;
4670                 }
4671                 if (I915_HAS_HOTPLUG(dev_priv))
4672                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4673         }
4674 }
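/*
 * Note on how the vtable entries filled in above are reached (a rough
 * sketch of the legacy DRM irq helpers, offered as an illustration rather
 * than a contract): drm_irq_install() does approximately
 *
 *      dev->driver->irq_preinstall(dev);
 *      request_irq(irq, dev->driver->irq_handler, flags, name, dev);
 *      dev->driver->irq_postinstall(dev);
 *
 * while enable_vblank/disable_vblank are invoked by the drm_vblank core
 * when vblank events are requested and released.
 */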
4675
4676 /**
4677  * intel_irq_install - enables the hardware interrupt
4678  * @dev_priv: i915 device instance
4679  *
4680  * This function enables the hardware interrupt handling, but leaves hotplug
4681  * handling disabled. It is called after intel_irq_init().
4682  *
4683  * In the driver load and resume code we need working interrupts in a few places
4684  * but don't want to deal with the hassle of concurrent probe and hotplug
4685  * workers. Hence the split into a two-stage approach.
4686  */
4687 int intel_irq_install(struct drm_i915_private *dev_priv)
4688 {
4689         /*
4690          * We enable some interrupt sources in our postinstall hooks, so mark
4691          * interrupts as enabled _before_ actually enabling them to avoid
4692          * special cases in our ordering checks.
4693          */
4694         dev_priv->pm.irqs_enabled = true;
4695
4696         return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
4697 }
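/*
 * Illustrative load-time ordering implied by the comment above (a sketch
 * of the intent, not a verbatim copy of the load code; intel_hpd_init()
 * is named here on the assumption that it is the later hotplug-enable
 * step):
 *
 *      intel_irq_init(dev_priv);
 *      intel_irq_install(dev_priv);
 *      ... probe outputs with working interrupts ...
 *      intel_hpd_init(dev_priv);
 */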
4698
4699 /**
4700  * intel_irq_uninstall - finalizes all irq handling
4701  * @dev_priv: i915 device instance
4702  *
4703  * This stops interrupt and hotplug handling and unregisters and frees all
4704  * resources acquired in the init functions.
4705  */
4706 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4707 {
4708         drm_irq_uninstall(&dev_priv->drm);
4709         intel_hpd_cancel_work(dev_priv);
4710         dev_priv->pm.irqs_enabled = false;
4711 }
4712
4713 /**
4714  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4715  * @dev_priv: i915 device instance
4716  *
4717  * This function is used to disable interrupts at runtime, both in the runtime
4718  * pm and the system suspend/resume code.
4719  */
4720 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4721 {
4722         dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4723         dev_priv->pm.irqs_enabled = false;
4724         synchronize_irq(dev_priv->drm.irq);
4725 }
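/*
 * Note (derived from the handlers earlier in this file): the irq handlers
 * bail out early while intel_irqs_enabled() is false, and synchronize_irq()
 * waits for any handler instance already running, so once this function
 * returns no interrupt code should touch the soon-to-be-powered-down
 * hardware.
 */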
4726
4727 /**
4728  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4729  * @dev_priv: i915 device instance
4730  *
4731  * This function is used to enable interrupts at runtime, both in the runtime
4732  * pm and the system suspend/resume code.
4733  */
4734 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4735 {
4736         dev_priv->pm.irqs_enabled = true;
4737         dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
4738         dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
4739 }
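/*
 * Illustrative pairing of the two helpers above (a sketch of the expected
 * usage in the suspend/resume paths, not a copy of that code):
 *
 *      intel_runtime_pm_disable_interrupts(dev_priv);
 *      ... power the device down ...
 *
 *      ... power the device back up ...
 *      intel_runtime_pm_enable_interrupts(dev_priv);
 */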