1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
37 #include "i915_trace.h"
38 #include "intel_drv.h"
40 /* For display hotplug interrupt */
42 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
44 if ((dev_priv->irq_mask & mask) != 0) {
45 dev_priv->irq_mask &= ~mask;
46 I915_WRITE(DEIMR, dev_priv->irq_mask);
52 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
54 if ((dev_priv->irq_mask & mask) != mask) {
55 dev_priv->irq_mask |= mask;
56 I915_WRITE(DEIMR, dev_priv->irq_mask);
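/* The PIPESTAT registers keep interrupt enable bits in the high word and
 * the matching status bits in the low word; status bits are cleared by
 * writing them back, which is why the enable helper below also writes
 * (mask >> 16).
 */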
62 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
64 if ((dev_priv->pipestat[pipe] & mask) != mask) {
65 u32 reg = PIPESTAT(pipe);
67 dev_priv->pipestat[pipe] |= mask;
68 /* Enable the interrupt, clear any pending status */
69 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
75 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
77 if ((dev_priv->pipestat[pipe] & mask) != 0) {
78 u32 reg = PIPESTAT(pipe);
80 dev_priv->pipestat[pipe] &= ~mask;
81 I915_WRITE(reg, dev_priv->pipestat[pipe]);
87 * intel_enable_asle - enable ASLE interrupt for OpRegion
89 void intel_enable_asle(struct drm_device *dev)
91 drm_i915_private_t *dev_priv = dev->dev_private;
92 unsigned long irqflags;
94 /* FIXME: opregion/asle for VLV */
95 if (IS_VALLEYVIEW(dev))
98 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
100 if (HAS_PCH_SPLIT(dev))
101 ironlake_enable_display_irq(dev_priv, DE_GSE);
103 i915_enable_pipestat(dev_priv, 1,
104 PIPE_LEGACY_BLC_EVENT_ENABLE);
105 if (INTEL_INFO(dev)->gen >= 4)
106 i915_enable_pipestat(dev_priv, 0,
107 PIPE_LEGACY_BLC_EVENT_ENABLE);
110 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
114 * i915_pipe_enabled - check if a pipe is enabled
116 * @pipe: pipe to check
118 * Reading certain registers when the pipe is disabled can hang the chip.
119 * Use this routine to make sure the PLL is running and the pipe is active
120 * before reading such registers if unsure.
123 i915_pipe_enabled(struct drm_device *dev, int pipe)
125 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
126 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
129 /* Called from drm generic code, passed a 'crtc', which
130 * we use as a pipe index
132 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
134 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
135 unsigned long high_frame;
136 unsigned long low_frame;
137 u32 high1, high2, low;
139 if (!i915_pipe_enabled(dev, pipe)) {
140 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
141 "pipe %c\n", pipe_name(pipe));
145 high_frame = PIPEFRAME(pipe);
146 low_frame = PIPEFRAMEPIXEL(pipe);
149 * High & low register fields aren't synchronized, so make sure
150 * we get a low value that's stable across two reads of the high
154 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
155 low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
156 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
157 } while (high1 != high2);
159 high1 >>= PIPE_FRAME_HIGH_SHIFT;
160 low >>= PIPE_FRAME_LOW_SHIFT;
161 return (high1 << 8) | low;
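/* g4x and newer provide a full hardware frame counter per pipe, so no
 * high/low register stitching is needed.
 */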
164 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
166 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
167 int reg = PIPE_FRMCOUNT_GM45(pipe);
169 if (!i915_pipe_enabled(dev, pipe)) {
170 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
171 "pipe %c\n", pipe_name(pipe));
175 return I915_READ(reg);
178 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
179 int *vpos, int *hpos)
181 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
182 u32 vbl = 0, position = 0;
183 int vbl_start, vbl_end, htotal, vtotal;
187 if (!i915_pipe_enabled(dev, pipe)) {
188 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
189 "pipe %c\n", pipe_name(pipe));
194 vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
196 if (INTEL_INFO(dev)->gen >= 4) {
197 /* No obvious pixelcount register. Only query vertical
198 * scanout position from Display scan line register.
200 position = I915_READ(PIPEDSL(pipe));
202 /* Decode into vertical scanout position. Don't have
203 * horizontal scanout position.
205 *vpos = position & 0x1fff;
208 /* Have access to pixelcount since start of frame.
209 * We can split this into vertical and horizontal
212 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
214 htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
215 *vpos = position / htotal;
216 *hpos = position - (*vpos * htotal);
219 /* Query vblank area. */
220 vbl = I915_READ(VBLANK(pipe));
222 /* Test position against vblank region. */
223 vbl_start = vbl & 0x1fff;
224 vbl_end = (vbl >> 16) & 0x1fff;
226 if ((*vpos < vbl_start) || (*vpos > vbl_end))
229 /* Inside "upper part" of vblank area? Apply corrective offset: */
230 if (in_vbl && (*vpos >= vbl_start))
231 *vpos = *vpos - vtotal;
233 /* Readouts valid? */
235 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
239 ret |= DRM_SCANOUTPOS_INVBL;
244 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
246 struct timeval *vblank_time,
249 struct drm_i915_private *dev_priv = dev->dev_private;
250 struct drm_crtc *crtc;
252 if (pipe < 0 || pipe >= dev_priv->num_pipe) {
253 DRM_ERROR("Invalid crtc %d\n", pipe);
257 /* Get drm_crtc to timestamp: */
258 crtc = intel_get_crtc_for_pipe(dev, pipe);
260 DRM_ERROR("Invalid crtc %d\n", pipe);
264 if (!crtc->enabled) {
265 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
269 /* Helper routine in DRM core does all the work: */
270 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
276 * Handle hotplug events outside the interrupt handler proper.
278 static void i915_hotplug_work_func(struct work_struct *work)
280 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
282 struct drm_device *dev = dev_priv->dev;
283 struct drm_mode_config *mode_config = &dev->mode_config;
284 struct intel_encoder *encoder;
286 mutex_lock(&mode_config->mutex);
287 DRM_DEBUG_KMS("running encoder hotplug functions\n");
289 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
290 if (encoder->hot_plug)
291 encoder->hot_plug(encoder);
293 mutex_unlock(&mode_config->mutex);
295 /* Just fire off a uevent and let userspace tell us what to do */
296 drm_helper_hpd_irq_event(dev);
299 static void i915_handle_rps_change(struct drm_device *dev)
301 drm_i915_private_t *dev_priv = dev->dev_private;
302 u32 busy_up, busy_down, max_avg, min_avg;
303 u8 new_delay = dev_priv->cur_delay;
305 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
306 busy_up = I915_READ(RCPREVBSYTUPAVG);
307 busy_down = I915_READ(RCPREVBSYTDNAVG);
308 max_avg = I915_READ(RCBMAXAVG);
309 min_avg = I915_READ(RCBMINAVG);
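/* Note: on Ironlake a smaller delay value means a higher frequency, so
 * max_delay is the numerically smallest delay we may step down to.
 */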
311 /* Handle RPS change request from hw */
312 if (busy_up > max_avg) {
313 if (dev_priv->cur_delay != dev_priv->max_delay)
314 new_delay = dev_priv->cur_delay - 1;
315 if (new_delay < dev_priv->max_delay)
316 new_delay = dev_priv->max_delay;
317 } else if (busy_down < min_avg) {
318 if (dev_priv->cur_delay != dev_priv->min_delay)
319 new_delay = dev_priv->cur_delay + 1;
320 if (new_delay > dev_priv->min_delay)
321 new_delay = dev_priv->min_delay;
324 if (ironlake_set_drps(dev, new_delay))
325 dev_priv->cur_delay = new_delay;
330 static void notify_ring(struct drm_device *dev,
331 struct intel_ring_buffer *ring)
333 struct drm_i915_private *dev_priv = dev->dev_private;
335 if (ring->obj == NULL)
338 trace_i915_gem_request_complete(ring, ring->get_seqno(ring));
340 wake_up_all(&ring->irq_queue);
341 if (i915_enable_hangcheck) {
342 dev_priv->hangcheck_count = 0;
343 mod_timer(&dev_priv->hangcheck_timer,
345 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
349 static void gen6_pm_rps_work(struct work_struct *work)
351 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
353 u8 new_delay = dev_priv->cur_delay;
356 spin_lock_irq(&dev_priv->rps_lock);
357 pm_iir = dev_priv->pm_iir;
358 dev_priv->pm_iir = 0;
359 pm_imr = I915_READ(GEN6_PMIMR);
360 I915_WRITE(GEN6_PMIMR, 0);
361 spin_unlock_irq(&dev_priv->rps_lock);
366 mutex_lock(&dev_priv->dev->struct_mutex);
367 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
368 if (dev_priv->cur_delay != dev_priv->max_delay)
369 new_delay = dev_priv->cur_delay + 1;
370 if (new_delay > dev_priv->max_delay)
371 new_delay = dev_priv->max_delay;
372 } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
373 gen6_gt_force_wake_get(dev_priv);
374 if (dev_priv->cur_delay != dev_priv->min_delay)
375 new_delay = dev_priv->cur_delay - 1;
376 if (new_delay < dev_priv->min_delay) {
377 new_delay = dev_priv->min_delay;
378 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
379 I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
380 ((new_delay << 16) & 0x3f0000));
382 /* Make sure we continue to get down interrupts
383 * until we hit the minimum frequency */
384 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
385 I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
387 gen6_gt_force_wake_put(dev_priv);
390 gen6_set_rps(dev_priv->dev, new_delay);
391 dev_priv->cur_delay = new_delay;
394 * rps_lock not held here because clearing is non-destructive. There is
395 * an *extremely* unlikely race with gen6_rps_enable() that is prevented
396 * by holding struct_mutex for the duration of the write.
398 mutex_unlock(&dev_priv->dev->struct_mutex);
401 static void snb_gt_irq_handler(struct drm_device *dev,
402 struct drm_i915_private *dev_priv,
406 if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
407 GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
408 notify_ring(dev, &dev_priv->ring[RCS]);
409 if (gt_iir & GEN6_BSD_USER_INTERRUPT)
410 notify_ring(dev, &dev_priv->ring[VCS]);
411 if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
412 notify_ring(dev, &dev_priv->ring[BCS]);
414 if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
415 GT_GEN6_BSD_CS_ERROR_INTERRUPT |
416 GT_RENDER_CS_ERROR_INTERRUPT)) {
417 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
418 i915_handle_error(dev, false);
422 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
428 * IIR bits should never already be set because IMR should
429 * prevent an interrupt from being shown in IIR. The warning
430 * indicates a case where we've unsafely cleared
431 * dev_priv->pm_iir. Although missing an interrupt of the same
432 * type is not a problem, it indicates a problem in the logic.
434 * The mask bit in IMR is cleared by rps_work.
437 spin_lock_irqsave(&dev_priv->rps_lock, flags);
438 WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
439 dev_priv->pm_iir |= pm_iir;
440 I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
441 POSTING_READ(GEN6_PMIMR);
442 spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
444 queue_work(dev_priv->wq, &dev_priv->rps_work);
447 static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
449 struct drm_device *dev = (struct drm_device *) arg;
450 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
451 u32 iir, gt_iir, pm_iir;
452 irqreturn_t ret = IRQ_NONE;
453 unsigned long irqflags;
455 u32 pipe_stats[I915_MAX_PIPES];
460 atomic_inc(&dev_priv->irq_received);
462 vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
463 PIPE_VBLANK_INTERRUPT_STATUS;
466 iir = I915_READ(VLV_IIR);
467 gt_iir = I915_READ(GTIIR);
468 pm_iir = I915_READ(GEN6_PMIIR);
470 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
475 snb_gt_irq_handler(dev, dev_priv, gt_iir);
477 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
478 for_each_pipe(pipe) {
479 int reg = PIPESTAT(pipe);
480 pipe_stats[pipe] = I915_READ(reg);
483 * Clear the PIPE*STAT regs before the IIR
485 if (pipe_stats[pipe] & 0x8000ffff) {
486 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
487 DRM_DEBUG_DRIVER("pipe %c underrun\n",
489 I915_WRITE(reg, pipe_stats[pipe]);
492 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
494 /* Consume port. Then clear IIR or we'll miss events */
495 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
496 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
498 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
500 if (hotplug_status & dev_priv->hotplug_supported_mask)
501 queue_work(dev_priv->wq,
502 &dev_priv->hotplug_work);
504 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
505 I915_READ(PORT_HOTPLUG_STAT);
509 if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
510 drm_handle_vblank(dev, 0);
512 intel_finish_page_flip(dev, 0);
515 if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
516 drm_handle_vblank(dev, 1);
518 intel_finish_page_flip(dev, 1);
521 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
524 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
525 gen6_queue_rps_work(dev_priv, pm_iir);
527 I915_WRITE(GTIIR, gt_iir);
528 I915_WRITE(GEN6_PMIIR, pm_iir);
529 I915_WRITE(VLV_IIR, iir);
536 static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
538 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
541 if (pch_iir & SDE_AUDIO_POWER_MASK)
542 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
543 (pch_iir & SDE_AUDIO_POWER_MASK) >>
544 SDE_AUDIO_POWER_SHIFT);
546 if (pch_iir & SDE_GMBUS)
547 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
549 if (pch_iir & SDE_AUDIO_HDCP_MASK)
550 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
552 if (pch_iir & SDE_AUDIO_TRANS_MASK)
553 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
555 if (pch_iir & SDE_POISON)
556 DRM_ERROR("PCH poison interrupt\n");
558 if (pch_iir & SDE_FDI_MASK)
560 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
562 I915_READ(FDI_RX_IIR(pipe)));
564 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
565 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
567 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
568 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
570 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
571 DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
572 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
573 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
576 static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
578 struct drm_device *dev = (struct drm_device *) arg;
579 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
580 u32 de_iir, gt_iir, de_ier, pm_iir;
581 irqreturn_t ret = IRQ_NONE;
584 atomic_inc(&dev_priv->irq_received);
586 /* disable master interrupt before clearing iir */
587 de_ier = I915_READ(DEIER);
588 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
590 gt_iir = I915_READ(GTIIR);
592 snb_gt_irq_handler(dev, dev_priv, gt_iir);
593 I915_WRITE(GTIIR, gt_iir);
597 de_iir = I915_READ(DEIIR);
599 if (de_iir & DE_GSE_IVB)
600 intel_opregion_gse_intr(dev);
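/* The IVB flip-done and vblank bits for pipes B and C sit five bits above
 * the pipe A bits, hence the (5 * i) shift in the loop below.
 */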
602 for (i = 0; i < 3; i++) {
603 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
604 intel_prepare_page_flip(dev, i);
605 intel_finish_page_flip_plane(dev, i);
607 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
608 drm_handle_vblank(dev, i);
611 /* check event from PCH */
612 if (de_iir & DE_PCH_EVENT_IVB) {
613 u32 pch_iir = I915_READ(SDEIIR);
615 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
616 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
617 pch_irq_handler(dev, pch_iir);
619 /* clear PCH hotplug event before clearing CPU irq */
620 I915_WRITE(SDEIIR, pch_iir);
623 I915_WRITE(DEIIR, de_iir);
627 pm_iir = I915_READ(GEN6_PMIIR);
629 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
630 gen6_queue_rps_work(dev_priv, pm_iir);
631 I915_WRITE(GEN6_PMIIR, pm_iir);
635 I915_WRITE(DEIER, de_ier);
641 static void ilk_gt_irq_handler(struct drm_device *dev,
642 struct drm_i915_private *dev_priv,
645 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
646 notify_ring(dev, &dev_priv->ring[RCS]);
647 if (gt_iir & GT_BSD_USER_INTERRUPT)
648 notify_ring(dev, &dev_priv->ring[VCS]);
651 static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
653 struct drm_device *dev = (struct drm_device *) arg;
654 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
656 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
659 atomic_inc(&dev_priv->irq_received);
661 /* disable master interrupt before clearing iir */
662 de_ier = I915_READ(DEIER);
663 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
666 de_iir = I915_READ(DEIIR);
667 gt_iir = I915_READ(GTIIR);
668 pch_iir = I915_READ(SDEIIR);
669 pm_iir = I915_READ(GEN6_PMIIR);
671 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
672 (!IS_GEN6(dev) || pm_iir == 0))
675 if (HAS_PCH_CPT(dev))
676 hotplug_mask = SDE_HOTPLUG_MASK_CPT;
678 hotplug_mask = SDE_HOTPLUG_MASK;
683 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
685 snb_gt_irq_handler(dev, dev_priv, gt_iir);
688 intel_opregion_gse_intr(dev);
690 if (de_iir & DE_PLANEA_FLIP_DONE) {
691 intel_prepare_page_flip(dev, 0);
692 intel_finish_page_flip_plane(dev, 0);
695 if (de_iir & DE_PLANEB_FLIP_DONE) {
696 intel_prepare_page_flip(dev, 1);
697 intel_finish_page_flip_plane(dev, 1);
700 if (de_iir & DE_PIPEA_VBLANK)
701 drm_handle_vblank(dev, 0);
703 if (de_iir & DE_PIPEB_VBLANK)
704 drm_handle_vblank(dev, 1);
706 /* check event from PCH */
707 if (de_iir & DE_PCH_EVENT) {
708 if (pch_iir & hotplug_mask)
709 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
710 pch_irq_handler(dev, pch_iir);
713 if (de_iir & DE_PCU_EVENT) {
714 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
715 i915_handle_rps_change(dev);
718 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
719 gen6_queue_rps_work(dev_priv, pm_iir);
721 /* should clear PCH hotplug event before clearing CPU irq */
722 I915_WRITE(SDEIIR, pch_iir);
723 I915_WRITE(GTIIR, gt_iir);
724 I915_WRITE(DEIIR, de_iir);
725 I915_WRITE(GEN6_PMIIR, pm_iir);
728 I915_WRITE(DEIER, de_ier);
735 * i915_error_work_func - do process context error handling work
738 * Fire an error uevent so userspace can see that a hang or error
741 static void i915_error_work_func(struct work_struct *work)
743 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
745 struct drm_device *dev = dev_priv->dev;
746 char *error_event[] = { "ERROR=1", NULL };
747 char *reset_event[] = { "RESET=1", NULL };
748 char *reset_done_event[] = { "ERROR=0", NULL };
750 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
752 if (atomic_read(&dev_priv->mm.wedged)) {
753 DRM_DEBUG_DRIVER("resetting chip\n");
754 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
755 if (!i915_reset(dev)) {
756 atomic_set(&dev_priv->mm.wedged, 0);
757 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
759 complete_all(&dev_priv->error_completion);
763 #ifdef CONFIG_DEBUG_FS
764 static struct drm_i915_error_object *
765 i915_error_object_create(struct drm_i915_private *dev_priv,
766 struct drm_i915_gem_object *src)
768 struct drm_i915_error_object *dst;
769 int page, page_count;
772 if (src == NULL || src->pages == NULL)
775 page_count = src->base.size / PAGE_SIZE;
777 dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
781 reloc_offset = src->gtt_offset;
782 for (page = 0; page < page_count; page++) {
786 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
790 local_irq_save(flags);
791 if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
792 src->has_global_gtt_mapping) {
795 /* Simply ignore tiling or any overlapping fence.
796 * It's part of the error state, and this hopefully
797 * captures what the GPU read.
800 s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
802 memcpy_fromio(d, s, PAGE_SIZE);
803 io_mapping_unmap_atomic(s);
807 drm_clflush_pages(&src->pages[page], 1);
809 s = kmap_atomic(src->pages[page]);
810 memcpy(d, s, PAGE_SIZE);
813 drm_clflush_pages(&src->pages[page], 1);
815 local_irq_restore(flags);
817 dst->pages[page] = d;
819 reloc_offset += PAGE_SIZE;
821 dst->page_count = page_count;
822 dst->gtt_offset = src->gtt_offset;
828 kfree(dst->pages[page]);
834 i915_error_object_free(struct drm_i915_error_object *obj)
841 for (page = 0; page < obj->page_count; page++)
842 kfree(obj->pages[page]);
848 i915_error_state_free(struct kref *error_ref)
850 struct drm_i915_error_state *error = container_of(error_ref,
851 typeof(*error), ref);
854 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
855 i915_error_object_free(error->ring[i].batchbuffer);
856 i915_error_object_free(error->ring[i].ringbuffer);
857 kfree(error->ring[i].requests);
860 kfree(error->active_bo);
861 kfree(error->overlay);
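/* Snapshot a GEM object's bookkeeping (size, domains, fence, tiling, ...)
 * into an error-state entry so it can be inspected later from debugfs.
 */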
864 static void capture_bo(struct drm_i915_error_buffer *err,
865 struct drm_i915_gem_object *obj)
867 err->size = obj->base.size;
868 err->name = obj->base.name;
869 err->seqno = obj->last_rendering_seqno;
870 err->gtt_offset = obj->gtt_offset;
871 err->read_domains = obj->base.read_domains;
872 err->write_domain = obj->base.write_domain;
873 err->fence_reg = obj->fence_reg;
875 if (obj->pin_count > 0)
877 if (obj->user_pin_count > 0)
879 err->tiling = obj->tiling_mode;
880 err->dirty = obj->dirty;
881 err->purgeable = obj->madv != I915_MADV_WILLNEED;
882 err->ring = obj->ring ? obj->ring->id : -1;
883 err->cache_level = obj->cache_level;
886 static u32 capture_active_bo(struct drm_i915_error_buffer *err,
887 int count, struct list_head *head)
889 struct drm_i915_gem_object *obj;
892 list_for_each_entry(obj, head, mm_list) {
893 capture_bo(err++, obj);
901 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
902 int count, struct list_head *head)
904 struct drm_i915_gem_object *obj;
907 list_for_each_entry(obj, head, gtt_list) {
908 if (obj->pin_count == 0)
911 capture_bo(err++, obj);
919 static void i915_gem_record_fences(struct drm_device *dev,
920 struct drm_i915_error_state *error)
922 struct drm_i915_private *dev_priv = dev->dev_private;
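/* Fence register count, width and base address vary with the hardware
 * generation, so read them out accordingly.
 */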
926 switch (INTEL_INFO(dev)->gen) {
929 for (i = 0; i < 16; i++)
930 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
934 for (i = 0; i < 16; i++)
935 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
938 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
939 for (i = 0; i < 8; i++)
940 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
942 for (i = 0; i < 8; i++)
943 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
949 static struct drm_i915_error_object *
950 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
951 struct intel_ring_buffer *ring)
953 struct drm_i915_gem_object *obj;
956 if (!ring->get_seqno)
959 seqno = ring->get_seqno(ring);
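/* Scan the active list for an object this ring was still reading commands
 * from; that is our best guess at the batch that hung.
 */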
960 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
961 if (obj->ring != ring)
964 if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
967 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
970 /* We need to copy these to an anonymous buffer as the simplest
971 * method to avoid being overwritten by userspace.
973 return i915_error_object_create(dev_priv, obj);
979 static void i915_record_ring_state(struct drm_device *dev,
980 struct drm_i915_error_state *error,
981 struct intel_ring_buffer *ring)
983 struct drm_i915_private *dev_priv = dev->dev_private;
985 if (INTEL_INFO(dev)->gen >= 6) {
986 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
987 error->semaphore_mboxes[ring->id][0]
988 = I915_READ(RING_SYNC_0(ring->mmio_base));
989 error->semaphore_mboxes[ring->id][1]
990 = I915_READ(RING_SYNC_1(ring->mmio_base));
993 if (INTEL_INFO(dev)->gen >= 4) {
994 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
995 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
996 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
997 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
998 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
999 if (ring->id == RCS) {
1000 error->instdone1 = I915_READ(INSTDONE1);
1001 error->bbaddr = I915_READ64(BB_ADDR);
1004 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
1005 error->ipeir[ring->id] = I915_READ(IPEIR);
1006 error->ipehr[ring->id] = I915_READ(IPEHR);
1007 error->instdone[ring->id] = I915_READ(INSTDONE);
1010 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
1011 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1012 error->seqno[ring->id] = ring->get_seqno(ring);
1013 error->acthd[ring->id] = intel_ring_get_active_head(ring);
1014 error->head[ring->id] = I915_READ_HEAD(ring);
1015 error->tail[ring->id] = I915_READ_TAIL(ring);
1017 error->cpu_ring_head[ring->id] = ring->head;
1018 error->cpu_ring_tail[ring->id] = ring->tail;
1021 static void i915_gem_record_rings(struct drm_device *dev,
1022 struct drm_i915_error_state *error)
1024 struct drm_i915_private *dev_priv = dev->dev_private;
1025 struct intel_ring_buffer *ring;
1026 struct drm_i915_gem_request *request;
1029 for_each_ring(ring, dev_priv, i) {
1030 i915_record_ring_state(dev, error, ring);
1032 error->ring[i].batchbuffer =
1033 i915_error_first_batchbuffer(dev_priv, ring);
1035 error->ring[i].ringbuffer =
1036 i915_error_object_create(dev_priv, ring->obj);
1039 list_for_each_entry(request, &ring->request_list, list)
1042 error->ring[i].num_requests = count;
1043 error->ring[i].requests =
1044 kmalloc(count*sizeof(struct drm_i915_error_request),
1046 if (error->ring[i].requests == NULL) {
1047 error->ring[i].num_requests = 0;
1052 list_for_each_entry(request, &ring->request_list, list) {
1053 struct drm_i915_error_request *erq;
1055 erq = &error->ring[i].requests[count++];
1056 erq->seqno = request->seqno;
1057 erq->jiffies = request->emitted_jiffies;
1058 erq->tail = request->tail;
1064 * i915_capture_error_state - capture an error record for later analysis
1067 * Should be called when an error is detected (either a hang or an error
1068 * interrupt) to capture error state from the time of the error. Fills
1069 * out a structure which becomes available in debugfs for user level tools
1072 static void i915_capture_error_state(struct drm_device *dev)
1074 struct drm_i915_private *dev_priv = dev->dev_private;
1075 struct drm_i915_gem_object *obj;
1076 struct drm_i915_error_state *error;
1077 unsigned long flags;
1080 spin_lock_irqsave(&dev_priv->error_lock, flags);
1081 error = dev_priv->first_error;
1082 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
1086 /* Account for pipe specific data like PIPE*STAT */
1087 error = kzalloc(sizeof(*error), GFP_ATOMIC);
1089 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1093 DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
1094 dev->primary->index);
1096 kref_init(&error->ref);
1097 error->eir = I915_READ(EIR);
1098 error->pgtbl_er = I915_READ(PGTBL_ER);
1100 if (HAS_PCH_SPLIT(dev))
1101 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1102 else if (IS_VALLEYVIEW(dev))
1103 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1104 else if (IS_GEN2(dev))
1105 error->ier = I915_READ16(IER);
1107 error->ier = I915_READ(IER);
1110 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1112 if (INTEL_INFO(dev)->gen >= 6) {
1113 error->error = I915_READ(ERROR_GEN6);
1114 error->done_reg = I915_READ(DONE_REG);
1117 i915_gem_record_fences(dev, error);
1118 i915_gem_record_rings(dev, error);
1120 /* Record buffers on the active and pinned lists. */
1121 error->active_bo = NULL;
1122 error->pinned_bo = NULL;
1125 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1127 error->active_bo_count = i;
1128 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
1131 error->pinned_bo_count = i - error->active_bo_count;
1133 error->active_bo = NULL;
1134 error->pinned_bo = NULL;
1136 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
1138 if (error->active_bo)
1140 error->active_bo + error->active_bo_count;
1143 if (error->active_bo)
1144 error->active_bo_count =
1145 capture_active_bo(error->active_bo,
1146 error->active_bo_count,
1147 &dev_priv->mm.active_list);
1149 if (error->pinned_bo)
1150 error->pinned_bo_count =
1151 capture_pinned_bo(error->pinned_bo,
1152 error->pinned_bo_count,
1153 &dev_priv->mm.gtt_list);
1155 do_gettimeofday(&error->time);
1157 error->overlay = intel_overlay_capture_error_state(dev);
1158 error->display = intel_display_capture_error_state(dev);
1160 spin_lock_irqsave(&dev_priv->error_lock, flags);
1161 if (dev_priv->first_error == NULL) {
1162 dev_priv->first_error = error;
1165 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
1168 i915_error_state_free(&error->ref);
1171 void i915_destroy_error_state(struct drm_device *dev)
1173 struct drm_i915_private *dev_priv = dev->dev_private;
1174 struct drm_i915_error_state *error;
1175 unsigned long flags;
1177 spin_lock_irqsave(&dev_priv->error_lock, flags);
1178 error = dev_priv->first_error;
1179 dev_priv->first_error = NULL;
1180 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
1183 kref_put(&error->ref, i915_error_state_free);
1186 #define i915_capture_error_state(x)
1189 static void i915_report_and_clear_eir(struct drm_device *dev)
1191 struct drm_i915_private *dev_priv = dev->dev_private;
1192 u32 eir = I915_READ(EIR);
1198 pr_err("render error detected, EIR: 0x%08x\n", eir);
1201 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1202 u32 ipeir = I915_READ(IPEIR_I965);
1204 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1205 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1206 pr_err(" INSTDONE: 0x%08x\n",
1207 I915_READ(INSTDONE_I965));
1208 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
1209 pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
1210 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1211 I915_WRITE(IPEIR_I965, ipeir);
1212 POSTING_READ(IPEIR_I965);
1214 if (eir & GM45_ERROR_PAGE_TABLE) {
1215 u32 pgtbl_err = I915_READ(PGTBL_ER);
1216 pr_err("page table error\n");
1217 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
1218 I915_WRITE(PGTBL_ER, pgtbl_err);
1219 POSTING_READ(PGTBL_ER);
1223 if (!IS_GEN2(dev)) {
1224 if (eir & I915_ERROR_PAGE_TABLE) {
1225 u32 pgtbl_err = I915_READ(PGTBL_ER);
1226 pr_err("page table error\n");
1227 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
1228 I915_WRITE(PGTBL_ER, pgtbl_err);
1229 POSTING_READ(PGTBL_ER);
1233 if (eir & I915_ERROR_MEMORY_REFRESH) {
1234 pr_err("memory refresh error:\n");
1236 pr_err("pipe %c stat: 0x%08x\n",
1237 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
1238 /* pipestat has already been acked */
1240 if (eir & I915_ERROR_INSTRUCTION) {
1241 pr_err("instruction error\n");
1242 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
1243 if (INTEL_INFO(dev)->gen < 4) {
1244 u32 ipeir = I915_READ(IPEIR);
1246 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
1247 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
1248 pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
1249 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
1250 I915_WRITE(IPEIR, ipeir);
1251 POSTING_READ(IPEIR);
1253 u32 ipeir = I915_READ(IPEIR_I965);
1255 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1256 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1257 pr_err(" INSTDONE: 0x%08x\n",
1258 I915_READ(INSTDONE_I965));
1259 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
1260 pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
1261 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1262 I915_WRITE(IPEIR_I965, ipeir);
1263 POSTING_READ(IPEIR_I965);
1267 I915_WRITE(EIR, eir);
1269 eir = I915_READ(EIR);
1272 * some errors might have become stuck,
1275 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
1276 I915_WRITE(EMR, I915_READ(EMR) | eir);
1277 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1282 * i915_handle_error - handle an error interrupt
1285 * Do some basic checking of register state at error interrupt time and
1286 * dump it to the syslog. Also call i915_capture_error_state() to make
1287 * sure we get a record and make it available in debugfs. Fire a uevent
1288 * so userspace knows something bad happened (should trigger collection
1289 * of a ring dump etc.).
1291 void i915_handle_error(struct drm_device *dev, bool wedged)
1293 struct drm_i915_private *dev_priv = dev->dev_private;
1294 struct intel_ring_buffer *ring;
1297 i915_capture_error_state(dev);
1298 i915_report_and_clear_eir(dev);
1301 INIT_COMPLETION(dev_priv->error_completion);
1302 atomic_set(&dev_priv->mm.wedged, 1);
1305 * Wake up waiting processes so they don't hang
1307 for_each_ring(ring, dev_priv, i)
1308 wake_up_all(&ring->irq_queue);
1311 queue_work(dev_priv->wq, &dev_priv->error_work);
1314 static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1316 drm_i915_private_t *dev_priv = dev->dev_private;
1317 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1318 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1319 struct drm_i915_gem_object *obj;
1320 struct intel_unpin_work *work;
1321 unsigned long flags;
1322 bool stall_detected;
1324 /* Ignore early vblank irqs */
1325 if (intel_crtc == NULL)
1328 spin_lock_irqsave(&dev->event_lock, flags);
1329 work = intel_crtc->unpin_work;
1331 if (work == NULL || work->pending || !work->enable_stall_check) {
1332 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1333 spin_unlock_irqrestore(&dev->event_lock, flags);
1337 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1338 obj = work->pending_flip_obj;
1339 if (INTEL_INFO(dev)->gen >= 4) {
1340 int dspsurf = DSPSURF(intel_crtc->plane);
1341 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1344 int dspaddr = DSPADDR(intel_crtc->plane);
1345 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
1346 crtc->y * crtc->fb->pitches[0] +
1347 crtc->x * crtc->fb->bits_per_pixel/8);
1350 spin_unlock_irqrestore(&dev->event_lock, flags);
1352 if (stall_detected) {
1353 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1354 intel_prepare_page_flip(dev, intel_crtc->plane);
1358 /* Called from drm generic code, passed 'crtc' which
1359 * we use as a pipe index
1361 static int i915_enable_vblank(struct drm_device *dev, int pipe)
1363 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1364 unsigned long irqflags;
1366 if (!i915_pipe_enabled(dev, pipe))
1369 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1370 if (INTEL_INFO(dev)->gen >= 4)
1371 i915_enable_pipestat(dev_priv, pipe,
1372 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1374 i915_enable_pipestat(dev_priv, pipe,
1375 PIPE_VBLANK_INTERRUPT_ENABLE);
1377 /* maintain vblank delivery even in deep C-states */
1378 if (dev_priv->info->gen == 3)
1379 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1380 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1385 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1387 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1388 unsigned long irqflags;
1390 if (!i915_pipe_enabled(dev, pipe))
1393 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1394 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1395 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1396 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1401 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1403 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1404 unsigned long irqflags;
1406 if (!i915_pipe_enabled(dev, pipe))
1409 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1410 ironlake_enable_display_irq(dev_priv,
1411 DE_PIPEA_VBLANK_IVB << (5 * pipe));
1412 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1417 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1419 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1420 unsigned long irqflags;
1423 if (!i915_pipe_enabled(dev, pipe))
1426 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1427 dpfl = I915_READ(VLV_DPFLIPSTAT);
1428 imr = I915_READ(VLV_IMR);
1430 dpfl |= PIPEA_VBLANK_INT_EN;
1431 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1433 dpfl |= PIPEB_VBLANK_INT_EN;
1434 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1436 I915_WRITE(VLV_DPFLIPSTAT, dpfl);
1437 I915_WRITE(VLV_IMR, imr);
1438 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1443 /* Called from drm generic code, passed 'crtc' which
1444 * we use as a pipe index
1446 static void i915_disable_vblank(struct drm_device *dev, int pipe)
1448 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1449 unsigned long irqflags;
1451 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1452 if (dev_priv->info->gen == 3)
1453 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
1455 i915_disable_pipestat(dev_priv, pipe,
1456 PIPE_VBLANK_INTERRUPT_ENABLE |
1457 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1458 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1461 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1463 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1464 unsigned long irqflags;
1466 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1467 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1468 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1469 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1472 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1474 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1475 unsigned long irqflags;
1477 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1478 ironlake_disable_display_irq(dev_priv,
1479 DE_PIPEA_VBLANK_IVB << (pipe * 5));
1480 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1483 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1485 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1486 unsigned long irqflags;
1489 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1490 dpfl = I915_READ(VLV_DPFLIPSTAT);
1491 imr = I915_READ(VLV_IMR);
1493 dpfl &= ~PIPEA_VBLANK_INT_EN;
1494 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1496 dpfl &= ~PIPEB_VBLANK_INT_EN;
1497 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1499 I915_WRITE(VLV_IMR, imr);
1500 I915_WRITE(VLV_DPFLIPSTAT, dpfl);
1501 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1505 ring_last_seqno(struct intel_ring_buffer *ring)
1507 return list_entry(ring->request_list.prev,
1508 struct drm_i915_gem_request, list)->seqno;
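/* A ring is considered idle when the seqno of its most recently queued
 * request has already been reported as complete.
 */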
1511 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1513 if (list_empty(&ring->request_list) ||
1514 i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
1515 /* Issue a wake-up to catch stuck h/w. */
1516 if (waitqueue_active(&ring->irq_queue)) {
1517 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
1519 wake_up_all(&ring->irq_queue);
1527 static bool kick_ring(struct intel_ring_buffer *ring)
1529 struct drm_device *dev = ring->dev;
1530 struct drm_i915_private *dev_priv = dev->dev_private;
1531 u32 tmp = I915_READ_CTL(ring);
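/* If the ring is stuck on an MI_WAIT_FOR_EVENT, writing the control
 * register back with RING_WAIT set pokes it loose again.
 */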
1532 if (tmp & RING_WAIT) {
1533 DRM_ERROR("Kicking stuck wait on %s\n",
1535 I915_WRITE_CTL(ring, tmp);
1541 static bool i915_hangcheck_hung(struct drm_device *dev)
1543 drm_i915_private_t *dev_priv = dev->dev_private;
1545 if (dev_priv->hangcheck_count++ > 1) {
1548 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1549 i915_handle_error(dev, true);
1551 if (!IS_GEN2(dev)) {
1552 struct intel_ring_buffer *ring;
1555 /* Is the chip hanging on a WAIT_FOR_EVENT?
1556 * If so we can simply poke the RB_WAIT bit
1557 * and break the hang. This should work on
1558 * all but the second generation chipsets.
1560 for_each_ring(ring, dev_priv, i)
1561 hung &= !kick_ring(ring);
1571 * This is called when the chip hasn't reported back with completed
1572 * batchbuffers in a long time. The first time this is called we simply record
1573 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
1574 * again, we assume the chip is wedged and try to fix it.
1576 void i915_hangcheck_elapsed(unsigned long data)
1578 struct drm_device *dev = (struct drm_device *)data;
1579 drm_i915_private_t *dev_priv = dev->dev_private;
1580 uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
1581 struct intel_ring_buffer *ring;
1582 bool err = false, idle;
1585 if (!i915_enable_hangcheck)
1588 memset(acthd, 0, sizeof(acthd));
1590 for_each_ring(ring, dev_priv, i) {
1591 idle &= i915_hangcheck_ring_idle(ring, &err);
1592 acthd[i] = intel_ring_get_active_head(ring);
1595 /* If all work is done then ACTHD clearly hasn't advanced. */
1598 if (i915_hangcheck_hung(dev))
1604 dev_priv->hangcheck_count = 0;
1608 if (INTEL_INFO(dev)->gen < 4) {
1609 instdone = I915_READ(INSTDONE);
1612 instdone = I915_READ(INSTDONE_I965);
1613 instdone1 = I915_READ(INSTDONE1);
1616 if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
1617 dev_priv->last_instdone == instdone &&
1618 dev_priv->last_instdone1 == instdone1) {
1619 if (i915_hangcheck_hung(dev))
1622 dev_priv->hangcheck_count = 0;
1624 memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
1625 dev_priv->last_instdone = instdone;
1626 dev_priv->last_instdone1 = instdone1;
1630 /* Reset timer in case chip hangs without another request being added */
1631 mod_timer(&dev_priv->hangcheck_timer,
1632 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1637 static void ironlake_irq_preinstall(struct drm_device *dev)
1639 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1641 atomic_set(&dev_priv->irq_received, 0);
1644 I915_WRITE(HWSTAM, 0xeffe);
1646 /* XXX hotplug from PCH */
1648 I915_WRITE(DEIMR, 0xffffffff);
1649 I915_WRITE(DEIER, 0x0);
1650 POSTING_READ(DEIER);
1653 I915_WRITE(GTIMR, 0xffffffff);
1654 I915_WRITE(GTIER, 0x0);
1655 POSTING_READ(GTIER);
1657 /* south display irq */
1658 I915_WRITE(SDEIMR, 0xffffffff);
1659 I915_WRITE(SDEIER, 0x0);
1660 POSTING_READ(SDEIER);
1663 static void valleyview_irq_preinstall(struct drm_device *dev)
1665 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1668 atomic_set(&dev_priv->irq_received, 0);
1671 I915_WRITE(VLV_IMR, 0);
1672 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
1673 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
1674 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
1677 I915_WRITE(GTIIR, I915_READ(GTIIR));
1678 I915_WRITE(GTIIR, I915_READ(GTIIR));
1679 I915_WRITE(GTIMR, 0xffffffff);
1680 I915_WRITE(GTIER, 0x0);
1681 POSTING_READ(GTIER);
1683 I915_WRITE(DPINVGTT, 0xff);
1685 I915_WRITE(PORT_HOTPLUG_EN, 0);
1686 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1688 I915_WRITE(PIPESTAT(pipe), 0xffff);
1689 I915_WRITE(VLV_IIR, 0xffffffff);
1690 I915_WRITE(VLV_IMR, 0xffffffff);
1691 I915_WRITE(VLV_IER, 0x0);
1692 POSTING_READ(VLV_IER);
1696 * Enable digital hotplug on the PCH, and configure the DP short pulse
1697 * duration to 2ms (which is the minimum in the Display Port spec)
1699 * This register is the same on all known PCH chips.
1702 static void ironlake_enable_pch_hotplug(struct drm_device *dev)
1704 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1707 hotplug = I915_READ(PCH_PORT_HOTPLUG);
1708 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
1709 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
1710 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
1711 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
1712 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
1715 static int ironlake_irq_postinstall(struct drm_device *dev)
1717 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1718 /* enable the interrupts we always want on */
1719 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1720 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1724 dev_priv->irq_mask = ~display_mask;
1726 /* should always be able to generate irqs */
1727 I915_WRITE(DEIIR, I915_READ(DEIIR));
1728 I915_WRITE(DEIMR, dev_priv->irq_mask);
1729 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
1730 POSTING_READ(DEIER);
1732 dev_priv->gt_irq_mask = ~0;
1734 I915_WRITE(GTIIR, I915_READ(GTIIR));
1735 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1740 GEN6_BSD_USER_INTERRUPT |
1741 GEN6_BLITTER_USER_INTERRUPT;
1746 GT_BSD_USER_INTERRUPT;
1747 I915_WRITE(GTIER, render_irqs);
1748 POSTING_READ(GTIER);
1750 if (HAS_PCH_CPT(dev)) {
1751 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1752 SDE_PORTB_HOTPLUG_CPT |
1753 SDE_PORTC_HOTPLUG_CPT |
1754 SDE_PORTD_HOTPLUG_CPT);
1756 hotplug_mask = (SDE_CRT_HOTPLUG |
1763 dev_priv->pch_irq_mask = ~hotplug_mask;
1765 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1766 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1767 I915_WRITE(SDEIER, hotplug_mask);
1768 POSTING_READ(SDEIER);
1770 ironlake_enable_pch_hotplug(dev);
1772 if (IS_IRONLAKE_M(dev)) {
1773 /* Clear & enable PCU event interrupts */
1774 I915_WRITE(DEIIR, DE_PCU_EVENT);
1775 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
1776 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
1782 static int ivybridge_irq_postinstall(struct drm_device *dev)
1784 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1785 /* enable the interrupts we always want on */
1787 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
1788 DE_PLANEC_FLIP_DONE_IVB |
1789 DE_PLANEB_FLIP_DONE_IVB |
1790 DE_PLANEA_FLIP_DONE_IVB;
1794 dev_priv->irq_mask = ~display_mask;
1796 /* should always be able to generate irqs */
1797 I915_WRITE(DEIIR, I915_READ(DEIIR));
1798 I915_WRITE(DEIMR, dev_priv->irq_mask);
1801 DE_PIPEC_VBLANK_IVB |
1802 DE_PIPEB_VBLANK_IVB |
1803 DE_PIPEA_VBLANK_IVB);
1804 POSTING_READ(DEIER);
1806 dev_priv->gt_irq_mask = ~0;
1808 I915_WRITE(GTIIR, I915_READ(GTIIR));
1809 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1811 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
1812 GEN6_BLITTER_USER_INTERRUPT;
1813 I915_WRITE(GTIER, render_irqs);
1814 POSTING_READ(GTIER);
1816 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1817 SDE_PORTB_HOTPLUG_CPT |
1818 SDE_PORTC_HOTPLUG_CPT |
1819 SDE_PORTD_HOTPLUG_CPT);
1820 dev_priv->pch_irq_mask = ~hotplug_mask;
1822 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1823 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1824 I915_WRITE(SDEIER, hotplug_mask);
1825 POSTING_READ(SDEIER);
1827 ironlake_enable_pch_hotplug(dev);
1832 static int valleyview_irq_postinstall(struct drm_device *dev)
1834 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1837 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1840 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
1841 enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
1842 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1844 dev_priv->irq_mask = ~enable_mask;
1846 dev_priv->pipestat[0] = 0;
1847 dev_priv->pipestat[1] = 0;
1849 /* Hack for broken MSIs on VLV */
1850 pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
1851 pci_read_config_word(dev->pdev, 0x98, &msid);
1852 msid &= 0xff; /* mask out delivery bits */
1854 pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
1856 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
1857 I915_WRITE(VLV_IER, enable_mask);
1858 I915_WRITE(VLV_IIR, 0xffffffff);
1859 I915_WRITE(PIPESTAT(0), 0xffff);
1860 I915_WRITE(PIPESTAT(1), 0xffff);
1861 POSTING_READ(VLV_IER);
1863 I915_WRITE(VLV_IIR, 0xffffffff);
1864 I915_WRITE(VLV_IIR, 0xffffffff);
1866 render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
1867 GT_GEN6_BLT_CS_ERROR_INTERRUPT |
1868 GT_GEN6_BLT_USER_INTERRUPT |
1869 GT_GEN6_BSD_USER_INTERRUPT |
1870 GT_GEN6_BSD_CS_ERROR_INTERRUPT |
1871 GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
1873 GT_RENDER_CS_ERROR_INTERRUPT |
1877 dev_priv->gt_irq_mask = ~render_irqs;
1879 I915_WRITE(GTIIR, I915_READ(GTIIR));
1880 I915_WRITE(GTIIR, I915_READ(GTIIR));
1881 I915_WRITE(GTIMR, 0);
1882 I915_WRITE(GTIER, render_irqs);
1883 POSTING_READ(GTIER);
1885 /* ack & enable invalid PTE error interrupts */
1886 #if 0 /* FIXME: add support to irq handler for checking these bits */
1887 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
1888 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
1891 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1892 #if 0 /* FIXME: check register definitions; some have moved */
1893 /* Note HDMI and DP share bits */
1894 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1895 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
1896 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
1897 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
1898 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
1899 hotplug_en |= HDMID_HOTPLUG_INT_EN;
1900 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
1901 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
1902 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
1903 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
1904 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
1905 hotplug_en |= CRT_HOTPLUG_INT_EN;
1906 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
1910 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1915 static void valleyview_irq_uninstall(struct drm_device *dev)
1917 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1924 I915_WRITE(PIPESTAT(pipe), 0xffff);
1926 I915_WRITE(HWSTAM, 0xffffffff);
1927 I915_WRITE(PORT_HOTPLUG_EN, 0);
1928 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1930 I915_WRITE(PIPESTAT(pipe), 0xffff);
1931 I915_WRITE(VLV_IIR, 0xffffffff);
1932 I915_WRITE(VLV_IMR, 0xffffffff);
1933 I915_WRITE(VLV_IER, 0x0);
1934 POSTING_READ(VLV_IER);
1937 static void ironlake_irq_uninstall(struct drm_device *dev)
1939 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1944 I915_WRITE(HWSTAM, 0xffffffff);
1946 I915_WRITE(DEIMR, 0xffffffff);
1947 I915_WRITE(DEIER, 0x0);
1948 I915_WRITE(DEIIR, I915_READ(DEIIR));
1950 I915_WRITE(GTIMR, 0xffffffff);
1951 I915_WRITE(GTIER, 0x0);
1952 I915_WRITE(GTIIR, I915_READ(GTIIR));
1954 I915_WRITE(SDEIMR, 0xffffffff);
1955 I915_WRITE(SDEIER, 0x0);
1956 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1959 static void i8xx_irq_preinstall(struct drm_device * dev)
1961 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1964 atomic_set(&dev_priv->irq_received, 0);
1967 I915_WRITE(PIPESTAT(pipe), 0);
1968 I915_WRITE16(IMR, 0xffff);
1969 I915_WRITE16(IER, 0x0);
1970 POSTING_READ16(IER);
1973 static int i8xx_irq_postinstall(struct drm_device *dev)
1975 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1977 dev_priv->pipestat[0] = 0;
1978 dev_priv->pipestat[1] = 0;
1981 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
1983 /* Unmask the interrupts that we always want on. */
1984 dev_priv->irq_mask =
1985 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1986 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1987 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
1988 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
1989 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1990 I915_WRITE16(IMR, dev_priv->irq_mask);
1993 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1994 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1995 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
1996 I915_USER_INTERRUPT);
1997 POSTING_READ16(IER);
2002 static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
2004 struct drm_device *dev = (struct drm_device *) arg;
2005 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2008 unsigned long irqflags;
2012 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2013 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
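/* Flip-pending bits are kept out of the IIR acks until the matching vblank
 * has been handled; only then are they dropped from flip_mask so the next
 * IIR write can clear them.
 */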
2015 atomic_inc(&dev_priv->irq_received);
2017 iir = I915_READ16(IIR);
2021 while (iir & ~flip_mask) {
2022 /* Can't rely on pipestat interrupt bit in iir as it might
2023 * have been cleared after the pipestat interrupt was received.
2024 * It doesn't set the bit in iir again, but it still produces
2025 * interrupts (for non-MSI).
2027 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2028 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2029 i915_handle_error(dev, false);
2031 for_each_pipe(pipe) {
2032 int reg = PIPESTAT(pipe);
2033 pipe_stats[pipe] = I915_READ(reg);
2036 * Clear the PIPE*STAT regs before the IIR
2038 if (pipe_stats[pipe] & 0x8000ffff) {
2039 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2040 DRM_DEBUG_DRIVER("pipe %c underrun\n",
2042 I915_WRITE(reg, pipe_stats[pipe]);
2046 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2048 I915_WRITE16(IIR, iir & ~flip_mask);
2049 new_iir = I915_READ16(IIR); /* Flush posted writes */
2051 i915_update_dri1_breadcrumb(dev);
2053 if (iir & I915_USER_INTERRUPT)
2054 notify_ring(dev, &dev_priv->ring[RCS]);
2056 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
2057 drm_handle_vblank(dev, 0)) {
2058 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
2059 intel_prepare_page_flip(dev, 0);
2060 intel_finish_page_flip(dev, 0);
2061 flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
2065 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
2066 drm_handle_vblank(dev, 1)) {
2067 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
2068 intel_prepare_page_flip(dev, 1);
2069 intel_finish_page_flip(dev, 1);
2070 flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2080 static void i8xx_irq_uninstall(struct drm_device * dev)
2082 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2085 for_each_pipe(pipe) {
2086 /* Clear enable bits; then clear status bits */
2087 I915_WRITE(PIPESTAT(pipe), 0);
2088 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2090 I915_WRITE16(IMR, 0xffff);
2091 I915_WRITE16(IER, 0x0);
2092 I915_WRITE16(IIR, I915_READ16(IIR));
2095 static void i915_irq_preinstall(struct drm_device * dev)
2097 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2100 atomic_set(&dev_priv->irq_received, 0);
2102 if (I915_HAS_HOTPLUG(dev)) {
2103 I915_WRITE(PORT_HOTPLUG_EN, 0);
2104 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2107 I915_WRITE16(HWSTAM, 0xeffe);
2109 I915_WRITE(PIPESTAT(pipe), 0);
2110 I915_WRITE(IMR, 0xffffffff);
2111 I915_WRITE(IER, 0x0);
2115 static int i915_irq_postinstall(struct drm_device *dev)
2117 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2120 dev_priv->pipestat[0] = 0;
2121 dev_priv->pipestat[1] = 0;
2123 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2125 /* Unmask the interrupts that we always want on. */
2126 dev_priv->irq_mask =
2127 ~(I915_ASLE_INTERRUPT |
2128 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2129 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2130 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2131 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2132 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2135 I915_ASLE_INTERRUPT |
2136 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2137 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2138 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2139 I915_USER_INTERRUPT;
2141 if (I915_HAS_HOTPLUG(dev)) {
2142 /* Enable in IER... */
2143 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2144 /* and unmask in IMR */
2145 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2148 I915_WRITE(IMR, dev_priv->irq_mask);
2149 I915_WRITE(IER, enable_mask);
2152 if (I915_HAS_HOTPLUG(dev)) {
2153 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2155 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2156 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2157 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2158 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2159 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2160 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2161 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
2162 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2163 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
2164 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2165 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2166 hotplug_en |= CRT_HOTPLUG_INT_EN;
2167 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2170 /* Ignore TV since it's buggy */
2172 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2175 intel_opregion_enable_asle(dev);
static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	u32 flip[2] = {
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
	};
	int pipe, ret = IRQ_NONE;
2195 atomic_inc(&dev_priv->irq_received);
	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
2200 bool blc_event = false;
2202 /* Can't rely on pipestat interrupt bit in iir as it might
2203 * have been cleared after the pipestat interrupt was received.
2204 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2208 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2209 i915_handle_error(dev, false);
2211 for_each_pipe(pipe) {
2212 int reg = PIPESTAT(pipe);
2213 pipe_stats[pipe] = I915_READ(reg);
2215 /* Clear the PIPE*STAT regs before the IIR */
2216 if (pipe_stats[pipe] & 0x8000ffff) {
2217 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
2230 if ((I915_HAS_HOTPLUG(dev)) &&
2231 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2232 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
2236 if (hotplug_status & dev_priv->hotplug_supported_mask)
2237 queue_work(dev_priv->wq,
2238 &dev_priv->hotplug_work);
2240 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
2245 new_iir = I915_READ(IIR); /* Flush posted writes */
2247 if (iir & I915_USER_INTERRUPT)
2248 notify_ring(dev, &dev_priv->ring[RCS]);
		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2255 drm_handle_vblank(dev, pipe)) {
2256 if (iir & flip[plane]) {
2257 intel_prepare_page_flip(dev, plane);
2258 intel_finish_page_flip(dev, pipe);
					flip_mask &= ~flip[plane];
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
2268 intel_opregion_asle_intr(dev);
2270 /* With MSI, interrupts are only generated when iir
2271 * transitions from zero to nonzero. If another bit got
2272 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * a hangcheck.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);
	i915_update_dri1_breadcrumb(dev);

	return ret;
}
static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
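/*
 * The gen4 (i965/g4x) variants below mirror the gen3 ones, but use the full
 * 32-bit interrupt registers, optionally drive the BSD ring interrupt, and
 * program chipset-specific error masks (GM45 vs. pre-G4X) into EMR.
 */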
static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;
2342 /* Unmask the interrupts that we always want on. */
2343 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2344 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2345 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2346 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2347 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2348 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2350 enable_mask = ~dev_priv->irq_mask;
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;
2356 dev_priv->pipestat[0] = 0;
2357 dev_priv->pipestat[1] = 0;
2359 if (I915_HAS_HOTPLUG(dev)) {
2360 /* Enable in IER... */
2361 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2362 /* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
2372 GM45_ERROR_MEM_PRIV |
2373 GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);
2381 I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);
2385 if (I915_HAS_HOTPLUG(dev)) {
2386 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2388 /* Note HDMI and DP share bits */
2389 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2390 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2391 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2392 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2393 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2394 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2395 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
2396 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2397 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
2398 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2399 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2400 hotplug_en |= CRT_HOTPLUG_INT_EN;
2402 /* Programming the CRT detection parameters tends
2403 to generate a spurious hotplug event about three
			   seconds later.  So just do it once.
			*/
			if (IS_G4X(dev))
				hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

	intel_opregion_enable_asle(dev);

	return 0;
}
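/*
 * The gen4 handler additionally notifies the BSD (video) ring on
 * I915_BSD_USER_INTERRUPT and keys vblank handling off the "start of
 * vblank" pipe status bit rather than the legacy vblank status used on
 * gen2/3.
 */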
static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
2431 atomic_inc(&dev_priv->irq_received);
	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = iir != 0;
2440 /* Can't rely on pipestat interrupt bit in iir as it might
2441 * have been cleared after the pipestat interrupt was received.
2442 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2446 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2447 i915_handle_error(dev, false);
2449 for_each_pipe(pipe) {
2450 int reg = PIPESTAT(pipe);
2451 pipe_stats[pipe] = I915_READ(reg);
			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
2457 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;
2471 /* Consume port. Then clear IIR or we'll miss events */
2472 if ((I915_HAS_HOTPLUG(dev)) &&
2473 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2474 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
2478 if (hotplug_status & dev_priv->hotplug_supported_mask)
2479 queue_work(dev_priv->wq,
2480 &dev_priv->hotplug_work);
2482 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
2487 new_iir = I915_READ(IIR); /* Flush posted writes */
2489 if (iir & I915_USER_INTERRUPT)
2490 notify_ring(dev, &dev_priv->ring[RCS]);
2491 if (iir & I915_BSD_USER_INTERRUPT)
2492 notify_ring(dev, &dev_priv->ring[VCS]);
2494 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
2495 intel_prepare_page_flip(dev, 0);
2497 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
2498 intel_prepare_page_flip(dev, 1);
2500 for_each_pipe(pipe) {
2501 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2502 drm_handle_vblank(dev, pipe)) {
2503 i915_pageflip_stall_check(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
2513 intel_opregion_asle_intr(dev);
2515 /* With MSI, interrupts are only generated when iir
2516 * transitions from zero to nonzero. If another bit got
2517 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * a hangcheck.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
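/*
 * Teardown disables every interrupt source and then acks whatever is still
 * pending; the 0x8000ffff mask below writes back only the
 * write-one-to-clear status bits so no enable bits get set by accident.
 */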
static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
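/*
 * intel_irq_init() runs once at driver load: it picks the hardware frame
 * counter width, the vblank timestamping hooks (KMS only) and the
 * generation-specific irq_preinstall/postinstall/handler/uninstall
 * callbacks that the DRM core will invoke.
 */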
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
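	/*
	 * Hotplug, GPU-error and RPS events are only detected in the hard-irq
	 * handlers; the actual processing is deferred to these work items,
	 * which run in process context on the driver's workqueue.
	 */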
2567 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2568 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
2569 INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
2571 dev->driver->get_vblank_counter = i915_get_vblank_counter;
2572 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2573 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
2574 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
2582 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
2584 if (IS_VALLEYVIEW(dev)) {
2585 dev->driver->irq_handler = valleyview_irq_handler;
2586 dev->driver->irq_preinstall = valleyview_irq_preinstall;
2587 dev->driver->irq_postinstall = valleyview_irq_postinstall;
2588 dev->driver->irq_uninstall = valleyview_irq_uninstall;
2589 dev->driver->enable_vblank = valleyview_enable_vblank;
2590 dev->driver->disable_vblank = valleyview_disable_vblank;
2591 } else if (IS_IVYBRIDGE(dev)) {
2592 /* Share pre & uninstall handlers with ILK/SNB */
2593 dev->driver->irq_handler = ivybridge_irq_handler;
2594 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2595 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2596 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2597 dev->driver->enable_vblank = ivybridge_enable_vblank;
2598 dev->driver->disable_vblank = ivybridge_disable_vblank;
2599 } else if (IS_HASWELL(dev)) {
2600 /* Share interrupts handling with IVB */
2601 dev->driver->irq_handler = ivybridge_irq_handler;
2602 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2603 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2604 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2605 dev->driver->enable_vblank = ivybridge_enable_vblank;
2606 dev->driver->disable_vblank = ivybridge_disable_vblank;
2607 } else if (HAS_PCH_SPLIT(dev)) {
2608 dev->driver->irq_handler = ironlake_irq_handler;
2609 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2610 dev->driver->irq_postinstall = ironlake_irq_postinstall;
2611 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2612 dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
2616 dev->driver->irq_preinstall = i8xx_irq_preinstall;
2617 dev->driver->irq_postinstall = i8xx_irq_postinstall;
2618 dev->driver->irq_handler = i8xx_irq_handler;
2619 dev->driver->irq_uninstall = i8xx_irq_uninstall;
2620 } else if (INTEL_INFO(dev)->gen == 3) {
2621 /* IIR "flip pending" means done if this bit is set */
2622 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
2624 dev->driver->irq_preinstall = i915_irq_preinstall;
2625 dev->driver->irq_postinstall = i915_irq_postinstall;
2626 dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
2630 dev->driver->irq_postinstall = i965_irq_postinstall;
2631 dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}