1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
34 #include <drm/i915_drm.h>
36 #include "i915_trace.h"
37 #include "intel_drv.h"
39 /* For display hotplug interrupt */
41 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
43 if ((dev_priv->irq_mask & mask) != 0) {
44 dev_priv->irq_mask &= ~mask;
45 I915_WRITE(DEIMR, dev_priv->irq_mask);
51 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
53 if ((dev_priv->irq_mask & mask) != mask) {
54 dev_priv->irq_mask |= mask;
55 I915_WRITE(DEIMR, dev_priv->irq_mask);
61 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
63 if ((dev_priv->pipestat[pipe] & mask) != mask) {
64 u32 reg = PIPESTAT(pipe);
66 dev_priv->pipestat[pipe] |= mask;
67 /* Enable the interrupt, clear any pending status */
68 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
74 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
76 if ((dev_priv->pipestat[pipe] & mask) != 0) {
77 u32 reg = PIPESTAT(pipe);
79 dev_priv->pipestat[pipe] &= ~mask;
80 I915_WRITE(reg, dev_priv->pipestat[pipe]);
86 * intel_enable_asle - enable ASLE interrupt for OpRegion
88 void intel_enable_asle(struct drm_device *dev)
90 drm_i915_private_t *dev_priv = dev->dev_private;
91 unsigned long irqflags;
93 /* FIXME: opregion/asle for VLV */
94 if (IS_VALLEYVIEW(dev))
97 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
99 if (HAS_PCH_SPLIT(dev))
100 ironlake_enable_display_irq(dev_priv, DE_GSE);
102 i915_enable_pipestat(dev_priv, 1,
103 PIPE_LEGACY_BLC_EVENT_ENABLE);
104 if (INTEL_INFO(dev)->gen >= 4)
105 i915_enable_pipestat(dev_priv, 0,
106 PIPE_LEGACY_BLC_EVENT_ENABLE);
109 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
113 * i915_pipe_enabled - check if a pipe is enabled
115 * @pipe: pipe to check
117 * Reading certain registers when the pipe is disabled can hang the chip.
* Use this routine to make sure the PLL is running and the pipe is active
119 * before reading such registers if unsure.
122 i915_pipe_enabled(struct drm_device *dev, int pipe)
124 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
125 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
128 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
131 /* Called from drm generic code, passed a 'crtc', which
132 * we use as a pipe index
134 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
136 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
137 unsigned long high_frame;
138 unsigned long low_frame;
139 u32 high1, high2, low;
141 if (!i915_pipe_enabled(dev, pipe)) {
142 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
143 "pipe %c\n", pipe_name(pipe));
147 high_frame = PIPEFRAME(pipe);
148 low_frame = PIPEFRAMEPIXEL(pipe);
* High & low register fields aren't synchronized, so make sure
* we get a low value that's stable across two reads of the high
* register.
156 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
157 low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
158 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
159 } while (high1 != high2);
161 high1 >>= PIPE_FRAME_HIGH_SHIFT;
162 low >>= PIPE_FRAME_LOW_SHIFT;
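/* Descriptive note: the hardware frame counter is split between a high
 * register field and an 8-bit low field (hence the << 8 below); the two
 * pieces are stitched back together into a single frame count for the
 * caller.
 */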
163 return (high1 << 8) | low;
166 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
168 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
169 int reg = PIPE_FRMCOUNT_GM45(pipe);
171 if (!i915_pipe_enabled(dev, pipe)) {
172 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
173 "pipe %c\n", pipe_name(pipe));
177 return I915_READ(reg);
180 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
181 int *vpos, int *hpos)
183 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
184 u32 vbl = 0, position = 0;
185 int vbl_start, vbl_end, htotal, vtotal;
188 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
191 if (!i915_pipe_enabled(dev, pipe)) {
192 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
193 "pipe %c\n", pipe_name(pipe));
198 vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
200 if (INTEL_INFO(dev)->gen >= 4) {
201 /* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
204 position = I915_READ(PIPEDSL(pipe));
206 /* Decode into vertical scanout position. Don't have
* horizontal scanout position.
*/
209 *vpos = position & 0x1fff;
/* Have access to pixelcount since start of frame.
 * We can split this into vertical and horizontal
 * scanout position.
 */
216 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
218 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
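/* The pixel count runs from the start of the frame, so dividing by the
 * horizontal total gives the current scanline (*vpos) and the remainder
 * gives the pixel within that line (*hpos).
 */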
219 *vpos = position / htotal;
220 *hpos = position - (*vpos * htotal);
223 /* Query vblank area. */
224 vbl = I915_READ(VBLANK(cpu_transcoder));
226 /* Test position against vblank region. */
227 vbl_start = vbl & 0x1fff;
228 vbl_end = (vbl >> 16) & 0x1fff;
230 if ((*vpos < vbl_start) || (*vpos > vbl_end))
233 /* Inside "upper part" of vblank area? Apply corrective offset: */
234 if (in_vbl && (*vpos >= vbl_start))
235 *vpos = *vpos - vtotal;
237 /* Readouts valid? */
239 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
243 ret |= DRM_SCANOUTPOS_INVBL;
248 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
250 struct timeval *vblank_time,
253 struct drm_i915_private *dev_priv = dev->dev_private;
254 struct drm_crtc *crtc;
256 if (pipe < 0 || pipe >= dev_priv->num_pipe) {
257 DRM_ERROR("Invalid crtc %d\n", pipe);
261 /* Get drm_crtc to timestamp: */
262 crtc = intel_get_crtc_for_pipe(dev, pipe);
264 DRM_ERROR("Invalid crtc %d\n", pipe);
268 if (!crtc->enabled) {
269 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
273 /* Helper routine in DRM core does all the work: */
274 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
280 * Handle hotplug events outside the interrupt handler proper.
282 static void i915_hotplug_work_func(struct work_struct *work)
284 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
286 struct drm_device *dev = dev_priv->dev;
287 struct drm_mode_config *mode_config = &dev->mode_config;
288 struct intel_encoder *encoder;
290 /* HPD irq before everything is fully set up. */
291 if (!dev_priv->enable_hotplug_processing)
294 mutex_lock(&mode_config->mutex);
295 DRM_DEBUG_KMS("running encoder hotplug functions\n");
297 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
298 if (encoder->hot_plug)
299 encoder->hot_plug(encoder);
301 mutex_unlock(&mode_config->mutex);
303 /* Just fire off a uevent and let userspace tell us what to do */
304 drm_helper_hpd_irq_event(dev);
307 static void ironlake_handle_rps_change(struct drm_device *dev)
309 drm_i915_private_t *dev_priv = dev->dev_private;
310 u32 busy_up, busy_down, max_avg, min_avg;
314 spin_lock_irqsave(&mchdev_lock, flags);
316 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
318 new_delay = dev_priv->ips.cur_delay;
320 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
321 busy_up = I915_READ(RCPREVBSYTUPAVG);
322 busy_down = I915_READ(RCPREVBSYTDNAVG);
323 max_avg = I915_READ(RCBMAXAVG);
324 min_avg = I915_READ(RCBMINAVG);
326 /* Handle RCS change request from hw */
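/* Note: smaller "delay" values correspond to higher frequencies here,
 * which is why a busy GPU decrements new_delay and clamps it at
 * ips.max_delay (the numerically smallest, fastest state), and an idle
 * GPU increments it toward ips.min_delay.
 */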
327 if (busy_up > max_avg) {
328 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
329 new_delay = dev_priv->ips.cur_delay - 1;
330 if (new_delay < dev_priv->ips.max_delay)
331 new_delay = dev_priv->ips.max_delay;
332 } else if (busy_down < min_avg) {
333 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
334 new_delay = dev_priv->ips.cur_delay + 1;
335 if (new_delay > dev_priv->ips.min_delay)
336 new_delay = dev_priv->ips.min_delay;
339 if (ironlake_set_drps(dev, new_delay))
340 dev_priv->ips.cur_delay = new_delay;
342 spin_unlock_irqrestore(&mchdev_lock, flags);
347 static void notify_ring(struct drm_device *dev,
348 struct intel_ring_buffer *ring)
350 struct drm_i915_private *dev_priv = dev->dev_private;
352 if (ring->obj == NULL)
355 trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
357 wake_up_all(&ring->irq_queue);
358 if (i915_enable_hangcheck) {
359 dev_priv->hangcheck_count = 0;
360 mod_timer(&dev_priv->hangcheck_timer,
361 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
365 static void gen6_pm_rps_work(struct work_struct *work)
367 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
372 spin_lock_irq(&dev_priv->rps.lock);
373 pm_iir = dev_priv->rps.pm_iir;
374 dev_priv->rps.pm_iir = 0;
375 pm_imr = I915_READ(GEN6_PMIMR);
376 I915_WRITE(GEN6_PMIMR, 0);
377 spin_unlock_irq(&dev_priv->rps.lock);
379 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
382 mutex_lock(&dev_priv->rps.hw_lock);
384 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
385 new_delay = dev_priv->rps.cur_delay + 1;
387 new_delay = dev_priv->rps.cur_delay - 1;
/* sysfs frequency interfaces may have snuck in while servicing the
 * interrupt
 */
392 if (!(new_delay > dev_priv->rps.max_delay ||
393 new_delay < dev_priv->rps.min_delay)) {
394 gen6_set_rps(dev_priv->dev, new_delay);
397 mutex_unlock(&dev_priv->rps.hw_lock);
* ivybridge_parity_work - Workqueue called when a parity error interrupt
* occurred.
* @work: workqueue struct
*
* Doesn't actually do anything except notify userspace. As a consequence of
* this event, userspace should try to remap the bad rows since statistically
* the same row is more likely to go bad again.
410 static void ivybridge_parity_work(struct work_struct *work)
412 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
413 l3_parity.error_work);
414 u32 error_status, row, bank, subbank;
415 char *parity_event[5];
419 /* We must turn off DOP level clock gating to access the L3 registers.
420 * In order to prevent a get/put style interface, acquire struct mutex
* any time we access those registers.
*/
423 mutex_lock(&dev_priv->dev->struct_mutex);
425 misccpctl = I915_READ(GEN7_MISCCPCTL);
426 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
427 POSTING_READ(GEN7_MISCCPCTL);
429 error_status = I915_READ(GEN7_L3CDERRST1);
430 row = GEN7_PARITY_ERROR_ROW(error_status);
431 bank = GEN7_PARITY_ERROR_BANK(error_status);
432 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
434 I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
435 GEN7_L3CDERRST1_ENABLE);
436 POSTING_READ(GEN7_L3CDERRST1);
438 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
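/* The L3 parity interrupt was masked off in the IRQ handler
 * (ivybridge_handle_parity_error); re-enable it now that the error
 * status has been read and cleared.
 */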
440 spin_lock_irqsave(&dev_priv->irq_lock, flags);
441 dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
442 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
443 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
445 mutex_unlock(&dev_priv->dev->struct_mutex);
447 parity_event[0] = "L3_PARITY_ERROR=1";
448 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
449 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
450 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
451 parity_event[4] = NULL;
453 kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
454 KOBJ_CHANGE, parity_event);
456 DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
459 kfree(parity_event[3]);
460 kfree(parity_event[2]);
461 kfree(parity_event[1]);
464 static void ivybridge_handle_parity_error(struct drm_device *dev)
466 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
469 if (!HAS_L3_GPU_CACHE(dev))
472 spin_lock_irqsave(&dev_priv->irq_lock, flags);
473 dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
474 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
475 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
477 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
480 static void snb_gt_irq_handler(struct drm_device *dev,
481 struct drm_i915_private *dev_priv,
485 if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
486 GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
487 notify_ring(dev, &dev_priv->ring[RCS]);
488 if (gt_iir & GEN6_BSD_USER_INTERRUPT)
489 notify_ring(dev, &dev_priv->ring[VCS]);
490 if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
491 notify_ring(dev, &dev_priv->ring[BCS]);
493 if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
494 GT_GEN6_BSD_CS_ERROR_INTERRUPT |
495 GT_RENDER_CS_ERROR_INTERRUPT)) {
496 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
497 i915_handle_error(dev, false);
500 if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
501 ivybridge_handle_parity_error(dev);
504 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
* IIR bits should never already be set because IMR should
* prevent an interrupt from being shown in IIR. The warning
* displays a case where we've unsafely cleared
* dev_priv->rps.pm_iir. Although missing an interrupt of the same
* type is not a problem, it does indicate a problem in the logic.
*
* The mask bit in IMR is cleared by dev_priv->rps.work.
519 spin_lock_irqsave(&dev_priv->rps.lock, flags);
520 dev_priv->rps.pm_iir |= pm_iir;
521 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
522 POSTING_READ(GEN6_PMIMR);
523 spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
525 queue_work(dev_priv->wq, &dev_priv->rps.work);
528 static void gmbus_irq_handler(struct drm_device *dev)
530 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
532 wake_up_all(&dev_priv->gmbus_wait_queue);
535 static void dp_aux_irq_handler(struct drm_device *dev)
537 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
539 wake_up_all(&dev_priv->gmbus_wait_queue);
542 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
544 struct drm_device *dev = (struct drm_device *) arg;
545 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
546 u32 iir, gt_iir, pm_iir;
547 irqreturn_t ret = IRQ_NONE;
548 unsigned long irqflags;
550 u32 pipe_stats[I915_MAX_PIPES];
552 atomic_inc(&dev_priv->irq_received);
555 iir = I915_READ(VLV_IIR);
556 gt_iir = I915_READ(GTIIR);
557 pm_iir = I915_READ(GEN6_PMIIR);
559 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
564 snb_gt_irq_handler(dev, dev_priv, gt_iir);
566 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
567 for_each_pipe(pipe) {
568 int reg = PIPESTAT(pipe);
569 pipe_stats[pipe] = I915_READ(reg);
572 * Clear the PIPE*STAT regs before the IIR
574 if (pipe_stats[pipe] & 0x8000ffff) {
575 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
576 DRM_DEBUG_DRIVER("pipe %c underrun\n",
578 I915_WRITE(reg, pipe_stats[pipe]);
581 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
583 for_each_pipe(pipe) {
584 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
585 drm_handle_vblank(dev, pipe);
587 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
588 intel_prepare_page_flip(dev, pipe);
589 intel_finish_page_flip(dev, pipe);
593 /* Consume port. Then clear IIR or we'll miss events */
594 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
595 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
597 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
599 if (hotplug_status & dev_priv->hotplug_supported_mask)
600 queue_work(dev_priv->wq,
601 &dev_priv->hotplug_work);
603 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
604 I915_READ(PORT_HOTPLUG_STAT);
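/* Writing the latched value back clears the sticky hotplug status bits;
 * the follow-up read acts as a posting read so the clear completes
 * before VLV_IIR is cleared below.
 */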
607 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
608 gmbus_irq_handler(dev);
610 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
611 gen6_queue_rps_work(dev_priv, pm_iir);
613 I915_WRITE(GTIIR, gt_iir);
614 I915_WRITE(GEN6_PMIIR, pm_iir);
615 I915_WRITE(VLV_IIR, iir);
622 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
624 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
627 if (pch_iir & SDE_HOTPLUG_MASK)
628 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
630 if (pch_iir & SDE_AUDIO_POWER_MASK)
631 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
632 (pch_iir & SDE_AUDIO_POWER_MASK) >>
633 SDE_AUDIO_POWER_SHIFT);
635 if (pch_iir & SDE_AUX_MASK)
636 dp_aux_irq_handler(dev);
638 if (pch_iir & SDE_GMBUS)
639 gmbus_irq_handler(dev);
641 if (pch_iir & SDE_AUDIO_HDCP_MASK)
642 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
644 if (pch_iir & SDE_AUDIO_TRANS_MASK)
645 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
647 if (pch_iir & SDE_POISON)
648 DRM_ERROR("PCH poison interrupt\n");
650 if (pch_iir & SDE_FDI_MASK)
652 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
654 I915_READ(FDI_RX_IIR(pipe)));
656 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
657 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
659 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
660 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
662 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
663 DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
664 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
665 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
668 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
670 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
673 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
674 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
676 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
677 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
678 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
679 SDE_AUDIO_POWER_SHIFT_CPT);
681 if (pch_iir & SDE_AUX_MASK_CPT)
682 dp_aux_irq_handler(dev);
684 if (pch_iir & SDE_GMBUS_CPT)
685 gmbus_irq_handler(dev);
687 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
688 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
690 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
691 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
693 if (pch_iir & SDE_FDI_MASK_CPT)
695 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
697 I915_READ(FDI_RX_IIR(pipe)));
700 static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
702 struct drm_device *dev = (struct drm_device *) arg;
703 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
704 u32 de_iir, gt_iir, de_ier, pm_iir;
705 irqreturn_t ret = IRQ_NONE;
708 atomic_inc(&dev_priv->irq_received);
710 /* disable master interrupt before clearing iir */
711 de_ier = I915_READ(DEIER);
712 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
714 gt_iir = I915_READ(GTIIR);
716 snb_gt_irq_handler(dev, dev_priv, gt_iir);
717 I915_WRITE(GTIIR, gt_iir);
721 de_iir = I915_READ(DEIIR);
723 if (de_iir & DE_AUX_CHANNEL_A_IVB)
724 dp_aux_irq_handler(dev);
726 if (de_iir & DE_GSE_IVB)
727 intel_opregion_gse_intr(dev);
729 for (i = 0; i < 3; i++) {
730 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
731 drm_handle_vblank(dev, i);
732 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
733 intel_prepare_page_flip(dev, i);
734 intel_finish_page_flip_plane(dev, i);
738 /* check event from PCH */
739 if (de_iir & DE_PCH_EVENT_IVB) {
740 u32 pch_iir = I915_READ(SDEIIR);
742 cpt_irq_handler(dev, pch_iir);
/* clear PCH hotplug event before clearing the CPU irq */
745 I915_WRITE(SDEIIR, pch_iir);
748 I915_WRITE(DEIIR, de_iir);
752 pm_iir = I915_READ(GEN6_PMIIR);
754 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
755 gen6_queue_rps_work(dev_priv, pm_iir);
756 I915_WRITE(GEN6_PMIIR, pm_iir);
760 I915_WRITE(DEIER, de_ier);
766 static void ilk_gt_irq_handler(struct drm_device *dev,
767 struct drm_i915_private *dev_priv,
770 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
771 notify_ring(dev, &dev_priv->ring[RCS]);
772 if (gt_iir & GT_BSD_USER_INTERRUPT)
773 notify_ring(dev, &dev_priv->ring[VCS]);
776 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
778 struct drm_device *dev = (struct drm_device *) arg;
779 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
781 u32 de_iir, gt_iir, de_ier, pm_iir;
783 atomic_inc(&dev_priv->irq_received);
785 /* disable master interrupt before clearing iir */
786 de_ier = I915_READ(DEIER);
787 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
790 de_iir = I915_READ(DEIIR);
791 gt_iir = I915_READ(GTIIR);
792 pm_iir = I915_READ(GEN6_PMIIR);
794 if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
800 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
802 snb_gt_irq_handler(dev, dev_priv, gt_iir);
804 if (de_iir & DE_AUX_CHANNEL_A)
805 dp_aux_irq_handler(dev);
808 intel_opregion_gse_intr(dev);
810 if (de_iir & DE_PIPEA_VBLANK)
811 drm_handle_vblank(dev, 0);
813 if (de_iir & DE_PIPEB_VBLANK)
814 drm_handle_vblank(dev, 1);
816 if (de_iir & DE_PLANEA_FLIP_DONE) {
817 intel_prepare_page_flip(dev, 0);
818 intel_finish_page_flip_plane(dev, 0);
821 if (de_iir & DE_PLANEB_FLIP_DONE) {
822 intel_prepare_page_flip(dev, 1);
823 intel_finish_page_flip_plane(dev, 1);
826 /* check event from PCH */
827 if (de_iir & DE_PCH_EVENT) {
828 u32 pch_iir = I915_READ(SDEIIR);
830 if (HAS_PCH_CPT(dev))
831 cpt_irq_handler(dev, pch_iir);
833 ibx_irq_handler(dev, pch_iir);
/* should clear PCH hotplug event before clearing the CPU irq */
836 I915_WRITE(SDEIIR, pch_iir);
839 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
840 ironlake_handle_rps_change(dev);
842 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
843 gen6_queue_rps_work(dev_priv, pm_iir);
845 I915_WRITE(GTIIR, gt_iir);
846 I915_WRITE(DEIIR, de_iir);
847 I915_WRITE(GEN6_PMIIR, pm_iir);
850 I915_WRITE(DEIER, de_ier);
857 * i915_error_work_func - do process context error handling work
* Fire an error uevent so userspace can see that a hang or error occurred.
863 static void i915_error_work_func(struct work_struct *work)
865 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
867 struct drm_device *dev = dev_priv->dev;
868 char *error_event[] = { "ERROR=1", NULL };
869 char *reset_event[] = { "RESET=1", NULL };
870 char *reset_done_event[] = { "ERROR=0", NULL };
872 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
874 if (atomic_read(&dev_priv->mm.wedged)) {
875 DRM_DEBUG_DRIVER("resetting chip\n");
876 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
877 if (!i915_reset(dev)) {
878 atomic_set(&dev_priv->mm.wedged, 0);
879 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
881 complete_all(&dev_priv->error_completion);
885 /* NB: please notice the memset */
886 static void i915_get_extra_instdone(struct drm_device *dev,
889 struct drm_i915_private *dev_priv = dev->dev_private;
890 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
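/* Note (inferred from the register names in the switch below): older
 * parts expose a single INSTDONE register, the i965-class parts add
 * INSTDONE1, and gen7 splits the information across four per-unit
 * registers.
 */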
892 switch(INTEL_INFO(dev)->gen) {
895 instdone[0] = I915_READ(INSTDONE);
900 instdone[0] = I915_READ(INSTDONE_I965);
901 instdone[1] = I915_READ(INSTDONE1);
904 WARN_ONCE(1, "Unsupported platform\n");
906 instdone[0] = I915_READ(GEN7_INSTDONE_1);
907 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
908 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
909 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
914 #ifdef CONFIG_DEBUG_FS
915 static struct drm_i915_error_object *
916 i915_error_object_create(struct drm_i915_private *dev_priv,
917 struct drm_i915_gem_object *src)
919 struct drm_i915_error_object *dst;
923 if (src == NULL || src->pages == NULL)
926 count = src->base.size / PAGE_SIZE;
928 dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
932 reloc_offset = src->gtt_offset;
933 for (i = 0; i < count; i++) {
937 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
941 local_irq_save(flags);
942 if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
943 src->has_global_gtt_mapping) {
946 /* Simply ignore tiling or any overlapping fence.
947 * It's part of the error state, and this hopefully
* captures what the GPU read.
*/
951 s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
953 memcpy_fromio(d, s, PAGE_SIZE);
954 io_mapping_unmap_atomic(s);
955 } else if (src->stolen) {
956 unsigned long offset;
958 offset = dev_priv->mm.stolen_base;
959 offset += src->stolen->start;
960 offset += i << PAGE_SHIFT;
962 memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
967 page = i915_gem_object_get_page(src, i);
969 drm_clflush_pages(&page, 1);
971 s = kmap_atomic(page);
972 memcpy(d, s, PAGE_SIZE);
975 drm_clflush_pages(&page, 1);
977 local_irq_restore(flags);
981 reloc_offset += PAGE_SIZE;
983 dst->page_count = count;
984 dst->gtt_offset = src->gtt_offset;
990 kfree(dst->pages[i]);
996 i915_error_object_free(struct drm_i915_error_object *obj)
1003 for (page = 0; page < obj->page_count; page++)
1004 kfree(obj->pages[page]);
1010 i915_error_state_free(struct kref *error_ref)
1012 struct drm_i915_error_state *error = container_of(error_ref,
1013 typeof(*error), ref);
1016 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
1017 i915_error_object_free(error->ring[i].batchbuffer);
1018 i915_error_object_free(error->ring[i].ringbuffer);
1019 kfree(error->ring[i].requests);
1022 kfree(error->active_bo);
1023 kfree(error->overlay);
1026 static void capture_bo(struct drm_i915_error_buffer *err,
1027 struct drm_i915_gem_object *obj)
1029 err->size = obj->base.size;
1030 err->name = obj->base.name;
1031 err->rseqno = obj->last_read_seqno;
1032 err->wseqno = obj->last_write_seqno;
1033 err->gtt_offset = obj->gtt_offset;
1034 err->read_domains = obj->base.read_domains;
1035 err->write_domain = obj->base.write_domain;
1036 err->fence_reg = obj->fence_reg;
1038 if (obj->pin_count > 0)
1040 if (obj->user_pin_count > 0)
1042 err->tiling = obj->tiling_mode;
1043 err->dirty = obj->dirty;
1044 err->purgeable = obj->madv != I915_MADV_WILLNEED;
1045 err->ring = obj->ring ? obj->ring->id : -1;
1046 err->cache_level = obj->cache_level;
1049 static u32 capture_active_bo(struct drm_i915_error_buffer *err,
1050 int count, struct list_head *head)
1052 struct drm_i915_gem_object *obj;
1055 list_for_each_entry(obj, head, mm_list) {
1056 capture_bo(err++, obj);
1064 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
1065 int count, struct list_head *head)
1067 struct drm_i915_gem_object *obj;
1070 list_for_each_entry(obj, head, gtt_list) {
1071 if (obj->pin_count == 0)
1074 capture_bo(err++, obj);
1082 static void i915_gem_record_fences(struct drm_device *dev,
1083 struct drm_i915_error_state *error)
1085 struct drm_i915_private *dev_priv = dev->dev_private;
1089 switch (INTEL_INFO(dev)->gen) {
1092 for (i = 0; i < 16; i++)
1093 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1097 for (i = 0; i < 16; i++)
1098 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1101 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1102 for (i = 0; i < 8; i++)
1103 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
1105 for (i = 0; i < 8; i++)
1106 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1114 static struct drm_i915_error_object *
1115 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1116 struct intel_ring_buffer *ring)
1118 struct drm_i915_gem_object *obj;
1121 if (!ring->get_seqno)
1124 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1125 u32 acthd = I915_READ(ACTHD);
1127 if (WARN_ON(ring->id != RCS))
1130 obj = ring->private;
1131 if (acthd >= obj->gtt_offset &&
1132 acthd < obj->gtt_offset + obj->base.size)
1133 return i915_error_object_create(dev_priv, obj);
1136 seqno = ring->get_seqno(ring, false);
1137 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1138 if (obj->ring != ring)
1141 if (i915_seqno_passed(seqno, obj->last_read_seqno))
1144 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1147 /* We need to copy these to an anonymous buffer as the simplest
* method to avoid being overwritten by userspace.
*/
1150 return i915_error_object_create(dev_priv, obj);
1156 static void i915_record_ring_state(struct drm_device *dev,
1157 struct drm_i915_error_state *error,
1158 struct intel_ring_buffer *ring)
1160 struct drm_i915_private *dev_priv = dev->dev_private;
1162 if (INTEL_INFO(dev)->gen >= 6) {
1163 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
1164 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
1165 error->semaphore_mboxes[ring->id][0]
1166 = I915_READ(RING_SYNC_0(ring->mmio_base));
1167 error->semaphore_mboxes[ring->id][1]
1168 = I915_READ(RING_SYNC_1(ring->mmio_base));
1169 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1170 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
1173 if (INTEL_INFO(dev)->gen >= 4) {
1174 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
1175 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1176 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1177 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
1178 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
1179 if (ring->id == RCS)
1180 error->bbaddr = I915_READ64(BB_ADDR);
1182 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
1183 error->ipeir[ring->id] = I915_READ(IPEIR);
1184 error->ipehr[ring->id] = I915_READ(IPEHR);
1185 error->instdone[ring->id] = I915_READ(INSTDONE);
1188 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
1189 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1190 error->seqno[ring->id] = ring->get_seqno(ring, false);
1191 error->acthd[ring->id] = intel_ring_get_active_head(ring);
1192 error->head[ring->id] = I915_READ_HEAD(ring);
1193 error->tail[ring->id] = I915_READ_TAIL(ring);
1195 error->cpu_ring_head[ring->id] = ring->head;
1196 error->cpu_ring_tail[ring->id] = ring->tail;
1199 static void i915_gem_record_rings(struct drm_device *dev,
1200 struct drm_i915_error_state *error)
1202 struct drm_i915_private *dev_priv = dev->dev_private;
1203 struct intel_ring_buffer *ring;
1204 struct drm_i915_gem_request *request;
1207 for_each_ring(ring, dev_priv, i) {
1208 i915_record_ring_state(dev, error, ring);
1210 error->ring[i].batchbuffer =
1211 i915_error_first_batchbuffer(dev_priv, ring);
1213 error->ring[i].ringbuffer =
1214 i915_error_object_create(dev_priv, ring->obj);
1217 list_for_each_entry(request, &ring->request_list, list)
1220 error->ring[i].num_requests = count;
1221 error->ring[i].requests =
1222 kmalloc(count*sizeof(struct drm_i915_error_request),
1224 if (error->ring[i].requests == NULL) {
1225 error->ring[i].num_requests = 0;
1230 list_for_each_entry(request, &ring->request_list, list) {
1231 struct drm_i915_error_request *erq;
1233 erq = &error->ring[i].requests[count++];
1234 erq->seqno = request->seqno;
1235 erq->jiffies = request->emitted_jiffies;
1236 erq->tail = request->tail;
1242 * i915_capture_error_state - capture an error record for later analysis
1245 * Should be called when an error is detected (either a hang or an error
1246 * interrupt) to capture error state from the time of the error. Fills
* out a structure which becomes available in debugfs for user level tools
* to leverage.
1250 static void i915_capture_error_state(struct drm_device *dev)
1252 struct drm_i915_private *dev_priv = dev->dev_private;
1253 struct drm_i915_gem_object *obj;
1254 struct drm_i915_error_state *error;
1255 unsigned long flags;
1258 spin_lock_irqsave(&dev_priv->error_lock, flags);
1259 error = dev_priv->first_error;
1260 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
1264 /* Account for pipe specific data like PIPE*STAT */
1265 error = kzalloc(sizeof(*error), GFP_ATOMIC);
1267 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1271 DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
1272 dev->primary->index);
1274 kref_init(&error->ref);
1275 error->eir = I915_READ(EIR);
1276 error->pgtbl_er = I915_READ(PGTBL_ER);
1277 error->ccid = I915_READ(CCID);
1279 if (HAS_PCH_SPLIT(dev))
1280 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1281 else if (IS_VALLEYVIEW(dev))
1282 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1283 else if (IS_GEN2(dev))
1284 error->ier = I915_READ16(IER);
1286 error->ier = I915_READ(IER);
1289 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1291 if (INTEL_INFO(dev)->gen >= 6) {
1292 error->error = I915_READ(ERROR_GEN6);
1293 error->done_reg = I915_READ(DONE_REG);
1296 if (INTEL_INFO(dev)->gen == 7)
1297 error->err_int = I915_READ(GEN7_ERR_INT);
1299 i915_get_extra_instdone(dev, error->extra_instdone);
1301 i915_gem_record_fences(dev, error);
1302 i915_gem_record_rings(dev, error);
1304 /* Record buffers on the active and pinned lists. */
1305 error->active_bo = NULL;
1306 error->pinned_bo = NULL;
1309 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1311 error->active_bo_count = i;
1312 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
1315 error->pinned_bo_count = i - error->active_bo_count;
1317 error->active_bo = NULL;
1318 error->pinned_bo = NULL;
1320 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
1322 if (error->active_bo)
1324 error->active_bo + error->active_bo_count;
1327 if (error->active_bo)
1328 error->active_bo_count =
1329 capture_active_bo(error->active_bo,
1330 error->active_bo_count,
1331 &dev_priv->mm.active_list);
1333 if (error->pinned_bo)
1334 error->pinned_bo_count =
1335 capture_pinned_bo(error->pinned_bo,
1336 error->pinned_bo_count,
1337 &dev_priv->mm.bound_list);
1339 do_gettimeofday(&error->time);
1341 error->overlay = intel_overlay_capture_error_state(dev);
1342 error->display = intel_display_capture_error_state(dev);
1344 spin_lock_irqsave(&dev_priv->error_lock, flags);
1345 if (dev_priv->first_error == NULL) {
1346 dev_priv->first_error = error;
1349 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
1352 i915_error_state_free(&error->ref);
1355 void i915_destroy_error_state(struct drm_device *dev)
1357 struct drm_i915_private *dev_priv = dev->dev_private;
1358 struct drm_i915_error_state *error;
1359 unsigned long flags;
1361 spin_lock_irqsave(&dev_priv->error_lock, flags);
1362 error = dev_priv->first_error;
1363 dev_priv->first_error = NULL;
1364 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
1367 kref_put(&error->ref, i915_error_state_free);
#else
#define i915_capture_error_state(x)
#endif
1373 static void i915_report_and_clear_eir(struct drm_device *dev)
1375 struct drm_i915_private *dev_priv = dev->dev_private;
1376 uint32_t instdone[I915_NUM_INSTDONE_REG];
1377 u32 eir = I915_READ(EIR);
1383 pr_err("render error detected, EIR: 0x%08x\n", eir);
1385 i915_get_extra_instdone(dev, instdone);
1388 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1389 u32 ipeir = I915_READ(IPEIR_I965);
1391 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1392 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1393 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1394 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1395 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
1396 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1397 I915_WRITE(IPEIR_I965, ipeir);
1398 POSTING_READ(IPEIR_I965);
1400 if (eir & GM45_ERROR_PAGE_TABLE) {
1401 u32 pgtbl_err = I915_READ(PGTBL_ER);
1402 pr_err("page table error\n");
1403 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
1404 I915_WRITE(PGTBL_ER, pgtbl_err);
1405 POSTING_READ(PGTBL_ER);
1409 if (!IS_GEN2(dev)) {
1410 if (eir & I915_ERROR_PAGE_TABLE) {
1411 u32 pgtbl_err = I915_READ(PGTBL_ER);
1412 pr_err("page table error\n");
1413 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
1414 I915_WRITE(PGTBL_ER, pgtbl_err);
1415 POSTING_READ(PGTBL_ER);
1419 if (eir & I915_ERROR_MEMORY_REFRESH) {
1420 pr_err("memory refresh error:\n");
1422 pr_err("pipe %c stat: 0x%08x\n",
1423 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
1424 /* pipestat has already been acked */
1426 if (eir & I915_ERROR_INSTRUCTION) {
1427 pr_err("instruction error\n");
1428 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
1429 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1430 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1431 if (INTEL_INFO(dev)->gen < 4) {
1432 u32 ipeir = I915_READ(IPEIR);
1434 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
1435 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
1436 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
1437 I915_WRITE(IPEIR, ipeir);
1438 POSTING_READ(IPEIR);
1440 u32 ipeir = I915_READ(IPEIR_I965);
1442 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1443 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1444 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
1445 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1446 I915_WRITE(IPEIR_I965, ipeir);
1447 POSTING_READ(IPEIR_I965);
1451 I915_WRITE(EIR, eir);
1453 eir = I915_READ(EIR);
* some errors might have become stuck, so mask them.
1459 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
1460 I915_WRITE(EMR, I915_READ(EMR) | eir);
1461 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1466 * i915_handle_error - handle an error interrupt
* Do some basic checking of register state at error interrupt time and
1470 * dump it to the syslog. Also call i915_capture_error_state() to make
1471 * sure we get a record and make it available in debugfs. Fire a uevent
1472 * so userspace knows something bad happened (should trigger collection
1473 * of a ring dump etc.).
1475 void i915_handle_error(struct drm_device *dev, bool wedged)
1477 struct drm_i915_private *dev_priv = dev->dev_private;
1478 struct intel_ring_buffer *ring;
1481 i915_capture_error_state(dev);
1482 i915_report_and_clear_eir(dev);
1485 INIT_COMPLETION(dev_priv->error_completion);
1486 atomic_set(&dev_priv->mm.wedged, 1);
* Wake up waiting processes so they don't hang
1491 for_each_ring(ring, dev_priv, i)
1492 wake_up_all(&ring->irq_queue);
1495 queue_work(dev_priv->wq, &dev_priv->error_work);
1498 static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1500 drm_i915_private_t *dev_priv = dev->dev_private;
1501 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1502 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1503 struct drm_i915_gem_object *obj;
1504 struct intel_unpin_work *work;
1505 unsigned long flags;
1506 bool stall_detected;
1508 /* Ignore early vblank irqs */
1509 if (intel_crtc == NULL)
1512 spin_lock_irqsave(&dev->event_lock, flags);
1513 work = intel_crtc->unpin_work;
1516 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1517 !work->enable_stall_check) {
1518 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1519 spin_unlock_irqrestore(&dev->event_lock, flags);
1523 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1524 obj = work->pending_flip_obj;
1525 if (INTEL_INFO(dev)->gen >= 4) {
1526 int dspsurf = DSPSURF(intel_crtc->plane);
1527 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1530 int dspaddr = DSPADDR(intel_crtc->plane);
1531 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
1532 crtc->y * crtc->fb->pitches[0] +
1533 crtc->x * crtc->fb->bits_per_pixel/8);
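/* On the pre-965 path above, DSPADDR holds a byte offset into the
 * framebuffer, so the expected value must include the (x, y) panning
 * offset in bytes; the gen4+ path compares the DSPSURF base address
 * instead.
 */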
1536 spin_unlock_irqrestore(&dev->event_lock, flags);
1538 if (stall_detected) {
1539 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1540 intel_prepare_page_flip(dev, intel_crtc->plane);
1544 /* Called from drm generic code, passed 'crtc' which
1545 * we use as a pipe index
1547 static int i915_enable_vblank(struct drm_device *dev, int pipe)
1549 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1550 unsigned long irqflags;
1552 if (!i915_pipe_enabled(dev, pipe))
1555 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1556 if (INTEL_INFO(dev)->gen >= 4)
1557 i915_enable_pipestat(dev_priv, pipe,
1558 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1560 i915_enable_pipestat(dev_priv, pipe,
1561 PIPE_VBLANK_INTERRUPT_ENABLE);
1563 /* maintain vblank delivery even in deep C-states */
1564 if (dev_priv->info->gen == 3)
1565 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1566 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1571 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1573 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1574 unsigned long irqflags;
1576 if (!i915_pipe_enabled(dev, pipe))
1579 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1580 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1581 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1582 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1587 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1589 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1590 unsigned long irqflags;
1592 if (!i915_pipe_enabled(dev, pipe))
1595 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1596 ironlake_enable_display_irq(dev_priv,
1597 DE_PIPEA_VBLANK_IVB << (5 * pipe));
1598 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1603 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1605 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1606 unsigned long irqflags;
1609 if (!i915_pipe_enabled(dev, pipe))
1612 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1613 imr = I915_READ(VLV_IMR);
1615 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1617 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1618 I915_WRITE(VLV_IMR, imr);
1619 i915_enable_pipestat(dev_priv, pipe,
1620 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1621 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1626 /* Called from drm generic code, passed 'crtc' which
1627 * we use as a pipe index
1629 static void i915_disable_vblank(struct drm_device *dev, int pipe)
1631 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1632 unsigned long irqflags;
1634 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1635 if (dev_priv->info->gen == 3)
1636 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
1638 i915_disable_pipestat(dev_priv, pipe,
1639 PIPE_VBLANK_INTERRUPT_ENABLE |
1640 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1641 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1644 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1646 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1647 unsigned long irqflags;
1649 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1650 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1651 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1652 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1655 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1657 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1658 unsigned long irqflags;
1660 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1661 ironlake_disable_display_irq(dev_priv,
1662 DE_PIPEA_VBLANK_IVB << (pipe * 5));
1663 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1666 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1668 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1669 unsigned long irqflags;
1672 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1673 i915_disable_pipestat(dev_priv, pipe,
1674 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1675 imr = I915_READ(VLV_IMR);
1677 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1679 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1680 I915_WRITE(VLV_IMR, imr);
1681 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1685 ring_last_seqno(struct intel_ring_buffer *ring)
1687 return list_entry(ring->request_list.prev,
1688 struct drm_i915_gem_request, list)->seqno;
1691 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1693 if (list_empty(&ring->request_list) ||
1694 i915_seqno_passed(ring->get_seqno(ring, false),
1695 ring_last_seqno(ring))) {
1696 /* Issue a wake-up to catch stuck h/w. */
1697 if (waitqueue_active(&ring->irq_queue)) {
1698 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
1700 wake_up_all(&ring->irq_queue);
1708 static bool kick_ring(struct intel_ring_buffer *ring)
1710 struct drm_device *dev = ring->dev;
1711 struct drm_i915_private *dev_priv = dev->dev_private;
1712 u32 tmp = I915_READ_CTL(ring);
1713 if (tmp & RING_WAIT) {
1714 DRM_ERROR("Kicking stuck wait on %s\n",
1716 I915_WRITE_CTL(ring, tmp);
1722 static bool i915_hangcheck_hung(struct drm_device *dev)
1724 drm_i915_private_t *dev_priv = dev->dev_private;
1726 if (dev_priv->hangcheck_count++ > 1) {
1729 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1730 i915_handle_error(dev, true);
1732 if (!IS_GEN2(dev)) {
1733 struct intel_ring_buffer *ring;
1736 /* Is the chip hanging on a WAIT_FOR_EVENT?
1737 * If so we can simply poke the RB_WAIT bit
1738 * and break the hang. This should work on
* all but the second generation chipsets.
*/
1741 for_each_ring(ring, dev_priv, i)
1742 hung &= !kick_ring(ring);
1752 * This is called when the chip hasn't reported back with completed
1753 * batchbuffers in a long time. The first time this is called we simply record
1754 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
1755 * again, we assume the chip is wedged and try to fix it.
1757 void i915_hangcheck_elapsed(unsigned long data)
1759 struct drm_device *dev = (struct drm_device *)data;
1760 drm_i915_private_t *dev_priv = dev->dev_private;
1761 uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
1762 struct intel_ring_buffer *ring;
1763 bool err = false, idle;
1766 if (!i915_enable_hangcheck)
1769 memset(acthd, 0, sizeof(acthd));
1771 for_each_ring(ring, dev_priv, i) {
1772 idle &= i915_hangcheck_ring_idle(ring, &err);
1773 acthd[i] = intel_ring_get_active_head(ring);
1776 /* If all work is done then ACTHD clearly hasn't advanced. */
1779 if (i915_hangcheck_hung(dev))
1785 dev_priv->hangcheck_count = 0;
1789 i915_get_extra_instdone(dev, instdone);
1790 if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
1791 memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
1792 if (i915_hangcheck_hung(dev))
1795 dev_priv->hangcheck_count = 0;
1797 memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
1798 memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
/* Reset timer in case the chip hangs without another request being added */
1803 mod_timer(&dev_priv->hangcheck_timer,
1804 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
1809 static void ironlake_irq_preinstall(struct drm_device *dev)
1811 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1813 atomic_set(&dev_priv->irq_received, 0);
1815 I915_WRITE(HWSTAM, 0xeffe);
1817 /* XXX hotplug from PCH */
1819 I915_WRITE(DEIMR, 0xffffffff);
1820 I915_WRITE(DEIER, 0x0);
1821 POSTING_READ(DEIER);
1824 I915_WRITE(GTIMR, 0xffffffff);
1825 I915_WRITE(GTIER, 0x0);
1826 POSTING_READ(GTIER);
1828 /* south display irq */
1829 I915_WRITE(SDEIMR, 0xffffffff);
1830 I915_WRITE(SDEIER, 0x0);
1831 POSTING_READ(SDEIER);
1834 static void valleyview_irq_preinstall(struct drm_device *dev)
1836 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1839 atomic_set(&dev_priv->irq_received, 0);
1842 I915_WRITE(VLV_IMR, 0);
1843 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
1844 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
1845 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
1848 I915_WRITE(GTIIR, I915_READ(GTIIR));
1849 I915_WRITE(GTIIR, I915_READ(GTIIR));
1850 I915_WRITE(GTIMR, 0xffffffff);
1851 I915_WRITE(GTIER, 0x0);
1852 POSTING_READ(GTIER);
1854 I915_WRITE(DPINVGTT, 0xff);
1856 I915_WRITE(PORT_HOTPLUG_EN, 0);
1857 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1859 I915_WRITE(PIPESTAT(pipe), 0xffff);
1860 I915_WRITE(VLV_IIR, 0xffffffff);
1861 I915_WRITE(VLV_IMR, 0xffffffff);
1862 I915_WRITE(VLV_IER, 0x0);
1863 POSTING_READ(VLV_IER);
1867 * Enable digital hotplug on the PCH, and configure the DP short pulse
1868 * duration to 2ms (which is the minimum in the Display Port spec)
1870 * This register is the same on all known PCH chips.
1873 static void ironlake_enable_pch_hotplug(struct drm_device *dev)
1875 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1878 hotplug = I915_READ(PCH_PORT_HOTPLUG);
1879 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
1880 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
1881 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
1882 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
1883 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
1886 static int ironlake_irq_postinstall(struct drm_device *dev)
1888 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable the kinds of interrupts that are always enabled */
1890 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1891 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
1896 dev_priv->irq_mask = ~display_mask;
/* these should always be able to generate an irq */
1899 I915_WRITE(DEIIR, I915_READ(DEIIR));
1900 I915_WRITE(DEIMR, dev_priv->irq_mask);
1901 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
1902 POSTING_READ(DEIER);
1904 dev_priv->gt_irq_mask = ~0;
1906 I915_WRITE(GTIIR, I915_READ(GTIIR));
1907 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1912 GEN6_BSD_USER_INTERRUPT |
1913 GEN6_BLITTER_USER_INTERRUPT;
1918 GT_BSD_USER_INTERRUPT;
1919 I915_WRITE(GTIER, render_irqs);
1920 POSTING_READ(GTIER);
1922 if (HAS_PCH_CPT(dev)) {
1923 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1924 SDE_PORTB_HOTPLUG_CPT |
1925 SDE_PORTC_HOTPLUG_CPT |
1926 SDE_PORTD_HOTPLUG_CPT |
1930 hotplug_mask = (SDE_CRT_HOTPLUG |
1938 dev_priv->pch_irq_mask = ~hotplug_mask;
1940 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1941 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1942 I915_WRITE(SDEIER, hotplug_mask);
1943 POSTING_READ(SDEIER);
1945 ironlake_enable_pch_hotplug(dev);
1947 if (IS_IRONLAKE_M(dev)) {
1948 /* Clear & enable PCU event interrupts */
1949 I915_WRITE(DEIIR, DE_PCU_EVENT);
1950 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
1951 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
1957 static int ivybridge_irq_postinstall(struct drm_device *dev)
1959 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable the kinds of interrupts that are always enabled */
1962 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
1963 DE_PLANEC_FLIP_DONE_IVB |
1964 DE_PLANEB_FLIP_DONE_IVB |
1965 DE_PLANEA_FLIP_DONE_IVB |
1966 DE_AUX_CHANNEL_A_IVB;
1970 dev_priv->irq_mask = ~display_mask;
/* these should always be able to generate an irq */
1973 I915_WRITE(DEIIR, I915_READ(DEIIR));
1974 I915_WRITE(DEIMR, dev_priv->irq_mask);
1977 DE_PIPEC_VBLANK_IVB |
1978 DE_PIPEB_VBLANK_IVB |
1979 DE_PIPEA_VBLANK_IVB);
1980 POSTING_READ(DEIER);
1982 dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
1984 I915_WRITE(GTIIR, I915_READ(GTIIR));
1985 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1987 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
1988 GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
1989 I915_WRITE(GTIER, render_irqs);
1990 POSTING_READ(GTIER);
1992 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1993 SDE_PORTB_HOTPLUG_CPT |
1994 SDE_PORTC_HOTPLUG_CPT |
1995 SDE_PORTD_HOTPLUG_CPT |
1998 dev_priv->pch_irq_mask = ~hotplug_mask;
2000 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2001 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
2002 I915_WRITE(SDEIER, hotplug_mask);
2003 POSTING_READ(SDEIER);
2005 ironlake_enable_pch_hotplug(dev);
2010 static int valleyview_irq_postinstall(struct drm_device *dev)
2012 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2014 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2018 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2019 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2020 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2021 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2022 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
* Leave vblank interrupts masked initially. enable/disable will
* toggle them based on usage.
2028 dev_priv->irq_mask = (~enable_mask) |
2029 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2030 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2032 dev_priv->pipestat[0] = 0;
2033 dev_priv->pipestat[1] = 0;
2035 /* Hack for broken MSIs on VLV */
2036 pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
2037 pci_read_config_word(dev->pdev, 0x98, &msid);
2038 msid &= 0xff; /* mask out delivery bits */
2040 pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
2042 I915_WRITE(PORT_HOTPLUG_EN, 0);
2043 POSTING_READ(PORT_HOTPLUG_EN);
2045 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2046 I915_WRITE(VLV_IER, enable_mask);
2047 I915_WRITE(VLV_IIR, 0xffffffff);
2048 I915_WRITE(PIPESTAT(0), 0xffff);
2049 I915_WRITE(PIPESTAT(1), 0xffff);
2050 POSTING_READ(VLV_IER);
2052 i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2053 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2054 i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2056 I915_WRITE(VLV_IIR, 0xffffffff);
2057 I915_WRITE(VLV_IIR, 0xffffffff);
2059 I915_WRITE(GTIIR, I915_READ(GTIIR));
2060 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2062 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
2063 GEN6_BLITTER_USER_INTERRUPT;
2064 I915_WRITE(GTIER, render_irqs);
2065 POSTING_READ(GTIER);
2067 /* ack & enable invalid PTE error interrupts */
2068 #if 0 /* FIXME: add support to irq handler for checking these bits */
2069 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif
2073 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2078 static void valleyview_hpd_irq_setup(struct drm_device *dev)
2080 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2081 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2083 /* Note HDMI and DP share bits */
2084 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2085 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2086 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2087 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2088 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2089 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2090 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
2091 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2092 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
2093 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2094 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2095 hotplug_en |= CRT_HOTPLUG_INT_EN;
2096 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2099 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2102 static void valleyview_irq_uninstall(struct drm_device *dev)
2104 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2111 I915_WRITE(PIPESTAT(pipe), 0xffff);
2113 I915_WRITE(HWSTAM, 0xffffffff);
2114 I915_WRITE(PORT_HOTPLUG_EN, 0);
2115 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2117 I915_WRITE(PIPESTAT(pipe), 0xffff);
2118 I915_WRITE(VLV_IIR, 0xffffffff);
2119 I915_WRITE(VLV_IMR, 0xffffffff);
2120 I915_WRITE(VLV_IER, 0x0);
2121 POSTING_READ(VLV_IER);
2124 static void ironlake_irq_uninstall(struct drm_device *dev)
2126 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2131 I915_WRITE(HWSTAM, 0xffffffff);
2133 I915_WRITE(DEIMR, 0xffffffff);
2134 I915_WRITE(DEIER, 0x0);
2135 I915_WRITE(DEIIR, I915_READ(DEIIR));
2137 I915_WRITE(GTIMR, 0xffffffff);
2138 I915_WRITE(GTIER, 0x0);
2139 I915_WRITE(GTIIR, I915_READ(GTIIR));
2141 I915_WRITE(SDEIMR, 0xffffffff);
2142 I915_WRITE(SDEIER, 0x0);
2143 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2146 static void i8xx_irq_preinstall(struct drm_device * dev)
2148 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2151 atomic_set(&dev_priv->irq_received, 0);
2154 I915_WRITE(PIPESTAT(pipe), 0);
2155 I915_WRITE16(IMR, 0xffff);
2156 I915_WRITE16(IER, 0x0);
2157 POSTING_READ16(IER);
2160 static int i8xx_irq_postinstall(struct drm_device *dev)
2162 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2164 dev_priv->pipestat[0] = 0;
2165 dev_priv->pipestat[1] = 0;
2168 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2170 /* Unmask the interrupts that we always want on. */
2171 dev_priv->irq_mask =
2172 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2173 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2174 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2175 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2176 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2177 I915_WRITE16(IMR, dev_priv->irq_mask);
I915_WRITE16(IER,
2180 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2181 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2182 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2183 I915_USER_INTERRUPT);
2184 POSTING_READ16(IER);

return 0;
}
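/*
 * Gen2 interrupt handler: 16-bit IIR, pipes A and B only.  Plane flip
 * pending bits are only acked once the corresponding flip has completed.
 */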
2189 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2191 struct drm_device *dev = (struct drm_device *) arg;
2192 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2195 unsigned long irqflags;
u16 flip_mask =
2199 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2200 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2202 atomic_inc(&dev_priv->irq_received);
2204 iir = I915_READ16(IIR);
2208 while (iir & ~flip_mask) {
2209 /* Can't rely on pipestat interrupt bit in iir as it might
2210 * have been cleared after the pipestat interrupt was received.
2211 * It doesn't set the bit in iir again, but it still produces
2212 * interrupts (for non-MSI).
*/
2214 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2215 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2216 i915_handle_error(dev, false);
2218 for_each_pipe(pipe) {
2219 int reg = PIPESTAT(pipe);
2220 pipe_stats[pipe] = I915_READ(reg);
/*
2223 * Clear the PIPE*STAT regs before the IIR
*/
2225 if (pipe_stats[pipe] & 0x8000ffff) {
2226 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2227 DRM_DEBUG_DRIVER("pipe %c underrun\n",
pipe_name(pipe));
2229 I915_WRITE(reg, pipe_stats[pipe]);
}
}
2233 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2235 I915_WRITE16(IIR, iir & ~flip_mask);
2236 new_iir = I915_READ16(IIR); /* Flush posted writes */
2238 i915_update_dri1_breadcrumb(dev);
2240 if (iir & I915_USER_INTERRUPT)
2241 notify_ring(dev, &dev_priv->ring[RCS]);
2243 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
2244 drm_handle_vblank(dev, 0)) {
2245 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
2246 intel_prepare_page_flip(dev, 0);
2247 intel_finish_page_flip(dev, 0);
2248 flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
}
}
2252 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
2253 drm_handle_vblank(dev, 1)) {
2254 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
2255 intel_prepare_page_flip(dev, 1);
2256 intel_finish_page_flip(dev, 1);
2257 flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
}
}

iir = new_iir;
}

return IRQ_HANDLED;
}
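/* Gen2: disable and ack everything on uninstall. */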
2267 static void i8xx_irq_uninstall(struct drm_device * dev)
2269 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2272 for_each_pipe(pipe) {
2273 /* Clear enable bits; then clear status bits */
2274 I915_WRITE(PIPESTAT(pipe), 0);
2275 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
}
2277 I915_WRITE16(IMR, 0xffff);
2278 I915_WRITE16(IER, 0x0);
2279 I915_WRITE16(IIR, I915_READ16(IIR));
}
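/* Gen3: reset hotplug, pipe status and interrupt masks before install. */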
2282 static void i915_irq_preinstall(struct drm_device * dev)
2284 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2287 atomic_set(&dev_priv->irq_received, 0);
2289 if (I915_HAS_HOTPLUG(dev)) {
2290 I915_WRITE(PORT_HOTPLUG_EN, 0);
2291 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2294 I915_WRITE16(HWSTAM, 0xeffe);
for_each_pipe(pipe)
2296 I915_WRITE(PIPESTAT(pipe), 0);
2297 I915_WRITE(IMR, 0xffffffff);
2298 I915_WRITE(IER, 0x0);
POSTING_READ(IER);
}
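/*
 * Gen3: unmask the default interrupt sources and, where supported, the
 * display port hotplug interrupt.
 */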
2302 static int i915_irq_postinstall(struct drm_device *dev)
2304 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
2307 dev_priv->pipestat[0] = 0;
2308 dev_priv->pipestat[1] = 0;
2310 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2312 /* Unmask the interrupts that we always want on. */
2313 dev_priv->irq_mask =
2314 ~(I915_ASLE_INTERRUPT |
2315 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2316 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2317 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2318 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2319 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
enable_mask =
2322 I915_ASLE_INTERRUPT |
2323 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2324 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2325 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2326 I915_USER_INTERRUPT;
2328 if (I915_HAS_HOTPLUG(dev)) {
2329 I915_WRITE(PORT_HOTPLUG_EN, 0);
2330 POSTING_READ(PORT_HOTPLUG_EN);
2332 /* Enable in IER... */
2333 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2334 /* and unmask in IMR */
2335 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
}
2338 I915_WRITE(IMR, dev_priv->irq_mask);
2339 I915_WRITE(IER, enable_mask);
2342 intel_opregion_enable_asle(dev);

return 0;
}
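/* Gen3: enable hotplug detection for the ports in hotplug_supported_mask. */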
2347 static void i915_hpd_irq_setup(struct drm_device *dev)
2349 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug_en;
2352 if (I915_HAS_HOTPLUG(dev)) {
2353 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2355 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2356 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2357 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2358 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2359 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2360 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2361 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
2362 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2363 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
2364 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2365 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2366 hotplug_en |= CRT_HOTPLUG_INT_EN;
2367 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
2370 /* Ignore TV since it's buggy */
2372 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
}
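/*
 * Gen3 interrupt handler: loop until no unmasked IIR bits remain,
 * clearing the PIPE*STAT registers before IIR on each pass.
 */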
2376 static irqreturn_t i915_irq_handler(int irq, void *arg)
2378 struct drm_device *dev = (struct drm_device *) arg;
2379 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2380 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2381 unsigned long irqflags;
u32 flip_mask =
2383 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2384 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
u32 flip[2] = {
2386 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
2387 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
};
2389 int pipe, ret = IRQ_NONE;
2391 atomic_inc(&dev_priv->irq_received);
2393 iir = I915_READ(IIR);
do {
2395 bool irq_received = (iir & ~flip_mask) != 0;
2396 bool blc_event = false;
2398 /* Can't rely on pipestat interrupt bit in iir as it might
2399 * have been cleared after the pipestat interrupt was received.
2400 * It doesn't set the bit in iir again, but it still produces
2401 * interrupts (for non-MSI).
*/
2403 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2404 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2405 i915_handle_error(dev, false);
2407 for_each_pipe(pipe) {
2408 int reg = PIPESTAT(pipe);
2409 pipe_stats[pipe] = I915_READ(reg);
2411 /* Clear the PIPE*STAT regs before the IIR */
2412 if (pipe_stats[pipe] & 0x8000ffff) {
2413 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2414 DRM_DEBUG_DRIVER("pipe %c underrun\n",
pipe_name(pipe));
2416 I915_WRITE(reg, pipe_stats[pipe]);
2417 irq_received = true;
}
}
2420 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

if (!irq_received)
	break;
2425 /* Consume port. Then clear IIR or we'll miss events */
2426 if ((I915_HAS_HOTPLUG(dev)) &&
2427 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2428 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2430 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
2432 if (hotplug_status & dev_priv->hotplug_supported_mask)
2433 queue_work(dev_priv->wq,
2434 &dev_priv->hotplug_work);
2436 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2437 POSTING_READ(PORT_HOTPLUG_STAT);
}
2440 I915_WRITE(IIR, iir & ~flip_mask);
2441 new_iir = I915_READ(IIR); /* Flush posted writes */
2443 if (iir & I915_USER_INTERRUPT)
2444 notify_ring(dev, &dev_priv->ring[RCS]);
2446 for_each_pipe(pipe) {
int plane = pipe;
if (IS_MOBILE(dev))
	plane = !plane;
2450 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2451 drm_handle_vblank(dev, pipe)) {
2452 if (iir & flip[plane]) {
2453 intel_prepare_page_flip(dev, plane);
2454 intel_finish_page_flip(dev, pipe);
2455 flip_mask &= ~flip[plane];
}
}
2459 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
	blc_event = true;
}
2463 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2464 intel_opregion_asle_intr(dev);
2466 /* With MSI, interrupts are only generated when iir
2467 * transitions from zero to nonzero. If another bit got
2468 * set while we were handling the existing iir bits, then
2469 * we would never get another interrupt.
2471 * This is fine on non-MSI as well, as if we hit this path
2472 * we avoid exiting the interrupt handler only to generate
2475 * Note that for MSI this could cause a stray interrupt report
2476 * if an interrupt landed in the time between writing IIR and
2477 * the posting read. This should be rare enough to never
2478 * trigger the 99% of 100,000 interrupts test for disabling
* MSI.
*/
ret = IRQ_HANDLED;
iir = new_iir;
2483 } while (iir & ~flip_mask);
2485 i915_update_dri1_breadcrumb(dev);

return ret;
}
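/* Gen3: disable hotplug, pipe status and all interrupt delivery. */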
2490 static void i915_irq_uninstall(struct drm_device * dev)
2492 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2495 if (I915_HAS_HOTPLUG(dev)) {
2496 I915_WRITE(PORT_HOTPLUG_EN, 0);
2497 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
2500 I915_WRITE16(HWSTAM, 0xffff);
2501 for_each_pipe(pipe) {
2502 /* Clear enable bits; then clear status bits */
2503 I915_WRITE(PIPESTAT(pipe), 0);
2504 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
}
2506 I915_WRITE(IMR, 0xffffffff);
2507 I915_WRITE(IER, 0x0);
2509 I915_WRITE(IIR, I915_READ(IIR));
}
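/* Gen4/G4X: silence all interrupt sources before the handler is installed. */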
2512 static void i965_irq_preinstall(struct drm_device * dev)
2514 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2517 atomic_set(&dev_priv->irq_received, 0);
2519 I915_WRITE(PORT_HOTPLUG_EN, 0);
2520 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2522 I915_WRITE(HWSTAM, 0xeffe);
for_each_pipe(pipe)
2524 I915_WRITE(PIPESTAT(pipe), 0);
2525 I915_WRITE(IMR, 0xffffffff);
2526 I915_WRITE(IER, 0x0);
POSTING_READ(IER);
}
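/*
 * Gen4/G4X: unmask the default sources, enable GMBUS pipe events and
 * program error detection in EMR.
 */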
2530 static int i965_irq_postinstall(struct drm_device *dev)
2532 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
u32 error_mask;
2536 /* Unmask the interrupts that we always want on. */
2537 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2538 I915_DISPLAY_PORT_INTERRUPT |
2539 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2540 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2541 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2542 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2543 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2545 enable_mask = ~dev_priv->irq_mask;
2546 enable_mask |= I915_USER_INTERRUPT;
if (IS_G4X(dev))
2549 enable_mask |= I915_BSD_USER_INTERRUPT;
2551 dev_priv->pipestat[0] = 0;
2552 dev_priv->pipestat[1] = 0;
2553 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
/*
2556 * Enable some error detection, note the instruction error mask
2557 * bit is reserved, so we leave it masked.
*/
if (IS_G4X(dev)) {
2560 error_mask = ~(GM45_ERROR_PAGE_TABLE |
2561 GM45_ERROR_MEM_PRIV |
2562 GM45_ERROR_CP_PRIV |
2563 I915_ERROR_MEMORY_REFRESH);
} else {
2565 error_mask = ~(I915_ERROR_PAGE_TABLE |
2566 I915_ERROR_MEMORY_REFRESH);
}
2568 I915_WRITE(EMR, error_mask);
2570 I915_WRITE(IMR, dev_priv->irq_mask);
2571 I915_WRITE(IER, enable_mask);
2574 I915_WRITE(PORT_HOTPLUG_EN, 0);
2575 POSTING_READ(PORT_HOTPLUG_EN);
2577 intel_opregion_enable_asle(dev);

return 0;
}
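/*
 * Gen4/G4X: enable hotplug detection; the SDVO status bits differ
 * between G4X and the rest of gen4.
 */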
2582 static void i965_hpd_irq_setup(struct drm_device *dev)
2584 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug_en = 0;
2587 /* Note HDMI and DP share hotplug bits */
2589 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2590 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2591 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2592 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2593 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2594 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2596 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
2597 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2598 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
2599 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2601 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
2602 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2603 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
2604 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2606 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2607 hotplug_en |= CRT_HOTPLUG_INT_EN;
2609 /* Programming the CRT detection parameters tends
2610 to generate a spurious hotplug event about three
2611 seconds later. So just do it once.
*/
if (IS_G4X(dev))
2614 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2615 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
2618 /* Ignore TV since it's buggy */
2620 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
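/*
 * Gen4/G4X interrupt handler: full 32-bit IIR with BSD ring and GMBUS
 * handling; page flips are completed from the vblank status bits.
 */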
2623 static irqreturn_t i965_irq_handler(int irq, void *arg)
2625 struct drm_device *dev = (struct drm_device *) arg;
2626 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 iir, new_iir;
2628 u32 pipe_stats[I915_MAX_PIPES];
2629 unsigned long irqflags;
int irq_received;
2631 int ret = IRQ_NONE, pipe;
2633 atomic_inc(&dev_priv->irq_received);
2635 iir = I915_READ(IIR);

for (;;) {
2638 bool blc_event = false;
2640 irq_received = iir != 0;
2642 /* Can't rely on pipestat interrupt bit in iir as it might
2643 * have been cleared after the pipestat interrupt was received.
2644 * It doesn't set the bit in iir again, but it still produces
2645 * interrupts (for non-MSI).
*/
2647 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2648 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2649 i915_handle_error(dev, false);
2651 for_each_pipe(pipe) {
2652 int reg = PIPESTAT(pipe);
2653 pipe_stats[pipe] = I915_READ(reg);
/*
2656 * Clear the PIPE*STAT regs before the IIR
*/
2658 if (pipe_stats[pipe] & 0x8000ffff) {
2659 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2660 DRM_DEBUG_DRIVER("pipe %c underrun\n",
pipe_name(pipe));
2662 I915_WRITE(reg, pipe_stats[pipe]);
irq_received = 1;
}
}
2666 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

if (!irq_received)
	break;

ret = IRQ_HANDLED;
2673 /* Consume port. Then clear IIR or we'll miss events */
2674 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
2675 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2677 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
2679 if (hotplug_status & dev_priv->hotplug_supported_mask)
2680 queue_work(dev_priv->wq,
2681 &dev_priv->hotplug_work);
2683 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2684 I915_READ(PORT_HOTPLUG_STAT);
}
2687 I915_WRITE(IIR, iir);
2688 new_iir = I915_READ(IIR); /* Flush posted writes */
2690 if (iir & I915_USER_INTERRUPT)
2691 notify_ring(dev, &dev_priv->ring[RCS]);
2692 if (iir & I915_BSD_USER_INTERRUPT)
2693 notify_ring(dev, &dev_priv->ring[VCS]);
2695 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
2696 intel_prepare_page_flip(dev, 0);
2698 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
2699 intel_prepare_page_flip(dev, 1);
2701 for_each_pipe(pipe) {
2702 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2703 drm_handle_vblank(dev, pipe)) {
2704 i915_pageflip_stall_check(dev, pipe);
2705 intel_finish_page_flip(dev, pipe);
}
2708 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
	blc_event = true;
}
2713 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2714 intel_opregion_asle_intr(dev);
2716 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2717 gmbus_irq_handler(dev);
2719 /* With MSI, interrupts are only generated when iir
2720 * transitions from zero to nonzero. If another bit got
2721 * set while we were handling the existing iir bits, then
2722 * we would never get another interrupt.
2724 * This is fine on non-MSI as well, as if we hit this path
2725 * we avoid exiting the interrupt handler only to generate
2728 * Note that for MSI this could cause a stray interrupt report
2729 * if an interrupt landed in the time between writing IIR and
2730 * the posting read. This should be rare enough to never
2731 * trigger the 99% of 100,000 interrupts test for disabling
* MSI.
*/
iir = new_iir;
}
2737 i915_update_dri1_breadcrumb(dev);

return ret;
}
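/* Gen4/G4X: disable and ack everything on uninstall. */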
2742 static void i965_irq_uninstall(struct drm_device * dev)
2744 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2750 I915_WRITE(PORT_HOTPLUG_EN, 0);
2751 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2753 I915_WRITE(HWSTAM, 0xffffffff);
for_each_pipe(pipe)
2755 I915_WRITE(PIPESTAT(pipe), 0);
2756 I915_WRITE(IMR, 0xffffffff);
2757 I915_WRITE(IER, 0x0);
for_each_pipe(pipe)
2760 I915_WRITE(PIPESTAT(pipe),
2761 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
2762 I915_WRITE(IIR, I915_READ(IIR));
}
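/**
 * intel_irq_init - set up the interrupt support for a device
 * @dev: drm device
 *
 * Initializes the hotplug, error, RPS and parity work items and the
 * hangcheck timer, then selects the vblank and IRQ install/uninstall/
 * handler callbacks that match the hardware generation.
 */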
2765 void intel_irq_init(struct drm_device *dev)
2767 struct drm_i915_private *dev_priv = dev->dev_private;
2769 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2770 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
2771 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
2772 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
2774 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
2775 (unsigned long) dev);
2777 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
2779 dev->driver->get_vblank_counter = i915_get_vblank_counter;
2780 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2781 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
2782 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
2783 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
}
2786 if (drm_core_check_feature(dev, DRIVER_MODESET))
2787 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
else
2789 dev->driver->get_vblank_timestamp = NULL;
2790 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
2792 if (IS_VALLEYVIEW(dev)) {
2793 dev->driver->irq_handler = valleyview_irq_handler;
2794 dev->driver->irq_preinstall = valleyview_irq_preinstall;
2795 dev->driver->irq_postinstall = valleyview_irq_postinstall;
2796 dev->driver->irq_uninstall = valleyview_irq_uninstall;
2797 dev->driver->enable_vblank = valleyview_enable_vblank;
2798 dev->driver->disable_vblank = valleyview_disable_vblank;
2799 dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
2800 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
2801 /* Share pre & uninstall handlers with ILK/SNB */
2802 dev->driver->irq_handler = ivybridge_irq_handler;
2803 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2804 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2805 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2806 dev->driver->enable_vblank = ivybridge_enable_vblank;
2807 dev->driver->disable_vblank = ivybridge_disable_vblank;
2808 } else if (HAS_PCH_SPLIT(dev)) {
2809 dev->driver->irq_handler = ironlake_irq_handler;
2810 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2811 dev->driver->irq_postinstall = ironlake_irq_postinstall;
2812 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2813 dev->driver->enable_vblank = ironlake_enable_vblank;
2814 dev->driver->disable_vblank = ironlake_disable_vblank;
} else {
2816 if (INTEL_INFO(dev)->gen == 2) {
2817 dev->driver->irq_preinstall = i8xx_irq_preinstall;
2818 dev->driver->irq_postinstall = i8xx_irq_postinstall;
2819 dev->driver->irq_handler = i8xx_irq_handler;
2820 dev->driver->irq_uninstall = i8xx_irq_uninstall;
2821 } else if (INTEL_INFO(dev)->gen == 3) {
2822 dev->driver->irq_preinstall = i915_irq_preinstall;
2823 dev->driver->irq_postinstall = i915_irq_postinstall;
2824 dev->driver->irq_uninstall = i915_irq_uninstall;
2825 dev->driver->irq_handler = i915_irq_handler;
2826 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else {
2828 dev->driver->irq_preinstall = i965_irq_preinstall;
2829 dev->driver->irq_postinstall = i965_irq_postinstall;
2830 dev->driver->irq_uninstall = i965_irq_uninstall;
2831 dev->driver->irq_handler = i965_irq_handler;
2832 dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
}
2834 dev->driver->enable_vblank = i915_enable_vblank;
2835 dev->driver->disable_vblank = i915_disable_vblank;
}
}
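/**
 * intel_hpd_init - enable hotplug detection
 * @dev: drm device
 *
 * Calls the platform specific hpd_irq_setup hook, if one was assigned
 * in intel_irq_init().
 */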
2839 void intel_hpd_init(struct drm_device *dev)
2841 struct drm_i915_private *dev_priv = dev->dev_private;
2843 if (dev_priv->display.hpd_irq_setup)
2844 dev_priv->display.hpd_irq_setup(dev);
}