/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/sysrq.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define MAX_NOPID ((u32)~0)
/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX			\
	(I915_ASLE_INTERRUPT |				\
	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |		\
	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |		\
	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |	\
	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |	\
	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
				 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
				 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)
static void
ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
		dev_priv->gt_irq_mask_reg &= ~mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
		(void) I915_READ(GTIMR);
	}
}
static void
ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
		dev_priv->gt_irq_mask_reg |= mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
		(void) I915_READ(GTIMR);
	}
}
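/*
 * On PCH-split (Ironlake) hardware the two GT helpers above are what
 * i915_user_irq_get()/i915_user_irq_put() further down use to unmask and
 * mask GT_USER_INTERRUPT around breadcrumb waits; the cached
 * gt_irq_mask_reg mirrors GTIMR so redundant register writes are skipped.
 */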
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != 0) {
		dev_priv->irq_mask_reg &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
		(void) I915_READ(DEIMR);
	}
}
static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != mask) {
		dev_priv->irq_mask_reg |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
		(void) I915_READ(DEIMR);
	}
}
static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != 0) {
		dev_priv->irq_mask_reg &= ~mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}
static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != mask) {
		dev_priv->irq_mask_reg |= mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}
static u32
i915_pipestat(int pipe)
{
	if (pipe == 0)
		return PIPEASTAT;
	if (pipe == 1)
		return PIPEBSTAT;
	BUG();
}
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		(void) I915_READ(reg);
	}
}
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		(void) I915_READ(reg);
	}
}
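/*
 * Illustrative usage (this mirrors i915_enable_vblank() below): callers
 * take dev_priv->user_irq_lock so the cached dev_priv->pipestat[] value
 * and the PIPESTAT write stay coherent with the interrupt handler:
 *
 *	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
 *	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
 *	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 *
 * Note that the enable bits sit in the high half of PIPESTAT and the
 * corresponding status bits in the low half, hence the (mask >> 16)
 * write above that acks any stale status when enabling.
 */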
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle (struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     I915_LEGACY_BLC_EVENT_ENABLE);
		if (IS_I965G(dev))
			i915_enable_pipestat(dev_priv, 0,
					     I915_LEGACY_BLC_EVENT_ENABLE);
	}
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;

	if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
		return 1;

	return 0;
}
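/*
 * Illustrative guard (this is exactly what the vblank counter routines
 * below do): the frame count registers are only safe to read while the
 * pipe is running, so callers bail out early when the check fails:
 *
 *	if (!i915_pipe_enabled(dev, pipe))
 *		return 0;
 */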
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, count;

	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %d\n", pipe);
		return 0;
	}

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
		low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
		       PIPE_FRAME_LOW_SHIFT);
		high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
	} while (high1 != high2);

	count = (high1 << 8) | low;

	return count;
}
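/*
 * Worked example for the counter composition above (assuming the usual
 * split of a 16-bit high field and an 8-bit low field): high1 = 0x0102
 * and low = 0x34 combine to count = (0x0102 << 8) | 0x34 = 0x010234.
 * The stable-read loop guarantees low was sampled while high was not
 * rolling over between the two high reads.
 */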
u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %d\n", pipe);
		return 0;
	}

	return I915_READ(reg);
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_encoder *encoder;

	if (mode_config->num_encoder) {
		list_for_each_entry(encoder, &mode_config->encoder_list, head) {
			struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);

			if (intel_encoder->hot_plug)
				(*intel_encoder->hot_plug) (intel_encoder);
		}
	}
	/* Just fire off a uevent and let userspace tell us what to do */
	intelfb_hotplug(dev, false);
	drm_sysfs_hotplug_event(dev);
}
static void i915_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u16 rgvswctl;
	u8 new_delay = dev_priv->cur_delay;

	I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);
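	/*
	 * Note: smaller delay values denote higher frequencies here, so
	 * max_delay is numerically the smallest delay. That is why the
	 * clamps below look inverted relative to the increment and
	 * decrement of new_delay.
	 */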
	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->min_delay)
			new_delay = dev_priv->min_delay;
	}

	DRM_DEBUG("rps change requested: %d -> %d\n",
		  dev_priv->cur_delay, new_delay);

	rgvswctl = I915_READ(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_ERROR("gpu busy, RCS change rejected\n");
		return; /* still busy with another command */
	}

	/* Program the new state */
	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE(MEMSWCTL, rgvswctl);
	POSTING_READ(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);

	dev_priv->cur_delay = new_delay;

	DRM_DEBUG("rps changed\n");

	return;
}
irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir;
	struct drm_i915_master_private *master_priv;

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	(void)I915_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
		goto done;

	ret = IRQ_HANDLED;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}

	if (gt_iir & GT_USER_INTERRUPT) {
		u32 seqno = i915_get_gem_seqno(dev);
		dev_priv->mm.irq_gem_seqno = seqno;
		trace_i915_gem_request_complete(dev, seqno);
		DRM_WAKEUP(&dev_priv->irq_queue);
		dev_priv->hangcheck_count = 0;
		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
	}

	if (de_iir & DE_GSE)
		ironlake_opregion_gse_intr(dev);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip(dev, 1);
	}

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	/* check event from PCH */
	if ((de_iir & DE_PCH_EVENT) &&
	    (pch_iir & SDE_HOTPLUG_MASK)) {
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}

	if (de_iir & DE_PCU_EVENT) {
		I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS));
		i915_handle_rps_change(dev);
	}

	/* should clear the PCH hotplug event before clearing the CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);

done:
	I915_WRITE(DEIER, de_ier);
	(void)I915_READ(DEIER);

	return ret;
}
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	DRM_DEBUG_DRIVER("generating error event\n");
	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	if (atomic_read(&dev_priv->mm.wedged)) {
		if (IS_I965G(dev)) {
			DRM_DEBUG_DRIVER("resetting chip\n");
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
			if (!i965_reset(dev, GDRST_RENDER)) {
				atomic_set(&dev_priv->mm.wedged, 0);
				kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
			}
		} else {
			DRM_DEBUG_DRIVER("reboot required\n");
		}
	}
}
static struct drm_i915_error_object *
i915_error_object_create(struct drm_device *dev,
			 struct drm_gem_object *src)
{
	struct drm_i915_error_object *dst;
	struct drm_i915_gem_object *src_priv;
	int page, page_count;

	if (src == NULL)
		return NULL;

	src_priv = to_intel_bo(src);
	if (src_priv->pages == NULL)
		return NULL;

	page_count = src->size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	for (page = 0; page < page_count; page++) {
		void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;
		s = kmap_atomic(src_priv->pages[page], KM_USER0);
		memcpy(d, s, PAGE_SIZE);
		kunmap_atomic(s, KM_USER0);
		dst->pages[page] = d;
	}
	dst->page_count = page_count;
	dst->gtt_offset = src_priv->gtt_offset;

	return dst;

unwind:
	while (page--)
		kfree(dst->pages[page]);
	kfree(dst);
	return NULL;
}
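/*
 * Objects built by i915_error_object_create() are torn down again by
 * i915_error_object_free() below; both tolerate NULL, which lets the
 * capture path pass through missing batchbuffers unconditionally and
 * lets i915_error_state_free() free every slot without checks.
 */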
static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}
static void
i915_error_state_free(struct drm_device *dev,
		      struct drm_i915_error_state *error)
{
	i915_error_object_free(error->batchbuffer[0]);
	i915_error_object_free(error->batchbuffer[1]);
	i915_error_object_free(error->ringbuffer);
	kfree(error->active_bo);
	kfree(error);
}
static u32
i915_get_bbaddr(struct drm_device *dev, u32 *ring)
{
	u32 cmd;

	if (IS_I830(dev) || IS_845G(dev))
		cmd = MI_BATCH_BUFFER;
	else if (IS_I965G(dev))
		cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
		       MI_BATCH_NON_SECURE_I965);
	else
		cmd = (MI_BATCH_BUFFER_START | (2 << 6));

	return ring[0] == cmd ? ring[1] : 0;
}
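/*
 * Example of the match above: a batch dispatched on i965 appears in the
 * ring as the dword pair
 *
 *	ring[0] = MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965;
 *	ring[1] = <GTT address of the batchbuffer>;
 *
 * so when ring[0] matches the expected command, ring[1] is the batch
 * address; otherwise 0 signals "no batch starts here".
 */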
static u32
i915_ringbuffer_last_batch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 head, bbaddr;
	u32 *ring;

	/* Locate the current position in the ringbuffer and walk back
	 * to find the most recently dispatched batch buffer.
	 */
	bbaddr = 0;
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring = (u32 *)(dev_priv->ring.virtual_start + head);

	while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
		bbaddr = i915_get_bbaddr(dev, ring);
		if (bbaddr)
			break;
	}

	if (bbaddr == 0) {
		ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size);
		while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
			bbaddr = i915_get_bbaddr(dev, ring);
			if (bbaddr)
				break;
		}
	}

	return bbaddr;
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	struct drm_i915_error_state *error;
	struct drm_gem_object *batchbuffer[2];
	unsigned long flags;
	u32 bbaddr;
	int count;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	if (error)
		return;

	error = kmalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	error->seqno = i915_get_gem_seqno(dev);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->pipeastat = I915_READ(PIPEASTAT);
	error->pipebstat = I915_READ(PIPEBSTAT);
	error->instpm = I915_READ(INSTPM);
	if (!IS_I965G(dev)) {
		error->ipeir = I915_READ(IPEIR);
		error->ipehr = I915_READ(IPEHR);
		error->instdone = I915_READ(INSTDONE);
		error->acthd = I915_READ(ACTHD);
		error->bbaddr = 0;
	} else {
		error->ipeir = I915_READ(IPEIR_I965);
		error->ipehr = I915_READ(IPEHR_I965);
		error->instdone = I915_READ(INSTDONE_I965);
		error->instps = I915_READ(INSTPS);
		error->instdone1 = I915_READ(INSTDONE1);
		error->acthd = I915_READ(ACTHD_I965);
		error->bbaddr = I915_READ64(BB_ADDR);
	}

	bbaddr = i915_ringbuffer_last_batch(dev);

	/* Grab the current batchbuffer, most likely to have crashed. */
	batchbuffer[0] = NULL;
	batchbuffer[1] = NULL;
	count = 0;
	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
		struct drm_gem_object *obj = obj_priv->obj;

		if (batchbuffer[0] == NULL &&
		    bbaddr >= obj_priv->gtt_offset &&
		    bbaddr < obj_priv->gtt_offset + obj->size)
			batchbuffer[0] = obj;

		if (batchbuffer[1] == NULL &&
		    error->acthd >= obj_priv->gtt_offset &&
		    error->acthd < obj_priv->gtt_offset + obj->size &&
		    batchbuffer[0] != obj)
			batchbuffer[1] = obj;

		count++;
	}

	/* We need to copy these to an anonymous buffer as the simplest
	 * method to avoid being overwritten by userspace.
	 */
	error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
	error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);

	/* Record the ringbuffer */
	error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj);

	/* Record buffers on the active list. */
	error->active_bo = NULL;
	error->active_bo_count = 0;

	if (count)
		error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
					   GFP_ATOMIC);

	if (error->active_bo) {
		int i = 0;
		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
			struct drm_gem_object *obj = obj_priv->obj;

			error->active_bo[i].size = obj->size;
			error->active_bo[i].name = obj->name;
			error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
			error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
			error->active_bo[i].read_domains = obj->read_domains;
			error->active_bo[i].write_domain = obj->write_domain;
			error->active_bo[i].fence_reg = obj_priv->fence_reg;
			error->active_bo[i].pinned = 0;
			if (obj_priv->pin_count > 0)
				error->active_bo[i].pinned = 1;
			if (obj_priv->user_pin_count > 0)
				error->active_bo[i].pinned = -1;
			error->active_bo[i].tiling = obj_priv->tiling_mode;
			error->active_bo[i].dirty = obj_priv->dirty;
			error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;

			if (++i == count)
				break;
		}
		error->active_bo_count = i;
	}

	do_gettimeofday(&error->time);

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(dev, error);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	spin_lock(&dev_priv->error_lock);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock(&dev_priv->error_lock);

	if (error)
		i915_error_state_free(dev, error);
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
static void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);
	u32 pipea_stats = I915_READ(PIPEASTAT);
	u32 pipeb_stats = I915_READ(PIPEBSTAT);

	i915_capture_error_state(dev);

	printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
	       eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			printk(KERN_ERR "  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			(void)I915_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			(void)I915_READ(PGTBL_ER);
		}
	}

	if (IS_I9XX(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			(void)I915_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		printk(KERN_ERR "memory refresh error\n");
		printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
		       pipea_stats);
		printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
		       pipeb_stats);
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		printk(KERN_ERR "instruction error\n");
		printk(KERN_ERR "  INSTPM: 0x%08x\n",
		       I915_READ(INSTPM));
		if (!IS_I965G(dev)) {
			u32 ipeir = I915_READ(IPEIR);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			(void)I915_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			printk(KERN_ERR "  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			(void)I915_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	(void)I915_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}

	if (wedged) {
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		DRM_WAKEUP(&dev_priv->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->error_work);
}
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv;
	u32 iir, new_iir;
	u32 pipea_stats, pipeb_stats;
	u32 vblank_status;
	u32 vblank_enable;
	int vblank = 0;
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	if (HAS_PCH_SPLIT(dev))
		return ironlake_irq_handler(dev);

	iir = I915_READ(IIR);

	if (IS_I965G(dev)) {
		vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
		vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
	} else {
		vblank_status = I915_VBLANK_INTERRUPT_STATUS;
		vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
	}

	for (;;) {
		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
		pipea_stats = I915_READ(PIPEASTAT);
		pipeb_stats = I915_READ(PIPEBSTAT);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		/*
		 * Clear the PIPE(A|B)STAT regs before the IIR
		 */
		if (pipea_stats & 0x8000ffff) {
			if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe a underrun\n");
			I915_WRITE(PIPEASTAT, pipea_stats);
			irq_received = 1;
		}

		if (pipeb_stats & 0x8000ffff) {
			if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe b underrun\n");
			I915_WRITE(PIPEBSTAT, pipeb_stats);
			irq_received = 1;
		}
		spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (dev->primary->master) {
			master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->last_dispatch =
					READ_BREADCRUMB(dev_priv);
		}

		if (iir & I915_USER_INTERRUPT) {
			u32 seqno = i915_get_gem_seqno(dev);
			dev_priv->mm.irq_gem_seqno = seqno;
			trace_i915_gem_request_complete(dev, seqno);
			DRM_WAKEUP(&dev_priv->irq_queue);
			dev_priv->hangcheck_count = 0;
			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
		}

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 0);

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 1);

		if (pipea_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 0);
			intel_finish_page_flip(dev, 0);
		}

		if (pipeb_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 1);
			intel_finish_page_flip(dev, 1);
		}

		if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
		    (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
		    (iir & I915_ASLE_INTERRUPT))
			opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	return dev_priv->counter;
}
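/*
 * The breadcrumb emitted above is the driver's software sequence number:
 * MI_STORE_DWORD_INDEX writes dev_priv->counter into the hardware status
 * page at I915_BREADCRUMB_INDEX, and MI_USER_INTERRUPT then raises
 * I915_USER_INTERRUPT.  READ_BREADCRUMB() in i915_wait_irq() below reads
 * that status page slot back to decide whether a wait is satisfied.
 */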
void i915_user_irq_get(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
void i915_user_irq_put(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (dev_priv->trace_irq_seqno == 0)
		i915_user_irq_get(dev);

	dev_priv->trace_irq_seqno = seqno;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
			 READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	i915_user_irq_get(dev);
	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);
	i915_user_irq_put(dev);

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}
/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (!dev_priv || !dev_priv->ring.virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}
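/*
 * The two ioctls above pair up: userspace emits a breadcrumb, then waits
 * on the sequence number it got back.  A hypothetical libdrm-style sketch
 * (illustrative only; the exact userspace plumbing is outside this file):
 *
 *	int seq;
 *	drm_i915_irq_emit_t emit = { .irq_seq = &seq };
 *	drmCommandWriteRead(fd, DRM_I915_IRQ_EMIT, &emit, sizeof(emit));
 *	drm_i915_irq_wait_t wait = { .irq_seq = seq };
 *	drmCommandWrite(fd, DRM_I915_IRQ_WAIT, &wait, sizeof(wait));
 */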
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
	u32 pipeconf;

	pipeconf = I915_READ(pipeconf_reg);
	if (!(pipeconf & PIPEACONF_ENABLE))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
					    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	else if (IS_I965G(dev))
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
	return 0;
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (HAS_PCH_SPLIT(dev))
		ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
					     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	else
		i915_disable_pipestat(dev_priv, pipe,
				      PIPE_VBLANK_INTERRUPT_ENABLE |
				      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
void i915_enable_interrupt (struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_PCH_SPLIT(dev))
		opregion_enable_asle(dev);
	dev_priv->irq_enabled = 1;
}
/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return 0;
}
int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}
/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}
struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) {
	drm_i915_private_t *dev_priv = dev->dev_private;
	return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
}
/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd;

	/* No reset support on this chip yet. */
	if (IS_GEN6(dev))
		return;

	if (!IS_I965G(dev))
		acthd = I915_READ(ACTHD);
	else
		acthd = I915_READ(ACTHD_I965);

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (list_empty(&dev_priv->mm.request_list) ||
	    i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
		dev_priv->hangcheck_count = 0;
		return;
	}

	if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);
		return;
	}

	/* Reset timer in case the chip hangs without another request being added */
	mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);

	if (acthd != dev_priv->last_acthd)
		dev_priv->hangcheck_count = 0;
	else
		dev_priv->hangcheck_count++;

	dev_priv->last_acthd = acthd;
}
/* drm_dma.h hooks
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	(void) I915_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	(void) I915_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	(void) I915_READ(SDEIER);
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* the interrupts we always want enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_mask = GT_USER_INTERRUPT;
	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;

	dev_priv->irq_mask_reg = ~display_mask;
	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
	I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
	(void) I915_READ(DEIER);
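	/*
	 * Note that DE_PIPEA_VBLANK/DE_PIPEB_VBLANK are set in the enable
	 * register above but are absent from display_mask, so they stay
	 * masked in DEIMR until i915_enable_vblank() unmasks them on demand
	 * via ironlake_enable_display_irq().
	 */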
	/* the user interrupt should be enabled, but initially masked */
	dev_priv->gt_irq_mask_reg = 0xffffffff;
	dev_priv->gt_irq_enable_reg = render_mask;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
	(void) I915_READ(GTIER);

	dev_priv->pch_irq_mask_reg = ~hotplug_mask;
	dev_priv->pch_irq_enable_reg = hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
	I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
	(void) I915_READ(SDEIER);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}
void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);

	if (HAS_PCH_SPLIT(dev)) {
		ironlake_irq_preinstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xeffe);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	(void) I915_READ(IER);
}
/*
 * Must be called after intel_modeset_init or hotplug interrupts won't be
 * enabled correctly.
 */
int i915_driver_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
	u32 error_mask;

	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	if (HAS_PCH_SPLIT(dev))
		return ironlake_irq_postinstall(dev);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		/* Note HDMI and DP share bits */
		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
			hotplug_en |= CRT_HOTPLUG_INT_EN;
		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT);
	}

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Disable pipe interrupt enables, clear pending pipe status */
	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
	/* Clear pending interrupt status */
	I915_WRITE(IIR, I915_READ(IIR));

	I915_WRITE(IER, enable_mask);
	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	(void) I915_READ(IER);

	opregion_enable_asle(dev);

	return 0;
}
static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));
}
void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	if (HAS_PCH_SPLIT(dev)) {
		ironlake_irq_uninstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}