/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        if ((dev_priv->irq_mask & mask) != 0) {
                dev_priv->irq_mask &= ~mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        if ((dev_priv->irq_mask & mask) != mask) {
                dev_priv->irq_mask |= mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

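/*
 * PIPESTAT keeps the interrupt enable bits in its high half and the
 * corresponding status bits in its low half, and writing 1 to a status
 * bit acks it. Enabling an event therefore also clears any status
 * already pending for it (mask >> 16 below).
 */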
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        if ((dev_priv->pipestat[pipe] & mask) != mask) {
                u32 reg = PIPESTAT(pipe);

                dev_priv->pipestat[pipe] |= mask;
                /* Enable the interrupt, clear any pending status */
                I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
                POSTING_READ(reg);
        }
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        if ((dev_priv->pipestat[pipe] & mask) != 0) {
                u32 reg = PIPESTAT(pipe);

                dev_priv->pipestat[pipe] &= ~mask;
                I915_WRITE(reg, dev_priv->pipestat[pipe]);
                POSTING_READ(reg);
        }
}

/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 * @dev: drm device
 */
void intel_enable_asle(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long irqflags;

        /* FIXME: opregion/asle for VLV */
        if (IS_VALLEYVIEW(dev))
                return;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

        if (HAS_PCH_SPLIT(dev)) {
                ironlake_enable_display_irq(dev_priv, DE_GSE);
        } else {
                i915_enable_pipestat(dev_priv, 1,
                                     PIPE_LEGACY_BLC_EVENT_ENABLE);
                if (INTEL_INFO(dev)->gen >= 4)
                        i915_enable_pipestat(dev_priv, 0,
                                             PIPE_LEGACY_BLC_EVENT_ENABLE);
        }

        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
        u32 high1, high2, low;

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
                high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        low >>= PIPE_FRAME_LOW_SHIFT;
        return (high1 << 8) | low;
}

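/*
 * gm45 and later parts have a dedicated free-running frame counter
 * register, so the high/low reconstruction above isn't needed.
 */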
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int reg = PIPE_FRMCOUNT_GM45(pipe);

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        return I915_READ(reg);
}

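/*
 * Sample the current scanout position for a pipe. gen4+ only exposes
 * the vertical position (PIPEDSL), while older parts provide a pixel
 * counter that can be decomposed into vertical and horizontal parts.
 */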
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                             int *vpos, int *hpos)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 vbl = 0, position = 0;
        int vbl_start, vbl_end, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        /* Get vtotal. */
        vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

        if (INTEL_INFO(dev)->gen >= 4) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = I915_READ(PIPEDSL(pipe));

                /* Decode into vertical scanout position. Don't have
                 * horizontal scanout position.
                 */
                *vpos = position & 0x1fff;
                *hpos = 0;
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        /* Query vblank area. */
        vbl = I915_READ(VBLANK(cpu_transcoder));

        /* Test position against vblank region. */
        vbl_start = vbl & 0x1fff;
        vbl_end = (vbl >> 16) & 0x1fff;

        if ((*vpos < vbl_start) || (*vpos > vbl_end))
                in_vbl = false;

        /* Inside "upper part" of vblank area? Apply corrective offset: */
        if (in_vbl && (*vpos >= vbl_start))
                *vpos = *vpos - vtotal;

        /* Readouts valid? */
        if (vbl > 0)
                ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

        /* In vblank? */
        if (in_vbl)
                ret |= DRM_SCANOUTPOS_INVBL;

        return ret;
}

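/*
 * Compute a precise vblank timestamp for a pipe. After validating the
 * crtc, all the actual work is done by the DRM core helper on top of
 * our scanout position callback.
 */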
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                              int *max_error,
                              struct timeval *vblank_time,
                              unsigned flags)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;

        if (pipe < 0 || pipe >= dev_priv->num_pipe) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        /* Get drm_crtc to timestamp: */
        crtc = intel_get_crtc_for_pipe(dev, pipe);
        if (crtc == NULL) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        if (!crtc->enabled) {
                DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
                return -EBUSY;
        }

        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
                                                     crtc);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    hotplug_work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *encoder;

        /* HPD irq before everything is fully set up. */
        if (!dev_priv->enable_hotplug_processing)
                return;

        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");

        list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
                if (encoder->hot_plug)
                        encoder->hot_plug(encoder);

        mutex_unlock(&mode_config->mutex);

        /* Just fire off a uevent and let userspace tell us what to do */
        drm_helper_hpd_irq_event(dev);
}

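/*
 * Ironlake GPU turbo: step the DRPS delay one notch towards max or min
 * in response to the hardware's busy up/down evaluation interrupts.
 * Note that a numerically smaller delay means a higher frequency, so
 * max_delay is the lower bound of the clamping below.
 */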
static void ironlake_handle_rps_change(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;
        unsigned long flags;

        spin_lock_irqsave(&mchdev_lock, flags);

        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock_irqrestore(&mchdev_lock, flags);
}

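/*
 * A ring signalled completed work: wake up all waiters and, if
 * hangcheck is enabled, push the hangcheck timer back out since the
 * GPU is evidently still making progress.
 */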
static void notify_ring(struct drm_device *dev,
                        struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (ring->obj == NULL)
                return;

        trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

        wake_up_all(&ring->irq_queue);
        if (i915_enable_hangcheck) {
                dev_priv->hangcheck_count = 0;
                mod_timer(&dev_priv->hangcheck_timer,
                          round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
        }
}

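/*
 * Deferred RPS work: pick up the PM interrupt bits stashed by the
 * interrupt handler, unmask further PM interrupts, and step the GPU
 * frequency up or down by one unless that would cross the limits set
 * through sysfs.
 */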
static void gen6_pm_rps_work(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    rps.work);
        u32 pm_iir, pm_imr;
        u8 new_delay;

        spin_lock_irq(&dev_priv->rps.lock);
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
        pm_imr = I915_READ(GEN6_PMIMR);
        I915_WRITE(GEN6_PMIMR, 0);
        spin_unlock_irq(&dev_priv->rps.lock);

        if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
                return;

        mutex_lock(&dev_priv->rps.hw_lock);

        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
                new_delay = dev_priv->rps.cur_delay + 1;
        else
                new_delay = dev_priv->rps.cur_delay - 1;

        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        if (!(new_delay > dev_priv->rps.max_delay ||
              new_delay < dev_priv->rps.min_delay)) {
                gen6_set_rps(dev_priv->dev, new_delay);
        }

        mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[5];
        uint32_t misccpctl;
        unsigned long flags;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->dev->struct_mutex);

        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);

        error_status = I915_READ(GEN7_L3CDERRST1);
        row = GEN7_PARITY_ERROR_ROW(error_status);
        bank = GEN7_PARITY_ERROR_BANK(error_status);
        subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

        I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
                                    GEN7_L3CDERRST1_ENABLE);
        POSTING_READ(GEN7_L3CDERRST1);

        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        mutex_unlock(&dev_priv->dev->struct_mutex);

        parity_event[0] = "L3_PARITY_ERROR=1";
        parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
        parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
        parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
        parity_event[4] = NULL;

        kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
                           KOBJ_CHANGE, parity_event);

        DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
                  row, bank, subbank);

        kfree(parity_event[3]);
        kfree(parity_event[2]);
        kfree(parity_event[1]);
}

static void ivybridge_handle_parity_error(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long flags;

        if (!HAS_L3_GPU_CACHE(dev))
                return;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

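/*
 * GT interrupt dispatch for snb+: per-ring user interrupts, command
 * stream errors and L3 parity errors.
 */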
static void snb_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
                      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & GEN6_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
        if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[BCS]);

        if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
                      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
                      GT_RENDER_CS_ERROR_INTERRUPT)) {
                DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
                i915_handle_error(dev, false);
        }

        if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
                ivybridge_handle_parity_error(dev);
}

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
                                u32 pm_iir)
{
        unsigned long flags;

        /*
         * IIR bits should never already be set because IMR should
         * prevent an interrupt from being shown in IIR. Finding one
         * already set would mean we had unsafely cleared
         * dev_priv->rps.pm_iir. Although missing an interrupt of the
         * same type is not a problem, it would indicate a problem in
         * the logic.
         *
         * The mask bit in IMR is cleared by dev_priv->rps.work.
         */

        spin_lock_irqsave(&dev_priv->rps.lock, flags);
        dev_priv->rps.pm_iir |= pm_iir;
        I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
        POSTING_READ(GEN6_PMIMR);
        spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

        queue_work(dev_priv->wq, &dev_priv->rps.work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

        wake_up_all(&dev_priv->gmbus_wait_queue);
}

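/* DP AUX completions wait on the same queue as GMBUS transfers. */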
static void dp_aux_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

        wake_up_all(&dev_priv->gmbus_wait_queue);
}

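/*
 * Top-level interrupt handler for ValleyView: loop until VLV_IIR,
 * GTIIR and GEN6_PMIIR all read back zero, handling GT, pipe, hotplug,
 * GMBUS and RPS events along the way.
 */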
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 iir, gt_iir, pm_iir;
        irqreturn_t ret = IRQ_NONE;
        unsigned long irqflags;
        int pipe;
        u32 pipe_stats[I915_MAX_PIPES];

        atomic_inc(&dev_priv->irq_received);

        while (true) {
                iir = I915_READ(VLV_IIR);
                gt_iir = I915_READ(GTIIR);
                pm_iir = I915_READ(GEN6_PMIIR);

                if (gt_iir == 0 && pm_iir == 0 && iir == 0)
                        goto out;

                ret = IRQ_HANDLED;

                snb_gt_irq_handler(dev, dev_priv, gt_iir);

                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
                        pipe_stats[pipe] = I915_READ(reg);

                        /*
                         * Clear the PIPE*STAT regs before the IIR
                         */
                        if (pipe_stats[pipe] & 0x8000ffff) {
                                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                        DRM_DEBUG_DRIVER("pipe %c underrun\n",
                                                         pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

                for_each_pipe(pipe) {
                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
                                drm_handle_vblank(dev, pipe);

                        if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
                                intel_prepare_page_flip(dev, pipe);
                                intel_finish_page_flip(dev, pipe);
                        }
                }

                /* Consume port.  Then clear IIR or we'll miss events */
                if (iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                         hotplug_status);
                        if (hotplug_status & dev_priv->hotplug_supported_mask)
                                queue_work(dev_priv->wq,
                                           &dev_priv->hotplug_work);

                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                        I915_READ(PORT_HOTPLUG_STAT);
                }

                if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                        gmbus_irq_handler(dev);

                if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
                        gen6_queue_rps_work(dev_priv, pm_iir);

                I915_WRITE(GTIIR, gt_iir);
                I915_WRITE(GEN6_PMIIR, pm_iir);
                I915_WRITE(VLV_IIR, iir);
        }

out:
        return ret;
}

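/* PCH (south display engine) interrupt dispatch for Ibex Peak. */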
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        if (pch_iir & SDE_HOTPLUG_MASK)
                queue_work(dev_priv->wq, &dev_priv->hotplug_work);

        if (pch_iir & SDE_AUDIO_POWER_MASK)
                DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                                 (pch_iir & SDE_AUDIO_POWER_MASK) >>
                                 SDE_AUDIO_POWER_SHIFT);

        if (pch_iir & SDE_AUX_MASK)
                dp_aux_irq_handler(dev);

        if (pch_iir & SDE_GMBUS)
                gmbus_irq_handler(dev);

        if (pch_iir & SDE_AUDIO_HDCP_MASK)
                DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

        if (pch_iir & SDE_AUDIO_TRANS_MASK)
                DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

        if (pch_iir & SDE_POISON)
                DRM_ERROR("PCH poison interrupt\n");

        if (pch_iir & SDE_FDI_MASK)
                for_each_pipe(pipe)
                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));

        if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
                DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

        if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
                DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

        if (pch_iir & SDE_TRANSB_FIFO_UNDER)
                DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
        if (pch_iir & SDE_TRANSA_FIFO_UNDER)
                DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        if (pch_iir & SDE_HOTPLUG_MASK_CPT)
                queue_work(dev_priv->wq, &dev_priv->hotplug_work);

        if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
                DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                                 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
                                 SDE_AUDIO_POWER_SHIFT_CPT);

        if (pch_iir & SDE_AUX_MASK_CPT)
                dp_aux_irq_handler(dev);

        if (pch_iir & SDE_GMBUS_CPT)
                gmbus_irq_handler(dev);

        if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
                DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

        if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
                DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

        if (pch_iir & SDE_FDI_MASK_CPT)
                for_each_pipe(pipe)
                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));
}

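/*
 * Top-level interrupt handler for IvyBridge. The master interrupt
 * enable in DEIER is dropped while the IIR registers are read and
 * cleared, and restored on the way out.
 */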
static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 de_iir, gt_iir, de_ier, pm_iir;
        irqreturn_t ret = IRQ_NONE;
        int i;

        atomic_inc(&dev_priv->irq_received);

        /* disable master interrupt before clearing iir */
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

        gt_iir = I915_READ(GTIIR);
        if (gt_iir) {
                snb_gt_irq_handler(dev, dev_priv, gt_iir);
                I915_WRITE(GTIIR, gt_iir);
                ret = IRQ_HANDLED;
        }

        de_iir = I915_READ(DEIIR);
        if (de_iir) {
                if (de_iir & DE_AUX_CHANNEL_A_IVB)
                        dp_aux_irq_handler(dev);

                if (de_iir & DE_GSE_IVB)
                        intel_opregion_gse_intr(dev);

                for (i = 0; i < 3; i++) {
                        if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
                                drm_handle_vblank(dev, i);
                        if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
                                intel_prepare_page_flip(dev, i);
                                intel_finish_page_flip_plane(dev, i);
                        }
                }

                /* check event from PCH */
                if (de_iir & DE_PCH_EVENT_IVB) {
                        u32 pch_iir = I915_READ(SDEIIR);

                        cpt_irq_handler(dev, pch_iir);

                        /* clear PCH hotplug event before clear CPU irq */
                        I915_WRITE(SDEIIR, pch_iir);
                }

                I915_WRITE(DEIIR, de_iir);
                ret = IRQ_HANDLED;
        }

        pm_iir = I915_READ(GEN6_PMIIR);
        if (pm_iir) {
                if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
                        gen6_queue_rps_work(dev_priv, pm_iir);
                I915_WRITE(GEN6_PMIIR, pm_iir);
                ret = IRQ_HANDLED;
        }

        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);

        return ret;
}

static void ilk_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
}

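/*
 * Top-level interrupt handler for Ironlake/SandyBridge: read the
 * display, GT and (on gen6) PM IIR registers once, dispatch, then ack
 * them all with the master interrupt still disabled.
 */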
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret = IRQ_NONE;
        u32 de_iir, gt_iir, de_ier, pm_iir;

        atomic_inc(&dev_priv->irq_received);

        /* disable master interrupt before clearing iir */
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
        POSTING_READ(DEIER);

        de_iir = I915_READ(DEIIR);
        gt_iir = I915_READ(GTIIR);
        pm_iir = I915_READ(GEN6_PMIIR);

        if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
                goto done;

        ret = IRQ_HANDLED;

        if (IS_GEN5(dev))
                ilk_gt_irq_handler(dev, dev_priv, gt_iir);
        else
                snb_gt_irq_handler(dev, dev_priv, gt_iir);

        if (de_iir & DE_AUX_CHANNEL_A)
                dp_aux_irq_handler(dev);

        if (de_iir & DE_GSE)
                intel_opregion_gse_intr(dev);

        if (de_iir & DE_PIPEA_VBLANK)
                drm_handle_vblank(dev, 0);

        if (de_iir & DE_PIPEB_VBLANK)
                drm_handle_vblank(dev, 1);

        if (de_iir & DE_PLANEA_FLIP_DONE) {
                intel_prepare_page_flip(dev, 0);
                intel_finish_page_flip_plane(dev, 0);
        }

        if (de_iir & DE_PLANEB_FLIP_DONE) {
                intel_prepare_page_flip(dev, 1);
                intel_finish_page_flip_plane(dev, 1);
        }

        /* check event from PCH */
        if (de_iir & DE_PCH_EVENT) {
                u32 pch_iir = I915_READ(SDEIIR);

                if (HAS_PCH_CPT(dev))
                        cpt_irq_handler(dev, pch_iir);
                else
                        ibx_irq_handler(dev, pch_iir);

                /* should clear PCH hotplug event before clear CPU irq */
                I915_WRITE(SDEIIR, pch_iir);
        }

        if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
                ironlake_handle_rps_change(dev);

        if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
                gen6_queue_rps_work(dev_priv, pm_iir);

        I915_WRITE(GTIIR, gt_iir);
        I915_WRITE(DEIIR, de_iir);
        I915_WRITE(GEN6_PMIIR, pm_iir);

done:
        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);

        return ret;
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    error_work);
        struct drm_device *dev = dev_priv->dev;
        char *error_event[] = { "ERROR=1", NULL };
        char *reset_event[] = { "RESET=1", NULL };
        char *reset_done_event[] = { "ERROR=0", NULL };

        kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

        if (atomic_read(&dev_priv->mm.wedged)) {
                DRM_DEBUG_DRIVER("resetting chip\n");
                kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
                if (!i915_reset(dev)) {
                        atomic_set(&dev_priv->mm.wedged, 0);
                        kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
                }
                complete_all(&dev_priv->error_completion);
        }
}

/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
                                    uint32_t *instdone)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

        switch (INTEL_INFO(dev)->gen) {
        case 2:
        case 3:
                instdone[0] = I915_READ(INSTDONE);
                break;
        case 4:
        case 5:
        case 6:
                instdone[0] = I915_READ(INSTDONE_I965);
                instdone[1] = I915_READ(INSTDONE1);
                break;
        default:
                WARN_ONCE(1, "Unsupported platform\n");
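                /* fall through */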
        case 7:
                instdone[0] = I915_READ(GEN7_INSTDONE_1);
                instdone[1] = I915_READ(GEN7_SC_INSTDONE);
                instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
                instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
                break;
        }
}

#ifdef CONFIG_DEBUG_FS
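/*
 * Copy an object's backing pages into a freshly allocated error object,
 * reading through the GTT, stolen memory or a CPU kmap as appropriate.
 * Runs in atomic context, hence the GFP_ATOMIC allocations.
 */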
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
                         struct drm_i915_gem_object *src)
{
        struct drm_i915_error_object *dst;
        int i, count;
        u32 reloc_offset;

        if (src == NULL || src->pages == NULL)
                return NULL;

        count = src->base.size / PAGE_SIZE;

        dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
        if (dst == NULL)
                return NULL;

        reloc_offset = src->gtt_offset;
        for (i = 0; i < count; i++) {
                unsigned long flags;
                void *d;

                d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
                if (d == NULL)
                        goto unwind;

                local_irq_save(flags);
                if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
                    src->has_global_gtt_mapping) {
                        void __iomem *s;

                        /* Simply ignore tiling or any overlapping fence.
                         * It's part of the error state, and this hopefully
                         * captures what the GPU read.
                         */

                        s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
                                                     reloc_offset);
                        memcpy_fromio(d, s, PAGE_SIZE);
                        io_mapping_unmap_atomic(s);
                } else if (src->stolen) {
                        unsigned long offset;

                        offset = dev_priv->mm.stolen_base;
                        offset += src->stolen->start;
                        offset += i << PAGE_SHIFT;

                        memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
                } else {
                        struct page *page;
                        void *s;

                        page = i915_gem_object_get_page(src, i);

                        drm_clflush_pages(&page, 1);

                        s = kmap_atomic(page);
                        memcpy(d, s, PAGE_SIZE);
                        kunmap_atomic(s);

                        drm_clflush_pages(&page, 1);
                }
                local_irq_restore(flags);

                dst->pages[i] = d;

                reloc_offset += PAGE_SIZE;
        }
        dst->page_count = count;
        dst->gtt_offset = src->gtt_offset;

        return dst;

unwind:
        while (i--)
                kfree(dst->pages[i]);
        kfree(dst);
        return NULL;
}

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
        int page;

        if (obj == NULL)
                return;

        for (page = 0; page < obj->page_count; page++)
                kfree(obj->pages[page]);

        kfree(obj);
}

void
i915_error_state_free(struct kref *error_ref)
{
        struct drm_i915_error_state *error = container_of(error_ref,
                                                          typeof(*error), ref);
        int i;

        for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
                i915_error_object_free(error->ring[i].batchbuffer);
                i915_error_object_free(error->ring[i].ringbuffer);
                kfree(error->ring[i].requests);
        }

        kfree(error->active_bo);
        kfree(error->overlay);
        kfree(error);
}

static void capture_bo(struct drm_i915_error_buffer *err,
                       struct drm_i915_gem_object *obj)
{
        err->size = obj->base.size;
        err->name = obj->base.name;
        err->rseqno = obj->last_read_seqno;
        err->wseqno = obj->last_write_seqno;
        err->gtt_offset = obj->gtt_offset;
        err->read_domains = obj->base.read_domains;
        err->write_domain = obj->base.write_domain;
        err->fence_reg = obj->fence_reg;
        err->pinned = 0;
        if (obj->pin_count > 0)
                err->pinned = 1;
        if (obj->user_pin_count > 0)
                err->pinned = -1;
        err->tiling = obj->tiling_mode;
        err->dirty = obj->dirty;
        err->purgeable = obj->madv != I915_MADV_WILLNEED;
        err->ring = obj->ring ? obj->ring->id : -1;
        err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
                             int count, struct list_head *head)
{
        struct drm_i915_gem_object *obj;
        int i = 0;

        list_for_each_entry(obj, head, mm_list) {
                capture_bo(err++, obj);
                if (++i == count)
                        break;
        }

        return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
                             int count, struct list_head *head)
{
        struct drm_i915_gem_object *obj;
        int i = 0;

        list_for_each_entry(obj, head, gtt_list) {
                if (obj->pin_count == 0)
                        continue;

                capture_bo(err++, obj);
                if (++i == count)
                        break;
        }

        return i;
}

static void i915_gem_record_fences(struct drm_device *dev,
                                   struct drm_i915_error_state *error)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;

        /* Fences */
        switch (INTEL_INFO(dev)->gen) {
        case 7:
        case 6:
                for (i = 0; i < 16; i++)
                        error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
                break;
        case 5:
        case 4:
                for (i = 0; i < 16; i++)
                        error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
                break;
        case 3:
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                        for (i = 0; i < 8; i++)
                                error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
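                /* fall through */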
        case 2:
                for (i = 0; i < 8; i++)
                        error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
                break;
        default:
                BUG();
        }
}

static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
                             struct intel_ring_buffer *ring)
{
        struct drm_i915_gem_object *obj;
        u32 seqno;

        if (!ring->get_seqno)
                return NULL;

        if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
                u32 acthd = I915_READ(ACTHD);

                if (WARN_ON(ring->id != RCS))
                        return NULL;

                obj = ring->private;
                if (acthd >= obj->gtt_offset &&
                    acthd < obj->gtt_offset + obj->base.size)
                        return i915_error_object_create(dev_priv, obj);
        }

        seqno = ring->get_seqno(ring, false);
        list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
                if (obj->ring != ring)
                        continue;

                if (i915_seqno_passed(seqno, obj->last_read_seqno))
                        continue;

                if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
                        continue;

                /* We need to copy these to an anonymous buffer as the simplest
                 * method to avoid being overwritten by userspace.
                 */
                return i915_error_object_create(dev_priv, obj);
        }

        return NULL;
}

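/*
 * Snapshot the per-ring register state (IPEIR/IPEHR, instdone,
 * semaphores, head/tail and so on) into the error record.
 */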
static void i915_record_ring_state(struct drm_device *dev,
                                   struct drm_i915_error_state *error,
                                   struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (INTEL_INFO(dev)->gen >= 6) {
                error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
                error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
                error->semaphore_mboxes[ring->id][0]
                        = I915_READ(RING_SYNC_0(ring->mmio_base));
                error->semaphore_mboxes[ring->id][1]
                        = I915_READ(RING_SYNC_1(ring->mmio_base));
                error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
                error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
        }

        if (INTEL_INFO(dev)->gen >= 4) {
                error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
                error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
                error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
                error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
                error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
                if (ring->id == RCS)
                        error->bbaddr = I915_READ64(BB_ADDR);
        } else {
                error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
                error->ipeir[ring->id] = I915_READ(IPEIR);
                error->ipehr[ring->id] = I915_READ(IPEHR);
                error->instdone[ring->id] = I915_READ(INSTDONE);
        }

        error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
        error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
        error->seqno[ring->id] = ring->get_seqno(ring, false);
        error->acthd[ring->id] = intel_ring_get_active_head(ring);
        error->head[ring->id] = I915_READ_HEAD(ring);
        error->tail[ring->id] = I915_READ_TAIL(ring);

        error->cpu_ring_head[ring->id] = ring->head;
        error->cpu_ring_tail[ring->id] = ring->tail;
}

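/*
 * For each ring, capture the suspect batchbuffer, the ringbuffer
 * contents and the list of outstanding requests.
 */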
static void i915_gem_record_rings(struct drm_device *dev,
                                  struct drm_i915_error_state *error)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        struct drm_i915_gem_request *request;
        int i, count;

        for_each_ring(ring, dev_priv, i) {
                i915_record_ring_state(dev, error, ring);

                error->ring[i].batchbuffer =
                        i915_error_first_batchbuffer(dev_priv, ring);

                error->ring[i].ringbuffer =
                        i915_error_object_create(dev_priv, ring->obj);

                count = 0;
                list_for_each_entry(request, &ring->request_list, list)
                        count++;

                error->ring[i].num_requests = count;
                error->ring[i].requests =
                        kmalloc(count * sizeof(struct drm_i915_error_request),
                                GFP_ATOMIC);
                if (error->ring[i].requests == NULL) {
                        error->ring[i].num_requests = 0;
                        continue;
                }

                count = 0;
                list_for_each_entry(request, &ring->request_list, list) {
                        struct drm_i915_error_request *erq;

                        erq = &error->ring[i].requests[count++];
                        erq->seqno = request->seqno;
                        erq->jiffies = request->emitted_jiffies;
                        erq->tail = request->tail;
                }
        }
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct drm_i915_error_state *error;
        unsigned long flags;
        int i, pipe;

        spin_lock_irqsave(&dev_priv->error_lock, flags);
        error = dev_priv->first_error;
        spin_unlock_irqrestore(&dev_priv->error_lock, flags);
        if (error)
                return;

        /* Account for pipe specific data like PIPE*STAT */
        error = kzalloc(sizeof(*error), GFP_ATOMIC);
        if (!error) {
                DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
                return;
        }

        DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
                 dev->primary->index);

        kref_init(&error->ref);
        error->eir = I915_READ(EIR);
        error->pgtbl_er = I915_READ(PGTBL_ER);
        error->ccid = I915_READ(CCID);

        if (HAS_PCH_SPLIT(dev))
                error->ier = I915_READ(DEIER) | I915_READ(GTIER);
        else if (IS_VALLEYVIEW(dev))
                error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
        else if (IS_GEN2(dev))
                error->ier = I915_READ16(IER);
        else
                error->ier = I915_READ(IER);

        for_each_pipe(pipe)
                error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

        if (INTEL_INFO(dev)->gen >= 6) {
                error->error = I915_READ(ERROR_GEN6);
                error->done_reg = I915_READ(DONE_REG);
        }

        if (INTEL_INFO(dev)->gen == 7)
                error->err_int = I915_READ(GEN7_ERR_INT);

        i915_get_extra_instdone(dev, error->extra_instdone);

        i915_gem_record_fences(dev, error);
        i915_gem_record_rings(dev, error);

        /* Record buffers on the active and pinned lists. */
        error->active_bo = NULL;
        error->pinned_bo = NULL;

        i = 0;
        list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
                i++;
        error->active_bo_count = i;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
                if (obj->pin_count)
                        i++;
        error->pinned_bo_count = i - error->active_bo_count;

        if (i) {
                error->active_bo = kmalloc(sizeof(*error->active_bo) * i,
                                           GFP_ATOMIC);
                if (error->active_bo)
                        error->pinned_bo =
                                error->active_bo + error->active_bo_count;
        }

        if (error->active_bo)
                error->active_bo_count =
                        capture_active_bo(error->active_bo,
                                          error->active_bo_count,
                                          &dev_priv->mm.active_list);

        if (error->pinned_bo)
                error->pinned_bo_count =
                        capture_pinned_bo(error->pinned_bo,
                                          error->pinned_bo_count,
                                          &dev_priv->mm.bound_list);

        do_gettimeofday(&error->time);

        error->overlay = intel_overlay_capture_error_state(dev);
        error->display = intel_display_capture_error_state(dev);

        spin_lock_irqsave(&dev_priv->error_lock, flags);
        if (dev_priv->first_error == NULL) {
                dev_priv->first_error = error;
                error = NULL;
        }
        spin_unlock_irqrestore(&dev_priv->error_lock, flags);

        if (error)
                i915_error_state_free(&error->ref);
}

void i915_destroy_error_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->error_lock, flags);
        error = dev_priv->first_error;
        dev_priv->first_error = NULL;
        spin_unlock_irqrestore(&dev_priv->error_lock, flags);

        if (error)
                kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif

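/*
 * Decode and log the reasons in EIR, then write the bits back to ack
 * them; anything that refuses to clear is masked off in EMR so a stuck
 * error cannot generate an interrupt storm.
 */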
static void i915_report_and_clear_eir(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t instdone[I915_NUM_INSTDONE_REG];
        u32 eir = I915_READ(EIR);
        int pipe, i;

        if (!eir)
                return;

        pr_err("render error detected, EIR: 0x%08x\n", eir);

        i915_get_extra_instdone(dev, instdone);

        if (IS_G4X(dev)) {
                if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
                        u32 ipeir = I915_READ(IPEIR_I965);

                        pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
                        pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
                        for (i = 0; i < ARRAY_SIZE(instdone); i++)
                                pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
                        pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
                        pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
                        I915_WRITE(IPEIR_I965, ipeir);
                        POSTING_READ(IPEIR_I965);
                }
                if (eir & GM45_ERROR_PAGE_TABLE) {
                        u32 pgtbl_err = I915_READ(PGTBL_ER);
                        pr_err("page table error\n");
                        pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
                        I915_WRITE(PGTBL_ER, pgtbl_err);
                        POSTING_READ(PGTBL_ER);
                }
        }

        if (!IS_GEN2(dev)) {
                if (eir & I915_ERROR_PAGE_TABLE) {
                        u32 pgtbl_err = I915_READ(PGTBL_ER);
                        pr_err("page table error\n");
                        pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
                        I915_WRITE(PGTBL_ER, pgtbl_err);
                        POSTING_READ(PGTBL_ER);
                }
        }

        if (eir & I915_ERROR_MEMORY_REFRESH) {
                pr_err("memory refresh error:\n");
                for_each_pipe(pipe)
                        pr_err("pipe %c stat: 0x%08x\n",
                               pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
                /* pipestat has already been acked */
        }
        if (eir & I915_ERROR_INSTRUCTION) {
                pr_err("instruction error\n");
                pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
                for (i = 0; i < ARRAY_SIZE(instdone); i++)
                        pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
                if (INTEL_INFO(dev)->gen < 4) {
                        u32 ipeir = I915_READ(IPEIR);

                        pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
                        pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
                        pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
                        I915_WRITE(IPEIR, ipeir);
                        POSTING_READ(IPEIR);
                } else {
                        u32 ipeir = I915_READ(IPEIR_I965);

                        pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
                        pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
                        pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
                        pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
                        I915_WRITE(IPEIR_I965, ipeir);
                        POSTING_READ(IPEIR_I965);
                }
        }

        I915_WRITE(EIR, eir);
        POSTING_READ(EIR);
        eir = I915_READ(EIR);
        if (eir) {
                /*
                 * some errors might have become stuck,
                 * mask them.
                 */
                DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
                I915_WRITE(EMR, I915_READ(EMR) | eir);
                I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
        }
}

1465 /**
1466  * i915_handle_error - handle an error interrupt
1467  * @dev: drm device
1468  *
1469  * Do some basic checking of register state at error interrupt time and
1470  * dump it to the syslog.  Also call i915_capture_error_state() to make
1471  * sure we get a record and make it available in debugfs.  Fire a uevent
1472  * so userspace knows something bad happened (should trigger collection
1473  * of a ring dump etc.).
1474  */
1475 void i915_handle_error(struct drm_device *dev, bool wedged)
1476 {
1477         struct drm_i915_private *dev_priv = dev->dev_private;
1478         struct intel_ring_buffer *ring;
1479         int i;
1480
1481         i915_capture_error_state(dev);
1482         i915_report_and_clear_eir(dev);
1483
1484         if (wedged) {
1485                 INIT_COMPLETION(dev_priv->error_completion);
1486                 atomic_set(&dev_priv->mm.wedged, 1);
1487
1488                 /*
1489          * Wake up waiting processes so they don't hang
1490                  */
1491                 for_each_ring(ring, dev_priv, i)
1492                         wake_up_all(&ring->irq_queue);
1493         }
1494
1495         queue_work(dev_priv->wq, &dev_priv->error_work);
1496 }
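/*
 * Illustrative sketch (the code lives elsewhere, not in this file): a wait
 * path that observes the wedged flag set above typically blocks on
 * error_completion until the reset work queued above has run, roughly:
 *
 *	if (atomic_read(&dev_priv->mm.wedged)) {
 *		ret = wait_for_completion_interruptible(
 *					&dev_priv->error_completion);
 *		if (ret)
 *			return ret;
 *	}
 */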
1497
1498 static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1499 {
1500         drm_i915_private_t *dev_priv = dev->dev_private;
1501         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1502         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1503         struct drm_i915_gem_object *obj;
1504         struct intel_unpin_work *work;
1505         unsigned long flags;
1506         bool stall_detected;
1507
1508         /* Ignore early vblank irqs */
1509         if (intel_crtc == NULL)
1510                 return;
1511
1512         spin_lock_irqsave(&dev->event_lock, flags);
1513         work = intel_crtc->unpin_work;
1514
1515         if (work == NULL ||
1516             atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1517             !work->enable_stall_check) {
1518                 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1519                 spin_unlock_irqrestore(&dev->event_lock, flags);
1520                 return;
1521         }
1522
1523         /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1524         obj = work->pending_flip_obj;
1525         if (INTEL_INFO(dev)->gen >= 4) {
1526                 int dspsurf = DSPSURF(intel_crtc->plane);
1527                 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1528                                         obj->gtt_offset;
1529         } else {
1530                 int dspaddr = DSPADDR(intel_crtc->plane);
1531                 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
1532                                                         crtc->y * crtc->fb->pitches[0] +
1533                                                         crtc->x * crtc->fb->bits_per_pixel/8);
1534         }
1535
1536         spin_unlock_irqrestore(&dev->event_lock, flags);
1537
1538         if (stall_detected) {
1539                 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1540                 intel_prepare_page_flip(dev, intel_crtc->plane);
1541         }
1542 }
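/*
 * Worked example for the pre-gen4 comparison above (values hypothetical):
 * for an XRGB8888 framebuffer (bits_per_pixel = 32) scanned out at
 * crtc->x = 8, crtc->y = 2 with pitches[0] = 7680, the flip is deemed
 * complete once DSPADDR reads back
 *
 *	obj->gtt_offset + 2 * 7680 + 8 * 32/8
 *	  = obj->gtt_offset + 15392
 *
 * i.e. the base of the new buffer plus the byte offset of the first
 * visible pixel.
 */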
1543
1544 /* Called from drm generic code, passed 'crtc' which
1545  * we use as a pipe index
1546  */
1547 static int i915_enable_vblank(struct drm_device *dev, int pipe)
1548 {
1549         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1550         unsigned long irqflags;
1551
1552         if (!i915_pipe_enabled(dev, pipe))
1553                 return -EINVAL;
1554
1555         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1556         if (INTEL_INFO(dev)->gen >= 4)
1557                 i915_enable_pipestat(dev_priv, pipe,
1558                                      PIPE_START_VBLANK_INTERRUPT_ENABLE);
1559         else
1560                 i915_enable_pipestat(dev_priv, pipe,
1561                                      PIPE_VBLANK_INTERRUPT_ENABLE);
1562
1563         /* maintain vblank delivery even in deep C-states */
1564         if (dev_priv->info->gen == 3)
1565                 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1566         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1567
1568         return 0;
1569 }
1570
1571 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1572 {
1573         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1574         unsigned long irqflags;
1575
1576         if (!i915_pipe_enabled(dev, pipe))
1577                 return -EINVAL;
1578
1579         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1580         ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1581                                     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1582         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1583
1584         return 0;
1585 }
1586
1587 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1588 {
1589         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1590         unsigned long irqflags;
1591
1592         if (!i915_pipe_enabled(dev, pipe))
1593                 return -EINVAL;
1594
1595         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1596         ironlake_enable_display_irq(dev_priv,
1597                                     DE_PIPEA_VBLANK_IVB << (5 * pipe));
1598         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1599
1600         return 0;
1601 }
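/*
 * On IVB the per-pipe display interrupt bits are laid out in identical
 * groups spaced 5 bits apart, so shifting the pipe A vblank bit left by
 * (5 * pipe) yields the corresponding bit for pipe B (+5 bits) or
 * pipe C (+10 bits).  The disable path below mirrors this.
 */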
1602
1603 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1604 {
1605         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1606         unsigned long irqflags;
1607         u32 imr;
1608
1609         if (!i915_pipe_enabled(dev, pipe))
1610                 return -EINVAL;
1611
1612         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1613         imr = I915_READ(VLV_IMR);
1614         if (pipe == 0)
1615                 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1616         else
1617                 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1618         I915_WRITE(VLV_IMR, imr);
1619         i915_enable_pipestat(dev_priv, pipe,
1620                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
1621         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1622
1623         return 0;
1624 }
1625
1626 /* Called from drm generic code, passed 'crtc' which
1627  * we use as a pipe index
1628  */
1629 static void i915_disable_vblank(struct drm_device *dev, int pipe)
1630 {
1631         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1632         unsigned long irqflags;
1633
1634         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1635         if (dev_priv->info->gen == 3)
1636                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
1637
1638         i915_disable_pipestat(dev_priv, pipe,
1639                               PIPE_VBLANK_INTERRUPT_ENABLE |
1640                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
1641         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1642 }
1643
1644 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1645 {
1646         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1647         unsigned long irqflags;
1648
1649         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1650         ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1651                                      DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1652         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1653 }
1654
1655 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1656 {
1657         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1658         unsigned long irqflags;
1659
1660         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1661         ironlake_disable_display_irq(dev_priv,
1662                                      DE_PIPEA_VBLANK_IVB << (pipe * 5));
1663         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1664 }
1665
1666 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1667 {
1668         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1669         unsigned long irqflags;
1670         u32 imr;
1671
1672         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1673         i915_disable_pipestat(dev_priv, pipe,
1674                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
1675         imr = I915_READ(VLV_IMR);
1676         if (pipe == 0)
1677                 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1678         else
1679                 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1680         I915_WRITE(VLV_IMR, imr);
1681         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1682 }
1683
1684 static u32
1685 ring_last_seqno(struct intel_ring_buffer *ring)
1686 {
1687         return list_entry(ring->request_list.prev,
1688                           struct drm_i915_gem_request, list)->seqno;
1689 }
1690
1691 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1692 {
1693         if (list_empty(&ring->request_list) ||
1694             i915_seqno_passed(ring->get_seqno(ring, false),
1695                               ring_last_seqno(ring))) {
1696                 /* Issue a wake-up to catch stuck h/w. */
1697                 if (waitqueue_active(&ring->irq_queue)) {
1698                         DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
1699                                   ring->name);
1700                         wake_up_all(&ring->irq_queue);
1701                         *err = true;
1702                 }
1703                 return true;
1704         }
1705         return false;
1706 }
1707
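/*
 * kick_ring() below implements the "poke the RB_WAIT bit" trick described
 * in i915_hangcheck_hung(): if the ring CTL register shows the command
 * streamer blocked in a WAIT_FOR_EVENT (RING_WAIT set), writing the
 * register value straight back clears the wait and lets the ring resume.
 */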
1708 static bool kick_ring(struct intel_ring_buffer *ring)
1709 {
1710         struct drm_device *dev = ring->dev;
1711         struct drm_i915_private *dev_priv = dev->dev_private;
1712         u32 tmp = I915_READ_CTL(ring);
1713         if (tmp & RING_WAIT) {
1714                 DRM_ERROR("Kicking stuck wait on %s\n",
1715                           ring->name);
1716                 I915_WRITE_CTL(ring, tmp);
1717                 return true;
1718         }
1719         return false;
1720 }
1721
1722 static bool i915_hangcheck_hung(struct drm_device *dev)
1723 {
1724         drm_i915_private_t *dev_priv = dev->dev_private;
1725
1726         if (dev_priv->hangcheck_count++ > 1) {
1727                 bool hung = true;
1728
1729                 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1730                 i915_handle_error(dev, true);
1731
1732                 if (!IS_GEN2(dev)) {
1733                         struct intel_ring_buffer *ring;
1734                         int i;
1735
1736                         /* Is the chip hanging on a WAIT_FOR_EVENT?
1737                          * If so we can simply poke the RB_WAIT bit
1738                          * and break the hang. This should work on
1739                          * all but the second generation chipsets.
1740                          */
1741                         for_each_ring(ring, dev_priv, i)
1742                                 hung &= !kick_ring(ring);
1743                 }
1744
1745                 return hung;
1746         }
1747
1748         return false;
1749 }
1750
1751 /**
1752  * i915_hangcheck_elapsed - called when the chip hasn't reported back with completed
1753  * batchbuffers in a long time. The first time this is called we simply record
1754  * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
1755  * again, we assume the chip is wedged and try to fix it.
1756  */
1757 void i915_hangcheck_elapsed(unsigned long data)
1758 {
1759         struct drm_device *dev = (struct drm_device *)data;
1760         drm_i915_private_t *dev_priv = dev->dev_private;
1761         uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
1762         struct intel_ring_buffer *ring;
1763         bool err = false, idle;
1764         int i;
1765
1766         if (!i915_enable_hangcheck)
1767                 return;
1768
1769         memset(acthd, 0, sizeof(acthd));
1770         idle = true;
1771         for_each_ring(ring, dev_priv, i) {
1772                 idle &= i915_hangcheck_ring_idle(ring, &err);
1773                 acthd[i] = intel_ring_get_active_head(ring);
1774         }
1775
1776         /* If all work is done then ACTHD clearly hasn't advanced. */
1777         if (idle) {
1778                 if (err) {
1779                         if (i915_hangcheck_hung(dev))
1780                                 return;
1781
1782                         goto repeat;
1783                 }
1784
1785                 dev_priv->hangcheck_count = 0;
1786                 return;
1787         }
1788
1789         i915_get_extra_instdone(dev, instdone);
1790         if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
1791             memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
1792                 if (i915_hangcheck_hung(dev))
1793                         return;
1794         } else {
1795                 dev_priv->hangcheck_count = 0;
1796
1797                 memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
1798                 memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
1799         }
1800
1801 repeat:
1802         /* Reset the timer in case the chip hangs without another request being added */
1803         mod_timer(&dev_priv->hangcheck_timer,
1804                   round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
1805 }
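/*
 * Sketch of the other half of the protocol (it lives in the GEM request
 * code, not in this file): each new request re-arms the timer, so hangcheck
 * only ticks while work is outstanding, roughly:
 *
 *	if (i915_enable_hangcheck)
 *		mod_timer(&dev_priv->hangcheck_timer,
 *			  round_jiffies_up(jiffies +
 *					   DRM_I915_HANGCHECK_JIFFIES));
 */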
1806
1807 /* drm_dma.h hooks
1808  */
1809 static void ironlake_irq_preinstall(struct drm_device *dev)
1810 {
1811         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1812
1813         atomic_set(&dev_priv->irq_received, 0);
1814
1815         I915_WRITE(HWSTAM, 0xeffe);
1816
1817         /* XXX hotplug from PCH */
1818
1819         I915_WRITE(DEIMR, 0xffffffff);
1820         I915_WRITE(DEIER, 0x0);
1821         POSTING_READ(DEIER);
1822
1823         /* and GT */
1824         I915_WRITE(GTIMR, 0xffffffff);
1825         I915_WRITE(GTIER, 0x0);
1826         POSTING_READ(GTIER);
1827
1828         /* south display irq */
1829         I915_WRITE(SDEIMR, 0xffffffff);
1830         I915_WRITE(SDEIER, 0x0);
1831         POSTING_READ(SDEIER);
1832 }
1833
1834 static void valleyview_irq_preinstall(struct drm_device *dev)
1835 {
1836         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1837         int pipe;
1838
1839         atomic_set(&dev_priv->irq_received, 0);
1840
1841         /* VLV magic */
1842         I915_WRITE(VLV_IMR, 0);
1843         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
1844         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
1845         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
1846
1847         /* and GT */
1848         I915_WRITE(GTIIR, I915_READ(GTIIR));
1849         I915_WRITE(GTIIR, I915_READ(GTIIR));
1850         I915_WRITE(GTIMR, 0xffffffff);
1851         I915_WRITE(GTIER, 0x0);
1852         POSTING_READ(GTIER);
1853
1854         I915_WRITE(DPINVGTT, 0xff);
1855
1856         I915_WRITE(PORT_HOTPLUG_EN, 0);
1857         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1858         for_each_pipe(pipe)
1859                 I915_WRITE(PIPESTAT(pipe), 0xffff);
1860         I915_WRITE(VLV_IIR, 0xffffffff);
1861         I915_WRITE(VLV_IMR, 0xffffffff);
1862         I915_WRITE(VLV_IER, 0x0);
1863         POSTING_READ(VLV_IER);
1864 }
1865
1866 /*
1867  * Enable digital hotplug on the PCH, and configure the DP short pulse
1868  * duration to 2ms (which is the minimum in the DisplayPort spec)
1869  *
1870  * This register is the same on all known PCH chips.
1871  */
1872
1873 static void ironlake_enable_pch_hotplug(struct drm_device *dev)
1874 {
1875         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1876         u32     hotplug;
1877
1878         hotplug = I915_READ(PCH_PORT_HOTPLUG);
1879         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
1880         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
1881         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
1882         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
1883         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
1884 }
1885
1886 static int ironlake_irq_postinstall(struct drm_device *dev)
1887 {
1888         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1889         /* enable the interrupt types we always want on */
1890         u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1891                            DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
1892                            DE_AUX_CHANNEL_A;
1893         u32 render_irqs;
1894         u32 hotplug_mask;
1895
1896         dev_priv->irq_mask = ~display_mask;
1897
1898         /* these should always be able to generate an irq */
1899         I915_WRITE(DEIIR, I915_READ(DEIIR));
1900         I915_WRITE(DEIMR, dev_priv->irq_mask);
1901         I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
1902         POSTING_READ(DEIER);
1903
1904         dev_priv->gt_irq_mask = ~0;
1905
1906         I915_WRITE(GTIIR, I915_READ(GTIIR));
1907         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1908
1909         if (IS_GEN6(dev))
1910                 render_irqs =
1911                         GT_USER_INTERRUPT |
1912                         GEN6_BSD_USER_INTERRUPT |
1913                         GEN6_BLITTER_USER_INTERRUPT;
1914         else
1915                 render_irqs =
1916                         GT_USER_INTERRUPT |
1917                         GT_PIPE_NOTIFY |
1918                         GT_BSD_USER_INTERRUPT;
1919         I915_WRITE(GTIER, render_irqs);
1920         POSTING_READ(GTIER);
1921
1922         if (HAS_PCH_CPT(dev)) {
1923                 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1924                                 SDE_PORTB_HOTPLUG_CPT |
1925                                 SDE_PORTC_HOTPLUG_CPT |
1926                                 SDE_PORTD_HOTPLUG_CPT |
1927                                 SDE_GMBUS_CPT |
1928                                 SDE_AUX_MASK_CPT);
1929         } else {
1930                 hotplug_mask = (SDE_CRT_HOTPLUG |
1931                                 SDE_PORTB_HOTPLUG |
1932                                 SDE_PORTC_HOTPLUG |
1933                                 SDE_PORTD_HOTPLUG |
1934                                 SDE_GMBUS |
1935                                 SDE_AUX_MASK);
1936         }
1937
1938         dev_priv->pch_irq_mask = ~hotplug_mask;
1939
1940         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1941         I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1942         I915_WRITE(SDEIER, hotplug_mask);
1943         POSTING_READ(SDEIER);
1944
1945         ironlake_enable_pch_hotplug(dev);
1946
1947         if (IS_IRONLAKE_M(dev)) {
1948                 /* Clear & enable PCU event interrupts */
1949                 I915_WRITE(DEIIR, DE_PCU_EVENT);
1950                 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
1951                 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
1952         }
1953
1954         return 0;
1955 }
1956
1957 static int ivybridge_irq_postinstall(struct drm_device *dev)
1958 {
1959         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1960         /* enable the interrupt types we always want on */
1961         u32 display_mask =
1962                 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
1963                 DE_PLANEC_FLIP_DONE_IVB |
1964                 DE_PLANEB_FLIP_DONE_IVB |
1965                 DE_PLANEA_FLIP_DONE_IVB |
1966                 DE_AUX_CHANNEL_A_IVB;
1967         u32 render_irqs;
1968         u32 hotplug_mask;
1969
1970         dev_priv->irq_mask = ~display_mask;
1971
1972         /* these should always be able to generate an irq */
1973         I915_WRITE(DEIIR, I915_READ(DEIIR));
1974         I915_WRITE(DEIMR, dev_priv->irq_mask);
1975         I915_WRITE(DEIER,
1976                    display_mask |
1977                    DE_PIPEC_VBLANK_IVB |
1978                    DE_PIPEB_VBLANK_IVB |
1979                    DE_PIPEA_VBLANK_IVB);
1980         POSTING_READ(DEIER);
1981
1982         dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
1983
1984         I915_WRITE(GTIIR, I915_READ(GTIIR));
1985         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1986
1987         render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
1988                 GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
1989         I915_WRITE(GTIER, render_irqs);
1990         POSTING_READ(GTIER);
1991
1992         hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1993                         SDE_PORTB_HOTPLUG_CPT |
1994                         SDE_PORTC_HOTPLUG_CPT |
1995                         SDE_PORTD_HOTPLUG_CPT |
1996                         SDE_GMBUS_CPT |
1997                         SDE_AUX_MASK_CPT);
1998         dev_priv->pch_irq_mask = ~hotplug_mask;
1999
2000         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2001         I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
2002         I915_WRITE(SDEIER, hotplug_mask);
2003         POSTING_READ(SDEIER);
2004
2005         ironlake_enable_pch_hotplug(dev);
2006
2007         return 0;
2008 }
2009
2010 static int valleyview_irq_postinstall(struct drm_device *dev)
2011 {
2012         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2013         u32 enable_mask;
2014         u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2015         u32 render_irqs;
2016         u16 msid;
2017
2018         enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2019         enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2020                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2021                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2022                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2023
2024         /*
2025          * Leave vblank interrupts masked initially.  enable/disable will
2026          * toggle them based on usage.
2027          */
2028         dev_priv->irq_mask = (~enable_mask) |
2029                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2030                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2031
2032         dev_priv->pipestat[0] = 0;
2033         dev_priv->pipestat[1] = 0;
2034
2035         /* Hack for broken MSIs on VLV: hand-program what are presumably the MSI capability's address (0x94) and data (0x98) registers; 0xfee00000 is the x86 MSI address window */
2036         pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
2037         pci_read_config_word(dev->pdev, 0x98, &msid);
2038         msid &= 0xff; /* mask out delivery bits */
2039         msid |= (1<<14);
2040         pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
2041
2042         I915_WRITE(PORT_HOTPLUG_EN, 0);
2043         POSTING_READ(PORT_HOTPLUG_EN);
2044
2045         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2046         I915_WRITE(VLV_IER, enable_mask);
2047         I915_WRITE(VLV_IIR, 0xffffffff);
2048         I915_WRITE(PIPESTAT(0), 0xffff);
2049         I915_WRITE(PIPESTAT(1), 0xffff);
2050         POSTING_READ(VLV_IER);
2051
2052         i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2053         i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2054         i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2055
2056         I915_WRITE(VLV_IIR, 0xffffffff);
2057         I915_WRITE(VLV_IIR, 0xffffffff);
2058
2059         I915_WRITE(GTIIR, I915_READ(GTIIR));
2060         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2061
2062         render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
2063                 GEN6_BLITTER_USER_INTERRUPT;
2064         I915_WRITE(GTIER, render_irqs);
2065         POSTING_READ(GTIER);
2066
2067         /* ack & enable invalid PTE error interrupts */
2068 #if 0 /* FIXME: add support to irq handler for checking these bits */
2069         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2070         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2071 #endif
2072
2073         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2074
2075         return 0;
2076 }
2077
2078 static void valleyview_hpd_irq_setup(struct drm_device *dev)
2079 {
2080         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2081         u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2082
2083         /* Note HDMI and DP share bits */
2084         if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2085                 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2086         if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2087                 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2088         if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2089                 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2090         if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
2091                 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2092         if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
2093                 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2094         if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2095                 hotplug_en |= CRT_HOTPLUG_INT_EN;
2096                 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2097         }
2098
2099         I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2100 }
2101
2102 static void valleyview_irq_uninstall(struct drm_device *dev)
2103 {
2104         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2105         int pipe;
2106
2107         if (!dev_priv)
2108                 return;
2109
2110         for_each_pipe(pipe)
2111                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2112
2113         I915_WRITE(HWSTAM, 0xffffffff);
2114         I915_WRITE(PORT_HOTPLUG_EN, 0);
2115         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2116         for_each_pipe(pipe)
2117                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2118         I915_WRITE(VLV_IIR, 0xffffffff);
2119         I915_WRITE(VLV_IMR, 0xffffffff);
2120         I915_WRITE(VLV_IER, 0x0);
2121         POSTING_READ(VLV_IER);
2122 }
2123
2124 static void ironlake_irq_uninstall(struct drm_device *dev)
2125 {
2126         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2127
2128         if (!dev_priv)
2129                 return;
2130
2131         I915_WRITE(HWSTAM, 0xffffffff);
2132
2133         I915_WRITE(DEIMR, 0xffffffff);
2134         I915_WRITE(DEIER, 0x0);
2135         I915_WRITE(DEIIR, I915_READ(DEIIR));
2136
2137         I915_WRITE(GTIMR, 0xffffffff);
2138         I915_WRITE(GTIER, 0x0);
2139         I915_WRITE(GTIIR, I915_READ(GTIIR));
2140
2141         I915_WRITE(SDEIMR, 0xffffffff);
2142         I915_WRITE(SDEIER, 0x0);
2143         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2144 }
2145
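/*
 * Gen2 exposes only 16 bit wide IER/IIR/IMR registers, hence the
 * I915_WRITE16()/I915_READ16() accessors used throughout the i8xx_*
 * routines below.
 */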
2146 static void i8xx_irq_preinstall(struct drm_device * dev)
2147 {
2148         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2149         int pipe;
2150
2151         atomic_set(&dev_priv->irq_received, 0);
2152
2153         for_each_pipe(pipe)
2154                 I915_WRITE(PIPESTAT(pipe), 0);
2155         I915_WRITE16(IMR, 0xffff);
2156         I915_WRITE16(IER, 0x0);
2157         POSTING_READ16(IER);
2158 }
2159
2160 static int i8xx_irq_postinstall(struct drm_device *dev)
2161 {
2162         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2163
2164         dev_priv->pipestat[0] = 0;
2165         dev_priv->pipestat[1] = 0;
2166
2167         I915_WRITE16(EMR,
2168                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2169
2170         /* Unmask the interrupts that we always want on. */
2171         dev_priv->irq_mask =
2172                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2173                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2174                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2175                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2176                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2177         I915_WRITE16(IMR, dev_priv->irq_mask);
2178
2179         I915_WRITE16(IER,
2180                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2181                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2182                      I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2183                      I915_USER_INTERRUPT);
2184         POSTING_READ16(IER);
2185
2186         return 0;
2187 }
2188
2189 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2190 {
2191         struct drm_device *dev = (struct drm_device *) arg;
2192         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2193         u16 iir, new_iir;
2194         u32 pipe_stats[2];
2195         unsigned long irqflags;
2196         int irq_received;
2197         int pipe;
2198         u16 flip_mask =
2199                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2200                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2201
2202         atomic_inc(&dev_priv->irq_received);
2203
2204         iir = I915_READ16(IIR);
2205         if (iir == 0)
2206                 return IRQ_NONE;
2207
2208         while (iir & ~flip_mask) {
2209                 /* Can't rely on pipestat interrupt bit in iir as it might
2210                  * have been cleared after the pipestat interrupt was received.
2211                  * It doesn't set the bit in iir again, but it still produces
2212                  * interrupts (for non-MSI).
2213                  */
2214                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2215                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2216                         i915_handle_error(dev, false);
2217
2218                 for_each_pipe(pipe) {
2219                         int reg = PIPESTAT(pipe);
2220                         pipe_stats[pipe] = I915_READ(reg);
2221
2222                         /*
2223                          * Clear the PIPE*STAT regs before the IIR
2224                          */
2225                         if (pipe_stats[pipe] & 0x8000ffff) {
2226                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2227                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2228                                                          pipe_name(pipe));
2229                                 I915_WRITE(reg, pipe_stats[pipe]);
2230                                 irq_received = 1;
2231                         }
2232                 }
2233                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2234
2235                 I915_WRITE16(IIR, iir & ~flip_mask);
2236                 new_iir = I915_READ16(IIR); /* Flush posted writes */
2237
2238                 i915_update_dri1_breadcrumb(dev);
2239
2240                 if (iir & I915_USER_INTERRUPT)
2241                         notify_ring(dev, &dev_priv->ring[RCS]);
2242
2243                 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
2244                     drm_handle_vblank(dev, 0)) {
2245                         if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
2246                                 intel_prepare_page_flip(dev, 0);
2247                                 intel_finish_page_flip(dev, 0);
2248                                 flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
2249                         }
2250                 }
2251
2252                 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
2253                     drm_handle_vblank(dev, 1)) {
2254                         if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
2255                                 intel_prepare_page_flip(dev, 1);
2256                                 intel_finish_page_flip(dev, 1);
2257                                 flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2258                         }
2259                 }
2260
2261                 iir = new_iir;
2262         }
2263
2264         return IRQ_HANDLED;
2265 }
2266
2267 static void i8xx_irq_uninstall(struct drm_device * dev)
2268 {
2269         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2270         int pipe;
2271
2272         for_each_pipe(pipe) {
2273                 /* Clear enable bits; then clear status bits */
2274                 I915_WRITE(PIPESTAT(pipe), 0);
2275                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2276         }
2277         I915_WRITE16(IMR, 0xffff);
2278         I915_WRITE16(IER, 0x0);
2279         I915_WRITE16(IIR, I915_READ16(IIR));
2280 }
2281
2282 static void i915_irq_preinstall(struct drm_device * dev)
2283 {
2284         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2285         int pipe;
2286
2287         atomic_set(&dev_priv->irq_received, 0);
2288
2289         if (I915_HAS_HOTPLUG(dev)) {
2290                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2291                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2292         }
2293
2294         I915_WRITE16(HWSTAM, 0xeffe);
2295         for_each_pipe(pipe)
2296                 I915_WRITE(PIPESTAT(pipe), 0);
2297         I915_WRITE(IMR, 0xffffffff);
2298         I915_WRITE(IER, 0x0);
2299         POSTING_READ(IER);
2300 }
2301
2302 static int i915_irq_postinstall(struct drm_device *dev)
2303 {
2304         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2305         u32 enable_mask;
2306
2307         dev_priv->pipestat[0] = 0;
2308         dev_priv->pipestat[1] = 0;
2309
2310         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2311
2312         /* Unmask the interrupts that we always want on. */
2313         dev_priv->irq_mask =
2314                 ~(I915_ASLE_INTERRUPT |
2315                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2316                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2317                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2318                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2319                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2320
2321         enable_mask =
2322                 I915_ASLE_INTERRUPT |
2323                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2324                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2325                 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2326                 I915_USER_INTERRUPT;
2327
2328         if (I915_HAS_HOTPLUG(dev)) {
2329                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2330                 POSTING_READ(PORT_HOTPLUG_EN);
2331
2332                 /* Enable in IER... */
2333                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2334                 /* and unmask in IMR */
2335                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2336         }
2337
2338         I915_WRITE(IMR, dev_priv->irq_mask);
2339         I915_WRITE(IER, enable_mask);
2340         POSTING_READ(IER);
2341
2342         intel_opregion_enable_asle(dev);
2343
2344         return 0;
2345 }
2346
2347 static void i915_hpd_irq_setup(struct drm_device *dev)
2348 {
2349         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2350         u32 hotplug_en;
2351
2352         if (I915_HAS_HOTPLUG(dev)) {
2353                 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2354
2355                 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2356                         hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2357                 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2358                         hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2359                 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2360                         hotplug_en |= HDMID_HOTPLUG_INT_EN;
2361                 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
2362                         hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2363                 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
2364                         hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2365                 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2366                         hotplug_en |= CRT_HOTPLUG_INT_EN;
2367                         hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2368                 }
2369
2370                 /* Ignore TV since it's buggy */
2371
2372                 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2373         }
2374 }
2375
2376 static irqreturn_t i915_irq_handler(int irq, void *arg)
2377 {
2378         struct drm_device *dev = (struct drm_device *) arg;
2379         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2380         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2381         unsigned long irqflags;
2382         u32 flip_mask =
2383                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2384                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2385         u32 flip[2] = {
2386                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
2387                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
2388         };
2389         int pipe, ret = IRQ_NONE;
2390
2391         atomic_inc(&dev_priv->irq_received);
2392
2393         iir = I915_READ(IIR);
2394         do {
2395                 bool irq_received = (iir & ~flip_mask) != 0;
2396                 bool blc_event = false;
2397
2398                 /* Can't rely on pipestat interrupt bit in iir as it might
2399                  * have been cleared after the pipestat interrupt was received.
2400                  * It doesn't set the bit in iir again, but it still produces
2401                  * interrupts (for non-MSI).
2402                  */
2403                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2404                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2405                         i915_handle_error(dev, false);
2406
2407                 for_each_pipe(pipe) {
2408                         int reg = PIPESTAT(pipe);
2409                         pipe_stats[pipe] = I915_READ(reg);
2410
2411                         /* Clear the PIPE*STAT regs before the IIR */
2412                         if (pipe_stats[pipe] & 0x8000ffff) {
2413                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2414                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2415                                                          pipe_name(pipe));
2416                                 I915_WRITE(reg, pipe_stats[pipe]);
2417                                 irq_received = true;
2418                         }
2419                 }
2420                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2421
2422                 if (!irq_received)
2423                         break;
2424
2425                 /* Consume port.  Then clear IIR or we'll miss events */
2426                 if ((I915_HAS_HOTPLUG(dev)) &&
2427                     (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2428                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2429
2430                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2431                                   hotplug_status);
2432                         if (hotplug_status & dev_priv->hotplug_supported_mask)
2433                                 queue_work(dev_priv->wq,
2434                                            &dev_priv->hotplug_work);
2435
2436                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2437                         POSTING_READ(PORT_HOTPLUG_STAT);
2438                 }
2439
2440                 I915_WRITE(IIR, iir & ~flip_mask);
2441                 new_iir = I915_READ(IIR); /* Flush posted writes */
2442
2443                 if (iir & I915_USER_INTERRUPT)
2444                         notify_ring(dev, &dev_priv->ring[RCS]);
2445
2446                 for_each_pipe(pipe) {
2447                         int plane = pipe;
2448                         if (IS_MOBILE(dev))
2449                                 plane = !plane;
2450                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2451                             drm_handle_vblank(dev, pipe)) {
2452                                 if (iir & flip[plane]) {
2453                                         intel_prepare_page_flip(dev, plane);
2454                                         intel_finish_page_flip(dev, pipe);
2455                                         flip_mask &= ~flip[plane];
2456                                 }
2457                         }
2458
2459                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2460                                 blc_event = true;
2461                 }
2462
2463                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2464                         intel_opregion_asle_intr(dev);
2465
2466                 /* With MSI, interrupts are only generated when iir
2467                  * transitions from zero to nonzero.  If another bit got
2468                  * set while we were handling the existing iir bits, then
2469                  * we would never get another interrupt.
2470                  *
2471                  * This is fine on non-MSI as well, as if we hit this path
2472                  * we avoid exiting the interrupt handler only to generate
2473                  * another one.
2474                  *
2475                  * Note that for MSI this could cause a stray interrupt report
2476                  * if an interrupt landed in the time between writing IIR and
2477                  * the posting read.  This should be rare enough to never
2478                  * trigger the 99% of 100,000 interrupts test for disabling
2479                  * stray interrupts.
2480                  */
2481                 ret = IRQ_HANDLED;
2482                 iir = new_iir;
2483         } while (iir & ~flip_mask);
2484
2485         i915_update_dri1_breadcrumb(dev);
2486
2487         return ret;
2488 }
2489
2490 static void i915_irq_uninstall(struct drm_device * dev)
2491 {
2492         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2493         int pipe;
2494
2495         if (I915_HAS_HOTPLUG(dev)) {
2496                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2497                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2498         }
2499
2500         I915_WRITE16(HWSTAM, 0xffff);
2501         for_each_pipe(pipe) {
2502                 /* Clear enable bits; then clear status bits */
2503                 I915_WRITE(PIPESTAT(pipe), 0);
2504                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2505         }
2506         I915_WRITE(IMR, 0xffffffff);
2507         I915_WRITE(IER, 0x0);
2508
2509         I915_WRITE(IIR, I915_READ(IIR));
2510 }
2511
2512 static void i965_irq_preinstall(struct drm_device * dev)
2513 {
2514         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2515         int pipe;
2516
2517         atomic_set(&dev_priv->irq_received, 0);
2518
2519         I915_WRITE(PORT_HOTPLUG_EN, 0);
2520         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2521
2522         I915_WRITE(HWSTAM, 0xeffe);
2523         for_each_pipe(pipe)
2524                 I915_WRITE(PIPESTAT(pipe), 0);
2525         I915_WRITE(IMR, 0xffffffff);
2526         I915_WRITE(IER, 0x0);
2527         POSTING_READ(IER);
2528 }
2529
2530 static int i965_irq_postinstall(struct drm_device *dev)
2531 {
2532         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2533         u32 enable_mask;
2534         u32 error_mask;
2535
2536         /* Unmask the interrupts that we always want on. */
2537         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2538                                I915_DISPLAY_PORT_INTERRUPT |
2539                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2540                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2541                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2542                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2543                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2544
2545         enable_mask = ~dev_priv->irq_mask;
2546         enable_mask |= I915_USER_INTERRUPT;
2547
2548         if (IS_G4X(dev))
2549                 enable_mask |= I915_BSD_USER_INTERRUPT;
2550
2551         dev_priv->pipestat[0] = 0;
2552         dev_priv->pipestat[1] = 0;
2553         i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2554
2555         /*
2556          * Enable some error detection, note the instruction error mask
2557          * bit is reserved, so we leave it masked.
2558          */
2559         if (IS_G4X(dev)) {
2560                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
2561                                GM45_ERROR_MEM_PRIV |
2562                                GM45_ERROR_CP_PRIV |
2563                                I915_ERROR_MEMORY_REFRESH);
2564         } else {
2565                 error_mask = ~(I915_ERROR_PAGE_TABLE |
2566                                I915_ERROR_MEMORY_REFRESH);
2567         }
2568         I915_WRITE(EMR, error_mask);
2569
2570         I915_WRITE(IMR, dev_priv->irq_mask);
2571         I915_WRITE(IER, enable_mask);
2572         POSTING_READ(IER);
2573
2574         I915_WRITE(PORT_HOTPLUG_EN, 0);
2575         POSTING_READ(PORT_HOTPLUG_EN);
2576
2577         intel_opregion_enable_asle(dev);
2578
2579         return 0;
2580 }
2581
2582 static void i965_hpd_irq_setup(struct drm_device *dev)
2583 {
2584         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2585         u32 hotplug_en;
2586
2587         /* Note HDMI and DP share hotplug bits */
2588         hotplug_en = 0;
2589         if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2590                 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2591         if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2592                 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2593         if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2594                 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2595         if (IS_G4X(dev)) {
2596                 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
2597                         hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2598                 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
2599                         hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2600         } else {
2601                 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
2602                         hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2603                 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
2604                         hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2605         }
2606         if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2607                 hotplug_en |= CRT_HOTPLUG_INT_EN;
2608
2609                 /* Programming the CRT detection parameters tends
2610                  * to generate a spurious hotplug event about three
2611                  * seconds later.  So just do it once.
2612                  */
2613                 if (IS_G4X(dev))
2614                         hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2615                 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2616         }
2617
2618         /* Ignore TV since it's buggy */
2619
2620         I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2621 }
2622
2623 static irqreturn_t i965_irq_handler(int irq, void *arg)
2624 {
2625         struct drm_device *dev = (struct drm_device *) arg;
2626         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2627         u32 iir, new_iir;
2628         u32 pipe_stats[I915_MAX_PIPES];
2629         unsigned long irqflags;
2630         int irq_received;
2631         int ret = IRQ_NONE, pipe;
2632
2633         atomic_inc(&dev_priv->irq_received);
2634
2635         iir = I915_READ(IIR);
2636
2637         for (;;) {
2638                 bool blc_event = false;
2639
2640                 irq_received = iir != 0;
2641
2642                 /* Can't rely on pipestat interrupt bit in iir as it might
2643                  * have been cleared after the pipestat interrupt was received.
2644                  * It doesn't set the bit in iir again, but it still produces
2645                  * interrupts (for non-MSI).
2646                  */
2647                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2648                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2649                         i915_handle_error(dev, false);
2650
2651                 for_each_pipe(pipe) {
2652                         int reg = PIPESTAT(pipe);
2653                         pipe_stats[pipe] = I915_READ(reg);
2654
2655                         /*
2656                          * Clear the PIPE*STAT regs before the IIR
2657                          */
2658                         if (pipe_stats[pipe] & 0x8000ffff) {
2659                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2660                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2661                                                          pipe_name(pipe));
2662                                 I915_WRITE(reg, pipe_stats[pipe]);
2663                                 irq_received = 1;
2664                         }
2665                 }
2666                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2667
2668                 if (!irq_received)
2669                         break;
2670
2671                 ret = IRQ_HANDLED;
2672
2673                 /* Consume port.  Then clear IIR or we'll miss events */
2674                 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
2675                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2676
2677                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2678                                   hotplug_status);
2679                         if (hotplug_status & dev_priv->hotplug_supported_mask)
2680                                 queue_work(dev_priv->wq,
2681                                            &dev_priv->hotplug_work);
2682
2683                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2684                         I915_READ(PORT_HOTPLUG_STAT);
2685                 }
2686
2687                 I915_WRITE(IIR, iir);
2688                 new_iir = I915_READ(IIR); /* Flush posted writes */
2689
2690                 if (iir & I915_USER_INTERRUPT)
2691                         notify_ring(dev, &dev_priv->ring[RCS]);
2692                 if (iir & I915_BSD_USER_INTERRUPT)
2693                         notify_ring(dev, &dev_priv->ring[VCS]);
2694
2695                 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
2696                         intel_prepare_page_flip(dev, 0);
2697
2698                 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
2699                         intel_prepare_page_flip(dev, 1);
2700
2701                 for_each_pipe(pipe) {
2702                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2703                             drm_handle_vblank(dev, pipe)) {
2704                                 i915_pageflip_stall_check(dev, pipe);
2705                                 intel_finish_page_flip(dev, pipe);
2706                         }
2707
2708                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2709                                 blc_event = true;
2710                 }
2711
2712
2713                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2714                         intel_opregion_asle_intr(dev);
2715
2716                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2717                         gmbus_irq_handler(dev);
2718
2719                 /* With MSI, interrupts are only generated when iir
2720                  * transitions from zero to nonzero.  If another bit got
2721                  * set while we were handling the existing iir bits, then
2722                  * we would never get another interrupt.
2723                  *
2724                  * This is fine on non-MSI as well, as if we hit this path
2725                  * we avoid exiting the interrupt handler only to generate
2726                  * another one.
2727                  *
2728                  * Note that for MSI this could cause a stray interrupt report
2729                  * if an interrupt landed in the time between writing IIR and
2730                  * the posting read.  This should be rare enough to never
2731                  * trigger the 99% of 100,000 interrupts test for disabling
2732                  * stray interrupts.
2733                  */
2734                 iir = new_iir;
2735         }
2736
2737         i915_update_dri1_breadcrumb(dev);
2738
2739         return ret;
2740 }
2741
2742 static void i965_irq_uninstall(struct drm_device * dev)
2743 {
2744         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2745         int pipe;
2746
2747         if (!dev_priv)
2748                 return;
2749
2750         I915_WRITE(PORT_HOTPLUG_EN, 0);
2751         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2752
2753         I915_WRITE(HWSTAM, 0xffffffff);
2754         for_each_pipe(pipe)
2755                 I915_WRITE(PIPESTAT(pipe), 0);
2756         I915_WRITE(IMR, 0xffffffff);
2757         I915_WRITE(IER, 0x0);
2758
2759         for_each_pipe(pipe)
2760                 I915_WRITE(PIPESTAT(pipe),
2761                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
2762         I915_WRITE(IIR, I915_READ(IIR));
2763 }
2764
2765 void intel_irq_init(struct drm_device *dev)
2766 {
2767         struct drm_i915_private *dev_priv = dev->dev_private;
2768
2769         INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2770         INIT_WORK(&dev_priv->error_work, i915_error_work_func);
2771         INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
2772         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
2773
2774         setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
2775                     (unsigned long) dev);
2776
2777         pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
2778
2779         dev->driver->get_vblank_counter = i915_get_vblank_counter;
2780         dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2781         if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
2782                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
2783                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
2784         }
2785
2786         if (drm_core_check_feature(dev, DRIVER_MODESET))
2787                 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
2788         else
2789                 dev->driver->get_vblank_timestamp = NULL;
2790         dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
2791
2792         if (IS_VALLEYVIEW(dev)) {
2793                 dev->driver->irq_handler = valleyview_irq_handler;
2794                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
2795                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
2796                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
2797                 dev->driver->enable_vblank = valleyview_enable_vblank;
2798                 dev->driver->disable_vblank = valleyview_disable_vblank;
2799                 dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
2800         } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
2801                 /* Share pre & uninstall handlers with ILK/SNB */
2802                 dev->driver->irq_handler = ivybridge_irq_handler;
2803                 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2804                 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2805                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2806                 dev->driver->enable_vblank = ivybridge_enable_vblank;
2807                 dev->driver->disable_vblank = ivybridge_disable_vblank;
2808         } else if (HAS_PCH_SPLIT(dev)) {
2809                 dev->driver->irq_handler = ironlake_irq_handler;
2810                 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2811                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
2812                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2813                 dev->driver->enable_vblank = ironlake_enable_vblank;
2814                 dev->driver->disable_vblank = ironlake_disable_vblank;
2815         } else {
2816                 if (INTEL_INFO(dev)->gen == 2) {
2817                         dev->driver->irq_preinstall = i8xx_irq_preinstall;
2818                         dev->driver->irq_postinstall = i8xx_irq_postinstall;
2819                         dev->driver->irq_handler = i8xx_irq_handler;
2820                         dev->driver->irq_uninstall = i8xx_irq_uninstall;
2821                 } else if (INTEL_INFO(dev)->gen == 3) {
2822                         dev->driver->irq_preinstall = i915_irq_preinstall;
2823                         dev->driver->irq_postinstall = i915_irq_postinstall;
2824                         dev->driver->irq_uninstall = i915_irq_uninstall;
2825                         dev->driver->irq_handler = i915_irq_handler;
2826                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
2827                 } else {
2828                         dev->driver->irq_preinstall = i965_irq_preinstall;
2829                         dev->driver->irq_postinstall = i965_irq_postinstall;
2830                         dev->driver->irq_uninstall = i965_irq_uninstall;
2831                         dev->driver->irq_handler = i965_irq_handler;
2832                         dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
2833                 }
2834                 dev->driver->enable_vblank = i915_enable_vblank;
2835                 dev->driver->disable_vblank = i915_disable_vblank;
2836         }
2837 }
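/*
 * Illustrative load-time ordering (these calls live in the driver load
 * path, not in this file; error handling omitted):
 *
 *	intel_irq_init(dev);	<- fills in the vtable above
 *	drm_irq_install(dev);	<- invokes irq_preinstall/postinstall
 *	intel_hpd_init(dev);	<- then arms hotplug via the hook below
 */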
2838
2839 void intel_hpd_init(struct drm_device *dev)
2840 {
2841         struct drm_i915_private *dev_priv = dev->dev_private;
2842
2843         if (dev_priv->display.hpd_irq_setup)
2844                 dev_priv->display.hpd_irq_setup(dev);
2845 }