/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define MAX_NOPID ((u32)~0)
/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \
				   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
				   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)

/** These are all of the interrupts used by the driver */
#define I915_INTERRUPT_ENABLE_MASK (I915_INTERRUPT_ENABLE_FIX | \
				    I915_INTERRUPT_ENABLE_VAR)
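
/*
 * Note: the FIX interrupts stay unmasked in IMR for the lifetime of the
 * driver; the per-pipe display events are gated solely through the enable
 * bits in PIPESTAT (see i915_enable_pipestat() below), while only the VAR
 * interrupts are toggled in IMR at runtime via i915_enable_irq() and
 * i915_disable_irq().
 */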
static inline void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != 0) {
		dev_priv->irq_mask_reg &= ~mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);	/* posting read to flush the IMR write */
	}
}

static inline void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != mask) {
		dev_priv->irq_mask_reg |= mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}

/* Map a pipe index to its PIPESTAT register. */
static inline u32
i915_pipestat(int pipe)
{
	if (pipe == 0)
		return PIPEASTAT;
	if (pipe == 1)
		return PIPEBSTAT;
	BUG();
}
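
/*
 * PIPESTAT layout as used below: the low 16 bits hold the status bits and
 * each enable bit sits 16 positions above its status bit, so an enable mask
 * shifted right by 16 addresses the matching status bits.  Status bits are
 * cleared by writing 1 to them.
 */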
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		(void) I915_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		(void) I915_READ(reg);
	}
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;

	if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
		return 1;

	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, count;

	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
		return 0;
	}

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
		low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
		       PIPE_FRAME_LOW_SHIFT);
		high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
	} while (high1 != high2);

	/* 16-bit high field plus 8-bit low field give a 24-bit frame counter */
	count = (high1 << 8) | low;

	return count;
}

irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipea_stats, pipeb_stats;
	u32 vblank_status;
	u32 vblank_enable;
	int vblank = 0;
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	if (IS_I965G(dev)) {
		vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
		vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
	} else {
		vblank_status = I915_VBLANK_INTERRUPT_STATUS;
		vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
	}

	for (;;) {
		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
		pipea_stats = I915_READ(PIPEASTAT);
		pipeb_stats = I915_READ(PIPEBSTAT);
		/*
		 * Clear the PIPE(A|B)STAT regs before the IIR
		 */
		if (pipea_stats & 0x8000ffff) {
			I915_WRITE(PIPEASTAT, pipea_stats);
			irq_received = 1;
		}

		if (pipeb_stats & 0x8000ffff) {
			I915_WRITE(PIPEBSTAT, pipeb_stats);
			irq_received = 1;
		}
		spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (dev_priv->sarea_priv)
			dev_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);

		if (iir & I915_USER_INTERRUPT) {
			dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
			DRM_WAKEUP(&dev_priv->irq_queue);
		}

		if (pipea_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 0);
		}

		if (pipeb_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 1);
		}

		if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
		    (iir & I915_ASLE_INTERRUPT))
			opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}

static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i915_kernel_lost_context(dev);

	DRM_DEBUG("\n");

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	return dev_priv->counter;
}
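
/*
 * i915_user_irq_get()/i915_user_irq_put() refcount the consumers of the
 * user interrupt: I915_USER_INTERRUPT is unmasked in IMR only while at
 * least one caller is waiting on a breadcrumb, and masked again when the
 * last reference is dropped.  Both run under user_irq_lock, which also
 * protects the PIPESTAT accesses done from the interrupt handler.
 */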
void i915_user_irq_get(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
		i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

void i915_user_irq_put(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
		i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
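
/*
 * Wait until the breadcrumb written by i915_emit_irq() reaches irq_nr,
 * using the user interrupt to wake up.  Gives up after 3 seconds and
 * reports -EBUSY if the breadcrumb never arrives.
 */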
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = 0;

	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (dev_priv->sarea_priv) {
			dev_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
		}
		return 0;
	}

	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	i915_user_irq_get(dev);
	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);
	i915_user_irq_put(dev);

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_dispatch =
			READ_BREADCRUMB(dev_priv);

	return ret;
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}
	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (IS_I965G(dev))
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return 0;
}

int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}
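
/*
 * Installation sequence: the DRM core calls irq_preinstall() before
 * requesting the interrupt line (everything is masked and pending status
 * cleared), and irq_postinstall() afterwards to program IER/IMR and the
 * PIPESTAT shadows with the configuration we actually want.
 */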
/* drm_dma.h hooks
*/
void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE(HWSTAM, 0xeffe);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	(void) I915_READ(IER);
}

int i915_driver_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/* Disable pipe interrupt enables, clear pending pipe status */
	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
	/* Clear pending interrupt status */
	I915_WRITE(IIR, I915_READ(IIR));

	I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	(void) I915_READ(IER);

	opregion_enable_asle(dev);
	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);

	return 0;
}

void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}