trace_i915_gem_request_wait_begin(dev, seqno);
ring->waiting_seqno = seqno;
- ring->irq_get(ring);
- if (interruptible)
- ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
- else
- wait_event(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
+ ret = -ENODEV;
+ if (ring->irq_get(ring)) {
+ if (interruptible)
+ ret = wait_event_interruptible(ring->irq_queue,
+ i915_seqno_passed(ring->get_seqno(ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
+ else
+ wait_event(ring->irq_queue,
+ i915_seqno_passed(ring->get_seqno(ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
- ring->irq_put(ring);
+ ring->irq_put(ring);
+ }
ring->waiting_seqno = 0;
trace_i915_gem_request_wait_end(dev, seqno);
* generation is designed to be run atomically and so is
* lockless.
*/
- ring->irq_get(ring);
- ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
- ring->irq_put(ring);
+ if (ring->irq_get(ring)) {
+ ret = wait_event_interruptible(ring->irq_queue,
+ i915_seqno_passed(ring->get_seqno(ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
+ ring->irq_put(ring);
- if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
- ret = -EIO;
+ if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+ ret = -EIO;
+ }
}
if (ret == 0)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
- if (dev_priv->trace_irq_seqno == 0)
- ring->irq_get(ring);
-
- dev_priv->trace_irq_seqno = seqno;
+ if (dev_priv->trace_irq_seqno == 0 &&
+ ring->irq_get(ring))
+ dev_priv->trace_irq_seqno = seqno;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
- ring->irq_get(ring);
- DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
- READ_BREADCRUMB(dev_priv) >= irq_nr);
- ring->irq_put(ring);
+ ret = -ENODEV;
+ if (ring->irq_get(ring)) {
+ DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+ READ_BREADCRUMB(dev_priv) >= irq_nr);
+ ring->irq_put(ring);
+ }
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
-static void
+static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- if (dev->irq_enabled && ++ring->irq_refcount == 1) {
+ if (!dev->irq_enabled)
+ return false;
+
+ if (atomic_inc_return(&ring->irq_refcount) == 1) {
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
if (HAS_PCH_SPLIT(dev))
ironlake_enable_graphics_irq(dev_priv,
GT_USER_INTERRUPT);
else
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
-
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
+
+ return true;
}
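For render_ring_get_irq() above (and the other handlers below) to return bool and drive the refcount with atomic_inc_return()/atomic_dec_and_test(), the irq_get/irq_put hooks and the refcount member of struct intel_ring_buffer (declared in intel_ringbuffer.h, not part of this excerpt) presumably change as well. A reduced sketch of just those members, with everything else in the struct omitted:

/* Reduced sketch only: the real struct intel_ring_buffer has many more
 * members.  Shown are just the pieces this patch's calling convention
 * relies on -- an atomic refcount and a get hook that can fail.
 */
struct intel_ring_buffer {
	/* ... */
	atomic_t	irq_refcount;	/* 0->1 enables, 1->0 disables the IRQ */
	bool		(*irq_get)(struct intel_ring_buffer *ring);	/* false: interrupts unavailable */
	void		(*irq_put)(struct intel_ring_buffer *ring);
	/* ... */
};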
static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- BUG_ON(dev->irq_enabled && ring->irq_refcount == 0);
- if (dev->irq_enabled && --ring->irq_refcount == 0) {
+ if (atomic_dec_and_test(&ring->irq_refcount)) {
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long irqflags;
return 0;
}
-static void
+static bool
ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
{
struct drm_device *dev = ring->dev;
- if (dev->irq_enabled && ++ring->irq_refcount == 1) {
+ if (!dev->irq_enabled)
+ return false;
+
+ if (atomic_inc_return(&ring->irq_refcount) == 1) {
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long irqflags;
ironlake_enable_graphics_irq(dev_priv, flag);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
+
+ return true;
}
static void
ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
{
struct drm_device *dev = ring->dev;
- if (dev->irq_enabled && --ring->irq_refcount == 0) {
+ if (atomic_dec_and_test(&ring->irq_refcount)) {
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long irqflags;
}
}
-
-static void
+static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
- ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
+ return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
}
static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
- ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
+ ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
}
static int
return 0;
}
-static void
+static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
- ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+ return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
}
static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
- ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+ ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
}
/* ring buffer for Video Codec for Gen6+ */
/* Blitter support (SandyBridge+) */
-static void
+static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
- ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
+ return ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
}
static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
- ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
+ ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
}