drm/i915: Make the ring IMR handling private
author    Chris Wilson <chris@chris-wilson.co.uk>
          Wed, 5 Jan 2011 10:32:24 +0000 (10:32 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>
          Tue, 11 Jan 2011 20:43:58 +0000 (20:43 +0000)
As the IMR for the USER interrupts is not modified elsewhere, we can
separate the spinlock used for it from the one protecting hpd and pipestat.
Those two IMRs are manipulated from within an IRQ handler and so need
heavier locking.

Reported-and-tested-by: Alexey Fisher <bug-track@fisher-privat.net>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
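
For readers unfamiliar with the idiom being relocated, the sketch below shows the
refcounted enable/disable pattern that render_ring_get_irq()/render_ring_put_irq()
use, guarded by a per-ring lock as in this patch. It is a minimal userspace C
sketch, not the driver's code: the names fake_ring, hw_enable_user_irq() and
hw_disable_user_irq() are illustrative stand-ins, and a pthread mutex stands in
for the kernel spinlock_t that the real ring->irq_lock is.

/*
 * Minimal sketch of the refcounted IRQ enable/disable idiom with a
 * dedicated per-ring lock.  All names here are hypothetical stand-ins;
 * the real driver uses spinlock_t and writes the ring's IMR register.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_ring {
	pthread_mutex_t irq_lock;	/* stands in for ring->irq_lock */
	unsigned int    irq_refcount;	/* how many callers want the IRQ */
};

static void hw_enable_user_irq(void)  { puts("unmask USER interrupt"); }
static void hw_disable_user_irq(void) { puts("mask USER interrupt"); }

/* First caller actually unmasks the interrupt; later callers only count. */
static bool fake_ring_get_irq(struct fake_ring *ring)
{
	pthread_mutex_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0)
		hw_enable_user_irq();
	pthread_mutex_unlock(&ring->irq_lock);
	return true;
}

/* Last caller to drop its reference masks the interrupt again. */
static void fake_ring_put_irq(struct fake_ring *ring)
{
	pthread_mutex_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0)
		hw_disable_user_irq();
	pthread_mutex_unlock(&ring->irq_lock);
}

int main(void)
{
	struct fake_ring ring = {
		.irq_lock = PTHREAD_MUTEX_INITIALIZER,
		.irq_refcount = 0,
	};

	fake_ring_get_irq(&ring);	/* unmasks */
	fake_ring_get_irq(&ring);	/* refcount only */
	fake_ring_put_irq(&ring);	/* refcount only */
	fake_ring_put_irq(&ring);	/* masks again */
	return 0;
}

Per the commit message, the hpd and pipestat masks are touched from interrupt
context and therefore keep the heavier dev_priv->irq_lock, while the ring IMR is
only touched from these get/put paths; a private ring->irq_lock is therefore
sufficient and no longer contends with the display interrupt code.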
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 13cad981713befbe2abce68a4afdb92ea737ebf1..03e3370725179bd14785aef19bf83f5c13c285ee 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -526,7 +526,7 @@ render_ring_get_irq(struct intel_ring_buffer *ring)
        if (!dev->irq_enabled)
                return false;
 
-       spin_lock(&dev_priv->irq_lock);
+       spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_enable_irq(dev_priv,
@@ -534,7 +534,7 @@ render_ring_get_irq(struct intel_ring_buffer *ring)
                else
                        i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
        }
-       spin_unlock(&dev_priv->irq_lock);
+       spin_unlock(&ring->irq_lock);
 
        return true;
 }
@@ -545,7 +545,7 @@ render_ring_put_irq(struct intel_ring_buffer *ring)
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
 
-       spin_lock(&dev_priv->irq_lock);
+       spin_lock(&ring->irq_lock);
        if (--ring->irq_refcount == 0) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_disable_irq(dev_priv,
@@ -554,7 +554,7 @@ render_ring_put_irq(struct intel_ring_buffer *ring)
                else
                        i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
        }
-       spin_unlock(&dev_priv->irq_lock);
+       spin_unlock(&ring->irq_lock);
 }
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -620,10 +620,10 @@ ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
        if (!dev->irq_enabled)
                return false;
 
-       spin_lock(&dev_priv->irq_lock);
+       spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0)
                ironlake_enable_irq(dev_priv, flag);
-       spin_unlock(&dev_priv->irq_lock);
+       spin_unlock(&ring->irq_lock);
 
        return true;
 }
@@ -634,10 +634,10 @@ ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
 
-       spin_lock(&dev_priv->irq_lock);
+       spin_lock(&ring->irq_lock);
        if (--ring->irq_refcount == 0)
                ironlake_disable_irq(dev_priv, flag);
-       spin_unlock(&dev_priv->irq_lock);
+       spin_unlock(&ring->irq_lock);
 }
 
 static bool
@@ -649,13 +649,13 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
        if (!dev->irq_enabled)
                return false;
 
-       spin_lock(&dev_priv->irq_lock);
+       spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0) {
                ring->irq_mask &= ~rflag;
                I915_WRITE_IMR(ring, ring->irq_mask);
                ironlake_enable_irq(dev_priv, gflag);
        }
-       spin_unlock(&dev_priv->irq_lock);
+       spin_unlock(&ring->irq_lock);
 
        return true;
 }
@@ -666,13 +666,13 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
 
-       spin_lock(&dev_priv->irq_lock);
+       spin_lock(&ring->irq_lock);
        if (--ring->irq_refcount == 0) {
                ring->irq_mask |= rflag;
                I915_WRITE_IMR(ring, ring->irq_mask);
                ironlake_disable_irq(dev_priv, gflag);
        }
-       spin_unlock(&dev_priv->irq_lock);
+       spin_unlock(&ring->irq_lock);
 }
 
 static bool
@@ -814,6 +814,8 @@ int intel_init_ring_buffer(struct drm_device *dev,
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        INIT_LIST_HEAD(&ring->gpu_write_list);
+
+       spin_lock_init(&ring->irq_lock);
        ring->irq_mask = ~0;
 
        if (I915_NEED_GFX_HWS(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6b1d9a5a7d0712865c5c9f24625e13960d66f29a..be9087e4c9beeb4c561ebc474a2b38a6124ce8cd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -55,6 +55,7 @@ struct  intel_ring_buffer {
        int             effective_size;
        struct intel_hw_status_page status_page;
 
+       spinlock_t      irq_lock;
        u32             irq_refcount;
        u32             irq_mask;
        u32             irq_seqno;              /* last seq seem at irq time */