drm/i915/vlv: WA to fix Voltage not getting dropped to Vmin when Gfx is power gated.
author Deepak S <deepak.s@intel.com>
Thu, 30 Jan 2014 17:38:16 +0000 (23:08 +0530)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Tue, 4 Feb 2014 10:59:19 +0000 (11:59 +0100)
When we enter RC6 and the GFX clocks are off, the voltage remains higher
than Vmin. If we then try to set the frequency to RPn, the request might
fail since the Gfx clocks are down. To fix this, on Gfx idle bring the
GFX clock up, set the frequency to RPn, and then let the GFX clock go
back down.
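
A condensed sketch of the sequence the new vlv_set_rps_idle() helper in the
diff below performs when Gfx goes idle; interrupt masking, locking and some
error handling are trimmed, so this is an illustration of the workaround
rather than the exact code. Register and helper names are the ones used by
the patch itself:

	/* 1. Force the GFX clock on so the P-unit can service the request */
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
		   I915_READ(VLV_GTLC_SURVIVABILITY_REG) | VLV_GFX_CLK_FORCE_ON_BIT);
	if (wait_for(I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT, 5))
		return;	/* GFX clock never came up */

	/* 2. Request the minimum frequency (RPn) and wait for the P-unit ack */
	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, dev_priv->rps.min_delay);
	wait_for((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS) &
		  GENFREQSTATUS) == 0, 5);

	/* 3. Drop the force bit so the GFX clock (and voltage) can go back down */
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
		   I915_READ(VLV_GTLC_SURVIVABILITY_REG) & ~VLV_GFX_CLK_FORCE_ON_BIT);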

v2: remove vlv_update_rps_cur_delay function. Update commit message (Daniel)

v3: Fix the timeout during wait for gfx clock (Jesse)

v4: addressed comments on set freq and punit wait (Ville)

v5: use wait_for while waiting for GFX clk to be up. (Daniel)
    update cur_delay before requesting min_delay. (Ville)

v6: use wait_for while waiting for punit. (Ville)

Signed-off-by: Deepak S <deepak.s@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_pm.c

index fa37dfdad4e3d770cb0d29707806ddca651fa779..e908c99106404f0d6201a3717a20c5d8f7090aa0 100644
@@ -1984,6 +1984,8 @@ extern void intel_console_resume(struct work_struct *work);
 void i915_queue_hangcheck(struct drm_device *dev);
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
+void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
+                                                       int new_delay);
 extern void intel_irq_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
 
index ec9eec4ce9528f9f57d2e067c94f1c6166c0a386..56edff3bbbb9177784da4249a4daa76133a89a98 100644
@@ -986,7 +986,7 @@ static void notify_ring(struct drm_device *dev,
        i915_queue_hangcheck(dev);
 }
 
-static void gen6_set_pm_mask(struct drm_i915_private *dev_priv,
+void gen6_set_pm_mask(struct drm_i915_private *dev_priv,
                             u32 pm_iir, int new_delay)
 {
        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
index abd18cd58aa1b25e6e2ad2b4ea9abceca18db4da..9d0f4f7bbe6a78b24f97da65e61f401b4a7426f9 100644
                                                 GEN6_PM_RP_DOWN_THRESHOLD | \
                                                 GEN6_PM_RP_DOWN_TIMEOUT)
 
+#define VLV_GTLC_SURVIVABILITY_REG              0x130098
+#define VLV_GFX_CLK_STATUS_BIT                 (1<<3)
+#define VLV_GFX_CLK_FORCE_ON_BIT               (1<<2)
+
 #define GEN6_GT_GFX_RC6_LOCKED                 0x138104
 #define VLV_COUNTER_CONTROL                    0x138104
 #define   VLV_COUNT_RANGE_HIGH                 (1<<15)
index df18bb6ffb6f74f637c250177dfd94251cd87935..9ab388313d3dc5d8547dfd1a16b9d2879f555e84 100644
@@ -3038,6 +3038,58 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
        trace_intel_gpu_freq_change(val * 50);
 }
 
+/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
+ *
+ * * If Gfx is Idle, then
+ * 1. Mask Turbo interrupts
+ * 2. Bring up Gfx clock
+ * 3. Change the freq to Rpn and wait till P-Unit updates freq
+ * 4. Clear the Force GFX CLK ON bit so that Gfx can down
+ * 5. Unmask Turbo interrupts
+*/
+static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
+{
+       /*
+        * When we are idle.  Drop to min voltage state.
+        */
+
+       if (dev_priv->rps.cur_delay <= dev_priv->rps.min_delay)
+               return;
+
+       /* Mask turbo interrupt so that they will not come in between */
+       I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+
+       /* Bring up the Gfx clock */
+       I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
+               I915_READ(VLV_GTLC_SURVIVABILITY_REG) |
+                               VLV_GFX_CLK_FORCE_ON_BIT);
+
+       if (wait_for(((VLV_GFX_CLK_STATUS_BIT &
+               I915_READ(VLV_GTLC_SURVIVABILITY_REG)) != 0), 5)) {
+                       DRM_ERROR("GFX_CLK_ON request timed out\n");
+               return;
+       }
+
+       dev_priv->rps.cur_delay = dev_priv->rps.min_delay;
+
+       vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
+                                       dev_priv->rps.min_delay);
+
+       if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
+                               & GENFREQSTATUS) == 0, 5))
+               DRM_ERROR("timed out waiting for Punit\n");
+
+       /* Release the Gfx clock */
+       I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
+               I915_READ(VLV_GTLC_SURVIVABILITY_REG) &
+                               ~VLV_GFX_CLK_FORCE_ON_BIT);
+
+       /* Unmask Up interrupts */
+       dev_priv->rps.rp_up_masked = true;
+       gen6_set_pm_mask(dev_priv, GEN6_PM_RP_DOWN_THRESHOLD,
+                                               dev_priv->rps.min_delay);
+}
+
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = dev_priv->dev;
@@ -3045,7 +3097,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
        mutex_lock(&dev_priv->rps.hw_lock);
        if (dev_priv->rps.enabled) {
                if (IS_VALLEYVIEW(dev))
-                       valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+                       vlv_set_rps_idle(dev_priv);
                else
                        gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
                dev_priv->rps.last_adj = 0;
@@ -4276,6 +4328,7 @@ void intel_gpu_ips_teardown(void)
        i915_mch_dev = NULL;
        spin_unlock_irq(&mchdev_lock);
 }
+
 static void intel_init_emon(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;