1 /*
2  * Copyright © 2012 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25  *
26  */
27
28 #include <linux/cpufreq.h>
29 #include "i915_drv.h"
30 #include "intel_drv.h"
31 #include "../../../platform/x86/intel_ips.h"
32 #include <linux/module.h>
33 #include <drm/i915_powerwell.h>
34
35 #define FORCEWAKE_ACK_TIMEOUT_MS 2
36
37 /* FBC, or Frame Buffer Compression, is a technique employed to compress the
38  * framebuffer contents in-memory, aiming at reducing the required bandwidth
39  * during in-memory transfers and, therefore, reducing power consumption.
40  *
41  * The benefits of FBC are mostly visible with solid backgrounds and
42  * variation-less patterns.
43  *
44  * FBC-related functionality can be enabled by means of the
45  * i915.i915_enable_fbc parameter
46  */
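/*
 * Illustrative usage note (editor's addition, not from the original file):
 * with the driver built as a module, the parameter above is set from the
 * kernel command line, e.g.
 *
 *   i915.i915_enable_fbc=1    force FBC on
 *   i915.i915_enable_fbc=0    force FBC off
 *
 * The default of -1 defers to the per-chip policy applied in
 * intel_update_fbc() below ("disabled per chip default" on gen <= 7
 * parts other than Haswell).
 */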
47
48 static bool intel_crtc_active(struct drm_crtc *crtc)
49 {
50         /* Be paranoid as we can arrive here with only partial
51          * state retrieved from the hardware during setup.
52          */
53         return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
54 }
55
56 static void i8xx_disable_fbc(struct drm_device *dev)
57 {
58         struct drm_i915_private *dev_priv = dev->dev_private;
59         u32 fbc_ctl;
60
61         /* Disable compression */
62         fbc_ctl = I915_READ(FBC_CONTROL);
63         if ((fbc_ctl & FBC_CTL_EN) == 0)
64                 return;
65
66         fbc_ctl &= ~FBC_CTL_EN;
67         I915_WRITE(FBC_CONTROL, fbc_ctl);
68
69         /* Wait for compressing bit to clear */
70         if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
71                 DRM_DEBUG_KMS("FBC idle timed out\n");
72                 return;
73         }
74
75         DRM_DEBUG_KMS("disabled FBC\n");
76 }
77
78 static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
79 {
80         struct drm_device *dev = crtc->dev;
81         struct drm_i915_private *dev_priv = dev->dev_private;
82         struct drm_framebuffer *fb = crtc->fb;
83         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
84         struct drm_i915_gem_object *obj = intel_fb->obj;
85         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
86         int cfb_pitch;
87         int plane, i;
88         u32 fbc_ctl, fbc_ctl2;
89
90         cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
91         if (fb->pitches[0] < cfb_pitch)
92                 cfb_pitch = fb->pitches[0];
93
94         /* FBC_CTL wants 64B units */
95         cfb_pitch = (cfb_pitch / 64) - 1;
96         plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
97
98         /* Clear old tags */
99         for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
100                 I915_WRITE(FBC_TAG + (i * 4), 0);
101
102         /* Set it up... */
103         fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
104         fbc_ctl2 |= plane;
105         I915_WRITE(FBC_CONTROL2, fbc_ctl2);
106         I915_WRITE(FBC_FENCE_OFF, crtc->y);
107
108         /* enable it... */
109         fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
110         if (IS_I945GM(dev))
111                 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
112         fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
113         fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
114         fbc_ctl |= obj->fence_reg;
115         I915_WRITE(FBC_CONTROL, fbc_ctl);
116
117         DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
118                       cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
119 }
120
121 static bool i8xx_fbc_enabled(struct drm_device *dev)
122 {
123         struct drm_i915_private *dev_priv = dev->dev_private;
124
125         return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
126 }
127
128 static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
129 {
130         struct drm_device *dev = crtc->dev;
131         struct drm_i915_private *dev_priv = dev->dev_private;
132         struct drm_framebuffer *fb = crtc->fb;
133         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
134         struct drm_i915_gem_object *obj = intel_fb->obj;
135         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
136         int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
137         unsigned long stall_watermark = 200;
138         u32 dpfc_ctl;
139
140         dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
141         dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
142         I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
143
144         I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
145                    (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
146                    (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
147         I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
148
149         /* enable it... */
150         I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
151
152         DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
153 }
154
155 static void g4x_disable_fbc(struct drm_device *dev)
156 {
157         struct drm_i915_private *dev_priv = dev->dev_private;
158         u32 dpfc_ctl;
159
160         /* Disable compression */
161         dpfc_ctl = I915_READ(DPFC_CONTROL);
162         if (dpfc_ctl & DPFC_CTL_EN) {
163                 dpfc_ctl &= ~DPFC_CTL_EN;
164                 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
165
166                 DRM_DEBUG_KMS("disabled FBC\n");
167         }
168 }
169
170 static bool g4x_fbc_enabled(struct drm_device *dev)
171 {
172         struct drm_i915_private *dev_priv = dev->dev_private;
173
174         return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
175 }
176
177 static void sandybridge_blit_fbc_update(struct drm_device *dev)
178 {
179         struct drm_i915_private *dev_priv = dev->dev_private;
180         u32 blt_ecoskpd;
181
182         /* Make sure blitter notifies FBC of writes */
183         gen6_gt_force_wake_get(dev_priv);
184         blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
185         blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
186                 GEN6_BLITTER_LOCK_SHIFT;
187         I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
188         blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
189         I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
190         blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
191                          GEN6_BLITTER_LOCK_SHIFT);
192         I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
193         POSTING_READ(GEN6_BLITTER_ECOSKPD);
194         gen6_gt_force_wake_put(dev_priv);
195 }
196
197 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
198 {
199         struct drm_device *dev = crtc->dev;
200         struct drm_i915_private *dev_priv = dev->dev_private;
201         struct drm_framebuffer *fb = crtc->fb;
202         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
203         struct drm_i915_gem_object *obj = intel_fb->obj;
204         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
205         int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
206         unsigned long stall_watermark = 200;
207         u32 dpfc_ctl;
208
209         dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
210         dpfc_ctl &= DPFC_RESERVED;
211         dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
212         /* Set persistent mode for front-buffer rendering, ala X. */
213         dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
214         dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
215         I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
216
217         I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
218                    (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
219                    (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
220         I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
221         I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
222         /* enable it... */
223         I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
224
225         if (IS_GEN6(dev)) {
226                 I915_WRITE(SNB_DPFC_CTL_SA,
227                            SNB_CPU_FENCE_ENABLE | obj->fence_reg);
228                 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
229                 sandybridge_blit_fbc_update(dev);
230         }
231
232         DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
233 }
234
235 static void ironlake_disable_fbc(struct drm_device *dev)
236 {
237         struct drm_i915_private *dev_priv = dev->dev_private;
238         u32 dpfc_ctl;
239
240         /* Disable compression */
241         dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
242         if (dpfc_ctl & DPFC_CTL_EN) {
243                 dpfc_ctl &= ~DPFC_CTL_EN;
244                 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
245
246                 if (IS_IVYBRIDGE(dev))
247                         /* WaFbcDisableDpfcClockGating:ivb */
248                         I915_WRITE(ILK_DSPCLK_GATE_D,
249                                    I915_READ(ILK_DSPCLK_GATE_D) &
250                                    ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
251
252                 if (IS_HASWELL(dev))
253                         /* WaFbcDisableDpfcClockGating:hsw */
254                         I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
255                                    I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
256                                    ~HSW_DPFC_GATING_DISABLE);
257
258                 DRM_DEBUG_KMS("disabled FBC\n");
259         }
260 }
261
262 static bool ironlake_fbc_enabled(struct drm_device *dev)
263 {
264         struct drm_i915_private *dev_priv = dev->dev_private;
265
266         return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
267 }
268
269 static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
270 {
271         struct drm_device *dev = crtc->dev;
272         struct drm_i915_private *dev_priv = dev->dev_private;
273         struct drm_framebuffer *fb = crtc->fb;
274         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
275         struct drm_i915_gem_object *obj = intel_fb->obj;
276         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
277
278         I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
279
280         I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
281                    IVB_DPFC_CTL_FENCE_EN |
282                    intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);
283
284         if (IS_IVYBRIDGE(dev)) {
285                 /* WaFbcAsynchFlipDisableFbcQueue:ivb */
286                 I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
287                 /* WaFbcDisableDpfcClockGating:ivb */
288                 I915_WRITE(ILK_DSPCLK_GATE_D,
289                            I915_READ(ILK_DSPCLK_GATE_D) |
290                            ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
291         } else {
292                 /* WaFbcAsynchFlipDisableFbcQueue:hsw */
293                 I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
294                            HSW_BYPASS_FBC_QUEUE);
295                 /* WaFbcDisableDpfcClockGating:hsw */
296                 I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
297                            I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
298                            HSW_DPFC_GATING_DISABLE);
299         }
300
301         I915_WRITE(SNB_DPFC_CTL_SA,
302                    SNB_CPU_FENCE_ENABLE | obj->fence_reg);
303         I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
304
305         sandybridge_blit_fbc_update(dev);
306
307         DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
308 }
309
310 bool intel_fbc_enabled(struct drm_device *dev)
311 {
312         struct drm_i915_private *dev_priv = dev->dev_private;
313
314         if (!dev_priv->display.fbc_enabled)
315                 return false;
316
317         return dev_priv->display.fbc_enabled(dev);
318 }
319
320 static void intel_fbc_work_fn(struct work_struct *__work)
321 {
322         struct intel_fbc_work *work =
323                 container_of(to_delayed_work(__work),
324                              struct intel_fbc_work, work);
325         struct drm_device *dev = work->crtc->dev;
326         struct drm_i915_private *dev_priv = dev->dev_private;
327
328         mutex_lock(&dev->struct_mutex);
329         if (work == dev_priv->fbc.fbc_work) {
330                 /* Double check that we haven't switched fb without cancelling
331                  * the prior work.
332                  */
333                 if (work->crtc->fb == work->fb) {
334                         dev_priv->display.enable_fbc(work->crtc,
335                                                      work->interval);
336
337                         dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
338                         dev_priv->fbc.fb_id = work->crtc->fb->base.id;
339                         dev_priv->fbc.y = work->crtc->y;
340                 }
341
342                 dev_priv->fbc.fbc_work = NULL;
343         }
344         mutex_unlock(&dev->struct_mutex);
345
346         kfree(work);
347 }
348
349 static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
350 {
351         if (dev_priv->fbc.fbc_work == NULL)
352                 return;
353
354         DRM_DEBUG_KMS("cancelling pending FBC enable\n");
355
356         /* Synchronisation is provided by struct_mutex and checking of
357          * dev_priv->fbc.fbc_work, so we can perform the cancellation
358          * entirely asynchronously.
359          */
360         if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
361                 /* tasklet was killed before being run, clean up */
362                 kfree(dev_priv->fbc.fbc_work);
363
364         /* Mark the work as no longer wanted so that if it does
365          * wake-up (because the work was already running and waiting
366          * for our mutex), it will discover that it is no longer
367          * necessary to run.
368          */
369         dev_priv->fbc.fbc_work = NULL;
370 }
371
372 static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
373 {
374         struct intel_fbc_work *work;
375         struct drm_device *dev = crtc->dev;
376         struct drm_i915_private *dev_priv = dev->dev_private;
377
378         if (!dev_priv->display.enable_fbc)
379                 return;
380
381         intel_cancel_fbc_work(dev_priv);
382
383         work = kzalloc(sizeof(*work), GFP_KERNEL);
384         if (work == NULL) {
385                 DRM_ERROR("Failed to allocate FBC work structure\n");
386                 dev_priv->display.enable_fbc(crtc, interval);
387                 return;
388         }
389
390         work->crtc = crtc;
391         work->fb = crtc->fb;
392         work->interval = interval;
393         INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
394
395         dev_priv->fbc.fbc_work = work;
396
397         /* Delay the actual enabling to let pageflipping cease and the
398          * display to settle before starting the compression. Note that
399          * this delay also serves a second purpose: it allows for a
400          * vblank to pass after disabling the FBC before we attempt
401          * to modify the control registers.
402          *
403          * A more complicated solution would involve tracking vblanks
404          * following the termination of the page-flipping sequence
405          * and indeed performing the enable as a co-routine and not
406          * waiting synchronously upon the vblank.
407          */
408         schedule_delayed_work(&work->work, msecs_to_jiffies(50));
409 }
410
411 void intel_disable_fbc(struct drm_device *dev)
412 {
413         struct drm_i915_private *dev_priv = dev->dev_private;
414
415         intel_cancel_fbc_work(dev_priv);
416
417         if (!dev_priv->display.disable_fbc)
418                 return;
419
420         dev_priv->display.disable_fbc(dev);
421         dev_priv->fbc.plane = -1;
422 }
423
424 /**
425  * intel_update_fbc - enable/disable FBC as needed
426  * @dev: the drm_device
427  *
428  * Set up the framebuffer compression hardware at mode set time.  We
429  * enable it if possible:
430  *   - plane A only (on pre-965)
431  *   - no pixel multiply/line duplication
432  *   - no alpha buffer discard
433  *   - no dual wide
434  *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
435  *
436  * We can't assume that any compression will take place (worst case),
437  * so the compressed buffer has to be the same size as the uncompressed
438  * one.  It also must reside (along with the line length buffer) in
439  * stolen memory.
440  *
441  * We need to enable/disable FBC on a global basis.
442  */
443 void intel_update_fbc(struct drm_device *dev)
444 {
445         struct drm_i915_private *dev_priv = dev->dev_private;
446         struct drm_crtc *crtc = NULL, *tmp_crtc;
447         struct intel_crtc *intel_crtc;
448         struct drm_framebuffer *fb;
449         struct intel_framebuffer *intel_fb;
450         struct drm_i915_gem_object *obj;
451         unsigned int max_hdisplay, max_vdisplay;
452
453         if (!i915_powersave)
454                 return;
455
456         if (!I915_HAS_FBC(dev))
457                 return;
458
459         /*
460          * If FBC is already on, we just have to verify that we can
461          * keep it that way...
462          * Need to disable if:
463          *   - more than one pipe is active
464          *   - changing FBC params (stride, fence, mode)
465          *   - new fb is too large to fit in compressed buffer
466          *   - going to an unsupported config (interlace, pixel multiply, etc.)
467          */
468         list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
469                 if (intel_crtc_active(tmp_crtc) &&
470                     !to_intel_crtc(tmp_crtc)->primary_disabled) {
471                         if (crtc) {
472                                 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
473                                 dev_priv->fbc.no_fbc_reason =
474                                         FBC_MULTIPLE_PIPES;
475                                 goto out_disable;
476                         }
477                         crtc = tmp_crtc;
478                 }
479         }
480
481         if (!crtc || crtc->fb == NULL) {
482                 DRM_DEBUG_KMS("no output, disabling\n");
483                 dev_priv->fbc.no_fbc_reason = FBC_NO_OUTPUT;
484                 goto out_disable;
485         }
486
487         intel_crtc = to_intel_crtc(crtc);
488         fb = crtc->fb;
489         intel_fb = to_intel_framebuffer(fb);
490         obj = intel_fb->obj;
491
492         if (i915_enable_fbc < 0 &&
493             INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
494                 DRM_DEBUG_KMS("disabled per chip default\n");
495                 dev_priv->fbc.no_fbc_reason = FBC_CHIP_DEFAULT;
496                 goto out_disable;
497         }
498         if (!i915_enable_fbc) {
499                 DRM_DEBUG_KMS("fbc disabled per module param\n");
500                 dev_priv->fbc.no_fbc_reason = FBC_MODULE_PARAM;
501                 goto out_disable;
502         }
503         if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
504             (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
505                 DRM_DEBUG_KMS("mode incompatible with compression, "
506                               "disabling\n");
507                 dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED_MODE;
508                 goto out_disable;
509         }
510
511         if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
512                 max_hdisplay = 4096;
513                 max_vdisplay = 2048;
514         } else {
515                 max_hdisplay = 2048;
516                 max_vdisplay = 1536;
517         }
518         if ((crtc->mode.hdisplay > max_hdisplay) ||
519             (crtc->mode.vdisplay > max_vdisplay)) {
520                 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
521                 dev_priv->fbc.no_fbc_reason = FBC_MODE_TOO_LARGE;
522                 goto out_disable;
523         }
524         if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
525             intel_crtc->plane != 0) {
526                 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
527                 dev_priv->fbc.no_fbc_reason = FBC_BAD_PLANE;
528                 goto out_disable;
529         }
530
531         /* The use of a CPU fence is mandatory in order to detect writes
532          * by the CPU to the scanout and trigger updates to the FBC.
533          */
534         if (obj->tiling_mode != I915_TILING_X ||
535             obj->fence_reg == I915_FENCE_REG_NONE) {
536                 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
537                 dev_priv->fbc.no_fbc_reason = FBC_NOT_TILED;
538                 goto out_disable;
539         }
540
541         /* If the kernel debugger is active, always disable compression */
542         if (in_dbg_master())
543                 goto out_disable;
544
545         if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
546                 DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
547                 dev_priv->fbc.no_fbc_reason = FBC_STOLEN_TOO_SMALL;
548                 goto out_disable;
549         }
550
551         /* If the scanout has not changed, don't modify the FBC settings.
552          * Note that we make the fundamental assumption that the fb->obj
553          * cannot be unpinned (and have its GTT offset and fence revoked)
554          * without first being decoupled from the scanout and FBC disabled.
555          */
556         if (dev_priv->fbc.plane == intel_crtc->plane &&
557             dev_priv->fbc.fb_id == fb->base.id &&
558             dev_priv->fbc.y == crtc->y)
559                 return;
560
561         if (intel_fbc_enabled(dev)) {
562                 /* We update FBC along two paths, after changing fb/crtc
563                  * configuration (modeswitching) and after page-flipping
564                  * finishes. For the latter, we know that not only did
565                  * we disable the FBC at the start of the page-flip
566                  * sequence, but also more than one vblank has passed.
567                  *
568                  * For the former case of modeswitching, it is possible
569                  * to switch between two FBC valid configurations
570                  * instantaneously so we do need to disable the FBC
571                  * before we can modify its control registers. We also
572                  * have to wait for the next vblank for that to take
573                  * effect. However, since we delay enabling FBC we can
574                  * assume that a vblank has passed since disabling and
575                  * that we can safely alter the registers in the deferred
576                  * callback.
577                  *
578                  * In the scenario that we go from a valid to invalid
579                  * and then back to valid FBC configuration we have
580                  * no strict enforcement that a vblank occurred since
581                  * disabling the FBC. However, along all current pipe
582                  * disabling paths we do need to wait for a vblank at
583                  * some point. And we wait before enabling FBC anyway.
584                  */
585                 DRM_DEBUG_KMS("disabling active FBC for update\n");
586                 intel_disable_fbc(dev);
587         }
588
589         intel_enable_fbc(crtc, 500);
590         return;
591
592 out_disable:
593         /* Multiple disables should be harmless */
594         if (intel_fbc_enabled(dev)) {
595                 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
596                 intel_disable_fbc(dev);
597         }
598         i915_gem_stolen_cleanup_compression(dev);
599 }
600
601 static void i915_pineview_get_mem_freq(struct drm_device *dev)
602 {
603         drm_i915_private_t *dev_priv = dev->dev_private;
604         u32 tmp;
605
606         tmp = I915_READ(CLKCFG);
607
608         switch (tmp & CLKCFG_FSB_MASK) {
609         case CLKCFG_FSB_533:
610                 dev_priv->fsb_freq = 533; /* 133*4 */
611                 break;
612         case CLKCFG_FSB_800:
613                 dev_priv->fsb_freq = 800; /* 200*4 */
614                 break;
615         case CLKCFG_FSB_667:
616                 dev_priv->fsb_freq =  667; /* 167*4 */
617                 break;
618         case CLKCFG_FSB_400:
619                 dev_priv->fsb_freq = 400; /* 100*4 */
620                 break;
621         }
622
623         switch (tmp & CLKCFG_MEM_MASK) {
624         case CLKCFG_MEM_533:
625                 dev_priv->mem_freq = 533;
626                 break;
627         case CLKCFG_MEM_667:
628                 dev_priv->mem_freq = 667;
629                 break;
630         case CLKCFG_MEM_800:
631                 dev_priv->mem_freq = 800;
632                 break;
633         }
634
635         /* detect pineview DDR3 setting */
636         tmp = I915_READ(CSHRDDR3CTL);
637         dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
638 }
639
640 static void i915_ironlake_get_mem_freq(struct drm_device *dev)
641 {
642         drm_i915_private_t *dev_priv = dev->dev_private;
643         u16 ddrpll, csipll;
644
645         ddrpll = I915_READ16(DDRMPLL1);
646         csipll = I915_READ16(CSIPLL0);
647
648         switch (ddrpll & 0xff) {
649         case 0xc:
650                 dev_priv->mem_freq = 800;
651                 break;
652         case 0x10:
653                 dev_priv->mem_freq = 1066;
654                 break;
655         case 0x14:
656                 dev_priv->mem_freq = 1333;
657                 break;
658         case 0x18:
659                 dev_priv->mem_freq = 1600;
660                 break;
661         default:
662                 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
663                                  ddrpll & 0xff);
664                 dev_priv->mem_freq = 0;
665                 break;
666         }
667
668         dev_priv->ips.r_t = dev_priv->mem_freq;
669
670         switch (csipll & 0x3ff) {
671         case 0x00c:
672                 dev_priv->fsb_freq = 3200;
673                 break;
674         case 0x00e:
675                 dev_priv->fsb_freq = 3733;
676                 break;
677         case 0x010:
678                 dev_priv->fsb_freq = 4266;
679                 break;
680         case 0x012:
681                 dev_priv->fsb_freq = 4800;
682                 break;
683         case 0x014:
684                 dev_priv->fsb_freq = 5333;
685                 break;
686         case 0x016:
687                 dev_priv->fsb_freq = 5866;
688                 break;
689         case 0x018:
690                 dev_priv->fsb_freq = 6400;
691                 break;
692         default:
693                 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
694                                  csipll & 0x3ff);
695                 dev_priv->fsb_freq = 0;
696                 break;
697         }
698
699         if (dev_priv->fsb_freq == 3200) {
700                 dev_priv->ips.c_m = 0;
701         } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
702                 dev_priv->ips.c_m = 1;
703         } else {
704                 dev_priv->ips.c_m = 2;
705         }
706 }
707
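/*
 * Editor's note on the table below (field order assumed from struct
 * cxsr_latency): each row appears to be
 *   { is_desktop, is_ddr3, fsb_freq, mem_freq,
 *     display_sr, display_hpll_disable, cursor_sr, cursor_hpll_disable }
 * The first four columns are what intel_get_cxsr_latency() matches
 * against; the remaining four are the latency values consumed by
 * pineview_update_wm().
 */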
708 static const struct cxsr_latency cxsr_latency_table[] = {
709         {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
710         {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
711         {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
712         {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
713         {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */
714
715         {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
716         {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
717         {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
718         {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
719         {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */
720
721         {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
722         {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
723         {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
724         {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
725         {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */
726
727         {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
728         {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
729         {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
730         {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
731         {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */
732
733         {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
734         {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
735         {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
736         {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
737         {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */
738
739         {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
740         {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
741         {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
742         {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
743         {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
744 };
745
746 static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
747                                                          int is_ddr3,
748                                                          int fsb,
749                                                          int mem)
750 {
751         const struct cxsr_latency *latency;
752         int i;
753
754         if (fsb == 0 || mem == 0)
755                 return NULL;
756
757         for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
758                 latency = &cxsr_latency_table[i];
759                 if (is_desktop == latency->is_desktop &&
760                     is_ddr3 == latency->is_ddr3 &&
761                     fsb == latency->fsb_freq && mem == latency->mem_freq)
762                         return latency;
763         }
764
765         DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
766
767         return NULL;
768 }
769
770 static void pineview_disable_cxsr(struct drm_device *dev)
771 {
772         struct drm_i915_private *dev_priv = dev->dev_private;
773
774         /* deactivate cxsr */
775         I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
776 }
777
778 /*
779  * Latency for FIFO fetches is dependent on several factors:
780  *   - memory configuration (speed, channels)
781  *   - chipset
782  *   - current MCH state
783  * It can be fairly high in some situations, so here we assume a fairly
784  * pessimal value.  It's a tradeoff between extra memory fetches (if we
785  * set this value too high, the FIFO will fetch frequently to stay full)
786  * and power consumption (set it too low to save power and we might see
787  * FIFO underruns and display "flicker").
788  *
789  * A value of 5us seems to be a good balance; safe for very low end
790  * platforms but not overly aggressive on lower latency configs.
791  */
792 static const int latency_ns = 5000;
793
794 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
795 {
796         struct drm_i915_private *dev_priv = dev->dev_private;
797         uint32_t dsparb = I915_READ(DSPARB);
798         int size;
799
800         size = dsparb & 0x7f;
801         if (plane)
802                 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
803
804         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
805                       plane ? "B" : "A", size);
806
807         return size;
808 }
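/*
 * Worked example (editor's illustration): DSPARB holds the FIFO split
 * points between the display planes. If the A/B split field
 * (dsparb & 0x7f) reads 64 and the B/C split field
 * ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) reads 96, plane A owns
 * 64 FIFO entries and plane B owns 96 - 64 = 32.
 */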
809
810 static int i85x_get_fifo_size(struct drm_device *dev, int plane)
811 {
812         struct drm_i915_private *dev_priv = dev->dev_private;
813         uint32_t dsparb = I915_READ(DSPARB);
814         int size;
815
816         size = dsparb & 0x1ff;
817         if (plane)
818                 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
819         size >>= 1; /* Convert to cachelines */
820
821         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
822                       plane ? "B" : "A", size);
823
824         return size;
825 }
826
827 static int i845_get_fifo_size(struct drm_device *dev, int plane)
828 {
829         struct drm_i915_private *dev_priv = dev->dev_private;
830         uint32_t dsparb = I915_READ(DSPARB);
831         int size;
832
833         size = dsparb & 0x7f;
834         size >>= 2; /* Convert to cachelines */
835
836         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
837                       plane ? "B" : "A",
838                       size);
839
840         return size;
841 }
842
843 static int i830_get_fifo_size(struct drm_device *dev, int plane)
844 {
845         struct drm_i915_private *dev_priv = dev->dev_private;
846         uint32_t dsparb = I915_READ(DSPARB);
847         int size;
848
849         size = dsparb & 0x7f;
850         size >>= 1; /* Convert to cachelines */
851
852         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
853                       plane ? "B" : "A", size);
854
855         return size;
856 }
857
858 /* Pineview has different values for various configs */
859 static const struct intel_watermark_params pineview_display_wm = {
860         PINEVIEW_DISPLAY_FIFO,
861         PINEVIEW_MAX_WM,
862         PINEVIEW_DFT_WM,
863         PINEVIEW_GUARD_WM,
864         PINEVIEW_FIFO_LINE_SIZE
865 };
866 static const struct intel_watermark_params pineview_display_hplloff_wm = {
867         PINEVIEW_DISPLAY_FIFO,
868         PINEVIEW_MAX_WM,
869         PINEVIEW_DFT_HPLLOFF_WM,
870         PINEVIEW_GUARD_WM,
871         PINEVIEW_FIFO_LINE_SIZE
872 };
873 static const struct intel_watermark_params pineview_cursor_wm = {
874         PINEVIEW_CURSOR_FIFO,
875         PINEVIEW_CURSOR_MAX_WM,
876         PINEVIEW_CURSOR_DFT_WM,
877         PINEVIEW_CURSOR_GUARD_WM,
878         PINEVIEW_FIFO_LINE_SIZE,
879 };
880 static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
881         PINEVIEW_CURSOR_FIFO,
882         PINEVIEW_CURSOR_MAX_WM,
883         PINEVIEW_CURSOR_DFT_WM,
884         PINEVIEW_CURSOR_GUARD_WM,
885         PINEVIEW_FIFO_LINE_SIZE
886 };
887 static const struct intel_watermark_params g4x_wm_info = {
888         G4X_FIFO_SIZE,
889         G4X_MAX_WM,
890         G4X_MAX_WM,
891         2,
892         G4X_FIFO_LINE_SIZE,
893 };
894 static const struct intel_watermark_params g4x_cursor_wm_info = {
895         I965_CURSOR_FIFO,
896         I965_CURSOR_MAX_WM,
897         I965_CURSOR_DFT_WM,
898         2,
899         G4X_FIFO_LINE_SIZE,
900 };
901 static const struct intel_watermark_params valleyview_wm_info = {
902         VALLEYVIEW_FIFO_SIZE,
903         VALLEYVIEW_MAX_WM,
904         VALLEYVIEW_MAX_WM,
905         2,
906         G4X_FIFO_LINE_SIZE,
907 };
908 static const struct intel_watermark_params valleyview_cursor_wm_info = {
909         I965_CURSOR_FIFO,
910         VALLEYVIEW_CURSOR_MAX_WM,
911         I965_CURSOR_DFT_WM,
912         2,
913         G4X_FIFO_LINE_SIZE,
914 };
915 static const struct intel_watermark_params i965_cursor_wm_info = {
916         I965_CURSOR_FIFO,
917         I965_CURSOR_MAX_WM,
918         I965_CURSOR_DFT_WM,
919         2,
920         I915_FIFO_LINE_SIZE,
921 };
922 static const struct intel_watermark_params i945_wm_info = {
923         I945_FIFO_SIZE,
924         I915_MAX_WM,
925         1,
926         2,
927         I915_FIFO_LINE_SIZE
928 };
929 static const struct intel_watermark_params i915_wm_info = {
930         I915_FIFO_SIZE,
931         I915_MAX_WM,
932         1,
933         2,
934         I915_FIFO_LINE_SIZE
935 };
936 static const struct intel_watermark_params i855_wm_info = {
937         I855GM_FIFO_SIZE,
938         I915_MAX_WM,
939         1,
940         2,
941         I830_FIFO_LINE_SIZE
942 };
943 static const struct intel_watermark_params i830_wm_info = {
944         I830_FIFO_SIZE,
945         I915_MAX_WM,
946         1,
947         2,
948         I830_FIFO_LINE_SIZE
949 };
950
951 static const struct intel_watermark_params ironlake_display_wm_info = {
952         ILK_DISPLAY_FIFO,
953         ILK_DISPLAY_MAXWM,
954         ILK_DISPLAY_DFTWM,
955         2,
956         ILK_FIFO_LINE_SIZE
957 };
958 static const struct intel_watermark_params ironlake_cursor_wm_info = {
959         ILK_CURSOR_FIFO,
960         ILK_CURSOR_MAXWM,
961         ILK_CURSOR_DFTWM,
962         2,
963         ILK_FIFO_LINE_SIZE
964 };
965 static const struct intel_watermark_params ironlake_display_srwm_info = {
966         ILK_DISPLAY_SR_FIFO,
967         ILK_DISPLAY_MAX_SRWM,
968         ILK_DISPLAY_DFT_SRWM,
969         2,
970         ILK_FIFO_LINE_SIZE
971 };
972 static const struct intel_watermark_params ironlake_cursor_srwm_info = {
973         ILK_CURSOR_SR_FIFO,
974         ILK_CURSOR_MAX_SRWM,
975         ILK_CURSOR_DFT_SRWM,
976         2,
977         ILK_FIFO_LINE_SIZE
978 };
979
980 static const struct intel_watermark_params sandybridge_display_wm_info = {
981         SNB_DISPLAY_FIFO,
982         SNB_DISPLAY_MAXWM,
983         SNB_DISPLAY_DFTWM,
984         2,
985         SNB_FIFO_LINE_SIZE
986 };
987 static const struct intel_watermark_params sandybridge_cursor_wm_info = {
988         SNB_CURSOR_FIFO,
989         SNB_CURSOR_MAXWM,
990         SNB_CURSOR_DFTWM,
991         2,
992         SNB_FIFO_LINE_SIZE
993 };
994 static const struct intel_watermark_params sandybridge_display_srwm_info = {
995         SNB_DISPLAY_SR_FIFO,
996         SNB_DISPLAY_MAX_SRWM,
997         SNB_DISPLAY_DFT_SRWM,
998         2,
999         SNB_FIFO_LINE_SIZE
1000 };
1001 static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
1002         SNB_CURSOR_SR_FIFO,
1003         SNB_CURSOR_MAX_SRWM,
1004         SNB_CURSOR_DFT_SRWM,
1005         2,
1006         SNB_FIFO_LINE_SIZE
1007 };
1008
1009
1010 /**
1011  * intel_calculate_wm - calculate watermark level
1012  * @clock_in_khz: pixel clock
1013  * @wm: chip FIFO params
1014  * @pixel_size: display pixel size
1015  * @latency_ns: memory latency for the platform
1016  *
1017  * Calculate the watermark level (the level at which the display plane will
1018  * start fetching from memory again).  Each chip has a different display
1019  * FIFO size and allocation, so the caller needs to figure that out and pass
1020  * in the correct intel_watermark_params structure.
1021  *
1022  * As the pixel clock runs, the FIFO will be drained at a rate that depends
1023  * on the pixel size.  When it reaches the watermark level, it'll start
1024  * fetching FIFO-line-sized chunks from memory until the FIFO fills
1025  * past the watermark point.  If the FIFO drains completely, a FIFO underrun
1026  * will occur, and a display engine hang could result.
1027  */
1028 static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
1029                                         const struct intel_watermark_params *wm,
1030                                         int fifo_size,
1031                                         int pixel_size,
1032                                         unsigned long latency_ns)
1033 {
1034         long entries_required, wm_size;
1035
1036         /*
1037          * Note: we need to make sure we don't overflow for various clock &
1038          * latency values.
1039          * clocks go from a few thousand to several hundred thousand (kHz),
1040          * and latency is usually a few thousand nanoseconds.
1041          */
1042         entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
1043                 1000;
1044         entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
1045
1046         DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
1047
1048         wm_size = fifo_size - (entries_required + wm->guard_size);
1049
1050         DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
1051
1052         /* Don't promote wm_size to unsigned... */
1053         if (wm_size > (long)wm->max_wm)
1054                 wm_size = wm->max_wm;
1055         if (wm_size <= 0)
1056                 wm_size = wm->default_wm;
1057         return wm_size;
1058 }
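/*
 * Worked example (editor's illustration, values assumed): for a
 * 148500 kHz pixel clock, 4 bytes per pixel and the default 5000 ns
 * latency, the calculation above gives
 *
 *   entries_required = (148500 / 1000) * 4 * 5000 / 1000 = 2960 bytes
 *                      -> DIV_ROUND_UP(2960, 64) = 47 cachelines
 *
 * assuming a 64-byte cacheline_size. With, say, a 96-entry FIFO and a
 * guard_size of 2, the returned watermark would be 96 - (47 + 2) = 47
 * (subject to the max_wm/default_wm clamping above).
 */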
1059
1060 static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
1061 {
1062         struct drm_crtc *crtc, *enabled = NULL;
1063
1064         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1065                 if (intel_crtc_active(crtc)) {
1066                         if (enabled)
1067                                 return NULL;
1068                         enabled = crtc;
1069                 }
1070         }
1071
1072         return enabled;
1073 }
1074
1075 static void pineview_update_wm(struct drm_device *dev)
1076 {
1077         struct drm_i915_private *dev_priv = dev->dev_private;
1078         struct drm_crtc *crtc;
1079         const struct cxsr_latency *latency;
1080         u32 reg;
1081         unsigned long wm;
1082
1083         latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
1084                                          dev_priv->fsb_freq, dev_priv->mem_freq);
1085         if (!latency) {
1086                 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
1087                 pineview_disable_cxsr(dev);
1088                 return;
1089         }
1090
1091         crtc = single_enabled_crtc(dev);
1092         if (crtc) {
1093                 int clock = crtc->mode.clock;
1094                 int pixel_size = crtc->fb->bits_per_pixel / 8;
1095
1096                 /* Display SR */
1097                 wm = intel_calculate_wm(clock, &pineview_display_wm,
1098                                         pineview_display_wm.fifo_size,
1099                                         pixel_size, latency->display_sr);
1100                 reg = I915_READ(DSPFW1);
1101                 reg &= ~DSPFW_SR_MASK;
1102                 reg |= wm << DSPFW_SR_SHIFT;
1103                 I915_WRITE(DSPFW1, reg);
1104                 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
1105
1106                 /* cursor SR */
1107                 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
1108                                         pineview_display_wm.fifo_size,
1109                                         pixel_size, latency->cursor_sr);
1110                 reg = I915_READ(DSPFW3);
1111                 reg &= ~DSPFW_CURSOR_SR_MASK;
1112                 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
1113                 I915_WRITE(DSPFW3, reg);
1114
1115                 /* Display HPLL off SR */
1116                 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
1117                                         pineview_display_hplloff_wm.fifo_size,
1118                                         pixel_size, latency->display_hpll_disable);
1119                 reg = I915_READ(DSPFW3);
1120                 reg &= ~DSPFW_HPLL_SR_MASK;
1121                 reg |= wm & DSPFW_HPLL_SR_MASK;
1122                 I915_WRITE(DSPFW3, reg);
1123
1124                 /* cursor HPLL off SR */
1125                 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
1126                                         pineview_display_hplloff_wm.fifo_size,
1127                                         pixel_size, latency->cursor_hpll_disable);
1128                 reg = I915_READ(DSPFW3);
1129                 reg &= ~DSPFW_HPLL_CURSOR_MASK;
1130                 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
1131                 I915_WRITE(DSPFW3, reg);
1132                 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
1133
1134                 /* activate cxsr */
1135                 I915_WRITE(DSPFW3,
1136                            I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
1137                 DRM_DEBUG_KMS("Self-refresh is enabled\n");
1138         } else {
1139                 pineview_disable_cxsr(dev);
1140                 DRM_DEBUG_KMS("Self-refresh is disabled\n");
1141         }
1142 }
1143
1144 static bool g4x_compute_wm0(struct drm_device *dev,
1145                             int plane,
1146                             const struct intel_watermark_params *display,
1147                             int display_latency_ns,
1148                             const struct intel_watermark_params *cursor,
1149                             int cursor_latency_ns,
1150                             int *plane_wm,
1151                             int *cursor_wm)
1152 {
1153         struct drm_crtc *crtc;
1154         int htotal, hdisplay, clock, pixel_size;
1155         int line_time_us, line_count;
1156         int entries, tlb_miss;
1157
1158         crtc = intel_get_crtc_for_plane(dev, plane);
1159         if (!intel_crtc_active(crtc)) {
1160                 *cursor_wm = cursor->guard_size;
1161                 *plane_wm = display->guard_size;
1162                 return false;
1163         }
1164
1165         htotal = crtc->mode.htotal;
1166         hdisplay = crtc->mode.hdisplay;
1167         clock = crtc->mode.clock;
1168         pixel_size = crtc->fb->bits_per_pixel / 8;
1169
1170         /* Use the small buffer method to calculate plane watermark */
1171         entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1172         tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
1173         if (tlb_miss > 0)
1174                 entries += tlb_miss;
1175         entries = DIV_ROUND_UP(entries, display->cacheline_size);
1176         *plane_wm = entries + display->guard_size;
1177         if (*plane_wm > (int)display->max_wm)
1178                 *plane_wm = display->max_wm;
1179
1180         /* Use the large buffer method to calculate cursor watermark */
1181         line_time_us = ((htotal * 1000) / clock);
1182         line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
1183         entries = line_count * 64 * pixel_size;
1184         tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
1185         if (tlb_miss > 0)
1186                 entries += tlb_miss;
1187         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1188         *cursor_wm = entries + cursor->guard_size;
1189         if (*cursor_wm > (int)cursor->max_wm)
1190                 *cursor_wm = (int)cursor->max_wm;
1191
1192         return true;
1193 }
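/*
 * Worked example (editor's illustration, assuming a 1920x1080 mode with
 * htotal = 2200, a 148500 kHz clock, 4 bytes per pixel, 64-byte
 * cachelines and the default 5000 ns latencies):
 *
 *   plane ("small buffer" method):
 *     entries = ((148500 * 4 / 1000) * 5000) / 1000 = 2970 bytes
 *             -> DIV_ROUND_UP(2970, 64) = 47, plus guard_size
 *     (the tlb_miss correction only applies when positive; for a display
 *      FIFO much smaller than hdisplay * 8 bytes it is negative and ignored)
 *
 *   cursor ("large buffer" method):
 *     line_time_us = (2200 * 1000) / 148500 = 14
 *     line_count   = (5000 / 14 + 1000) / 1000 = 1
 *     entries      = 1 * 64 * 4 = 256 -> DIV_ROUND_UP(256, 64) = 4,
 *     plus guard_size
 */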
1194
1195 /*
1196  * Check the wm result.
1197  *
1198  * If any calculated watermark values is larger than the maximum value that
1199  * can be programmed into the associated watermark register, that watermark
1200  * must be disabled.
1201  */
1202 static bool g4x_check_srwm(struct drm_device *dev,
1203                            int display_wm, int cursor_wm,
1204                            const struct intel_watermark_params *display,
1205                            const struct intel_watermark_params *cursor)
1206 {
1207         DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
1208                       display_wm, cursor_wm);
1209
1210         if (display_wm > display->max_wm) {
1211                 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
1212                               display_wm, display->max_wm);
1213                 return false;
1214         }
1215
1216         if (cursor_wm > cursor->max_wm) {
1217                 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
1218                               cursor_wm, cursor->max_wm);
1219                 return false;
1220         }
1221
1222         if (!(display_wm || cursor_wm)) {
1223                 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
1224                 return false;
1225         }
1226
1227         return true;
1228 }
1229
1230 static bool g4x_compute_srwm(struct drm_device *dev,
1231                              int plane,
1232                              int latency_ns,
1233                              const struct intel_watermark_params *display,
1234                              const struct intel_watermark_params *cursor,
1235                              int *display_wm, int *cursor_wm)
1236 {
1237         struct drm_crtc *crtc;
1238         int hdisplay, htotal, pixel_size, clock;
1239         unsigned long line_time_us;
1240         int line_count, line_size;
1241         int small, large;
1242         int entries;
1243
1244         if (!latency_ns) {
1245                 *display_wm = *cursor_wm = 0;
1246                 return false;
1247         }
1248
1249         crtc = intel_get_crtc_for_plane(dev, plane);
1250         hdisplay = crtc->mode.hdisplay;
1251         htotal = crtc->mode.htotal;
1252         clock = crtc->mode.clock;
1253         pixel_size = crtc->fb->bits_per_pixel / 8;
1254
1255         line_time_us = (htotal * 1000) / clock;
1256         line_count = (latency_ns / line_time_us + 1000) / 1000;
1257         line_size = hdisplay * pixel_size;
1258
1259         /* Use the minimum of the small and large buffer method for primary */
1260         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1261         large = line_count * line_size;
1262
1263         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1264         *display_wm = entries + display->guard_size;
1265
1266         /* calculate the self-refresh watermark for display cursor */
1267         entries = line_count * pixel_size * 64;
1268         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1269         *cursor_wm = entries + cursor->guard_size;
1270
1271         return g4x_check_srwm(dev,
1272                               *display_wm, *cursor_wm,
1273                               display, cursor);
1274 }
1275
1276 static bool vlv_compute_drain_latency(struct drm_device *dev,
1277                                      int plane,
1278                                      int *plane_prec_mult,
1279                                      int *plane_dl,
1280                                      int *cursor_prec_mult,
1281                                      int *cursor_dl)
1282 {
1283         struct drm_crtc *crtc;
1284         int clock, pixel_size;
1285         int entries;
1286
1287         crtc = intel_get_crtc_for_plane(dev, plane);
1288         if (!intel_crtc_active(crtc))
1289                 return false;
1290
1291         clock = crtc->mode.clock;       /* VESA DOT Clock */
1292         pixel_size = crtc->fb->bits_per_pixel / 8;      /* BPP */
1293
1294         entries = (clock / 1000) * pixel_size;
1295         *plane_prec_mult = (entries > 256) ?
1296                 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
1297         *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
1298                                                      pixel_size);
1299
1300         entries = (clock / 1000) * 4;   /* BPP is always 4 for cursor */
1301         *cursor_prec_mult = (entries > 256) ?
1302                 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
1303         *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
1304
1305         return true;
1306 }
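/*
 * Worked example (editor's illustration, assuming
 * DRAIN_LATENCY_PRECISION_32 == 32): with a 148500 kHz dot clock and
 * 4 bytes per pixel, entries = (148500 / 1000) * 4 = 592 > 256, so the
 * 32x precision multiplier is chosen and
 *
 *   plane_dl = (64 * 32 * 4) / ((148500 / 1000) * 4) = 8192 / 592 = 13
 *
 * The cursor is always treated as 4 bytes per pixel, so it yields the
 * same value for this clock.
 */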
1307
1308 /*
1309  * Update drain latency registers of memory arbiter
1310  *
1311  * Valleyview SoC has a new memory arbiter and needs drain latency registers
1312  * to be programmed. Each plane has a drain latency multiplier and a drain
1313  * latency value.
1314  */
1315
1316 static void vlv_update_drain_latency(struct drm_device *dev)
1317 {
1318         struct drm_i915_private *dev_priv = dev->dev_private;
1319         int planea_prec, planea_dl, planeb_prec, planeb_dl;
1320         int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
1321         int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
1322                                                         either 16 or 32 */
1323
1324         /* For plane A, Cursor A */
1325         if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
1326                                       &cursor_prec_mult, &cursora_dl)) {
1327                 cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1328                         DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
1329                 planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1330                         DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
1331
1332                 I915_WRITE(VLV_DDL1, cursora_prec |
1333                                 (cursora_dl << DDL_CURSORA_SHIFT) |
1334                                 planea_prec | planea_dl);
1335         }
1336
1337         /* For plane B, Cursor B */
1338         if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
1339                                       &cursor_prec_mult, &cursorb_dl)) {
1340                 cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1341                         DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
1342                 planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1343                         DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
1344
1345                 I915_WRITE(VLV_DDL2, cursorb_prec |
1346                                 (cursorb_dl << DDL_CURSORB_SHIFT) |
1347                                 planeb_prec | planeb_dl);
1348         }
1349 }
1350
1351 #define single_plane_enabled(mask) is_power_of_2(mask)
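/* i.e. exactly one pipe bit set: (1 << PIPE_A) qualifies, (1 << PIPE_A) | (1 << PIPE_B) does not */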
1352
1353 static void valleyview_update_wm(struct drm_device *dev)
1354 {
1355         static const int sr_latency_ns = 12000;
1356         struct drm_i915_private *dev_priv = dev->dev_private;
1357         int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1358         int plane_sr, cursor_sr;
1359         int ignore_plane_sr, ignore_cursor_sr;
1360         unsigned int enabled = 0;
1361
1362         vlv_update_drain_latency(dev);
1363
1364         if (g4x_compute_wm0(dev, PIPE_A,
1365                             &valleyview_wm_info, latency_ns,
1366                             &valleyview_cursor_wm_info, latency_ns,
1367                             &planea_wm, &cursora_wm))
1368                 enabled |= 1 << PIPE_A;
1369
1370         if (g4x_compute_wm0(dev, PIPE_B,
1371                             &valleyview_wm_info, latency_ns,
1372                             &valleyview_cursor_wm_info, latency_ns,
1373                             &planeb_wm, &cursorb_wm))
1374                 enabled |= 1 << PIPE_B;
1375
1376         if (single_plane_enabled(enabled) &&
1377             g4x_compute_srwm(dev, ffs(enabled) - 1,
1378                              sr_latency_ns,
1379                              &valleyview_wm_info,
1380                              &valleyview_cursor_wm_info,
1381                              &plane_sr, &ignore_cursor_sr) &&
1382             g4x_compute_srwm(dev, ffs(enabled) - 1,
1383                              2*sr_latency_ns,
1384                              &valleyview_wm_info,
1385                              &valleyview_cursor_wm_info,
1386                              &ignore_plane_sr, &cursor_sr)) {
1387                 I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
1388         } else {
1389                 I915_WRITE(FW_BLC_SELF_VLV,
1390                            I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
1391                 plane_sr = cursor_sr = 0;
1392         }
1393
1394         DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1395                       planea_wm, cursora_wm,
1396                       planeb_wm, cursorb_wm,
1397                       plane_sr, cursor_sr);
1398
1399         I915_WRITE(DSPFW1,
1400                    (plane_sr << DSPFW_SR_SHIFT) |
1401                    (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1402                    (planeb_wm << DSPFW_PLANEB_SHIFT) |
1403                    planea_wm);
1404         I915_WRITE(DSPFW2,
1405                    (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1406                    (cursora_wm << DSPFW_CURSORA_SHIFT));
1407         I915_WRITE(DSPFW3,
1408                    (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
1409                    (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1410 }
1411
1412 static void g4x_update_wm(struct drm_device *dev)
1413 {
1414         static const int sr_latency_ns = 12000;
1415         struct drm_i915_private *dev_priv = dev->dev_private;
1416         int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1417         int plane_sr, cursor_sr;
1418         unsigned int enabled = 0;
1419
1420         if (g4x_compute_wm0(dev, PIPE_A,
1421                             &g4x_wm_info, latency_ns,
1422                             &g4x_cursor_wm_info, latency_ns,
1423                             &planea_wm, &cursora_wm))
1424                 enabled |= 1 << PIPE_A;
1425
1426         if (g4x_compute_wm0(dev, PIPE_B,
1427                             &g4x_wm_info, latency_ns,
1428                             &g4x_cursor_wm_info, latency_ns,
1429                             &planeb_wm, &cursorb_wm))
1430                 enabled |= 1 << PIPE_B;
1431
1432         if (single_plane_enabled(enabled) &&
1433             g4x_compute_srwm(dev, ffs(enabled) - 1,
1434                              sr_latency_ns,
1435                              &g4x_wm_info,
1436                              &g4x_cursor_wm_info,
1437                              &plane_sr, &cursor_sr)) {
1438                 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1439         } else {
1440                 I915_WRITE(FW_BLC_SELF,
1441                            I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
1442                 plane_sr = cursor_sr = 0;
1443         }
1444
1445         DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1446                       planea_wm, cursora_wm,
1447                       planeb_wm, cursorb_wm,
1448                       plane_sr, cursor_sr);
1449
1450         I915_WRITE(DSPFW1,
1451                    (plane_sr << DSPFW_SR_SHIFT) |
1452                    (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1453                    (planeb_wm << DSPFW_PLANEB_SHIFT) |
1454                    planea_wm);
1455         I915_WRITE(DSPFW2,
1456                    (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1457                    (cursora_wm << DSPFW_CURSORA_SHIFT));
1458         /* HPLL off in SR has some issues on G4x... disable it */
1459         I915_WRITE(DSPFW3,
1460                    (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
1461                    (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1462 }
1463
1464 static void i965_update_wm(struct drm_device *dev)
1465 {
1466         struct drm_i915_private *dev_priv = dev->dev_private;
1467         struct drm_crtc *crtc;
1468         int srwm = 1;
1469         int cursor_sr = 16;
1470
1471         /* Calc sr entries for one plane configs */
1472         crtc = single_enabled_crtc(dev);
1473         if (crtc) {
1474                 /* self-refresh has much higher latency */
1475                 static const int sr_latency_ns = 12000;
1476                 int clock = crtc->mode.clock;
1477                 int htotal = crtc->mode.htotal;
1478                 int hdisplay = crtc->mode.hdisplay;
1479                 int pixel_size = crtc->fb->bits_per_pixel / 8;
1480                 unsigned long line_time_us;
1481                 int entries;
1482
1483                 line_time_us = ((htotal * 1000) / clock);
1484
1485                 /* Use ns/us then divide to preserve precision */
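                     /*
                      * sr_latency_ns / line_time_us gives the latency in units of
                      * 1/1000 of a line; adding 1000 before dividing by 1000 turns
                      * that into whole lines plus one line of margin. Times the
                      * line size in bytes and divided by the FIFO line size, this
                      * is how many FIFO entries scanout drains while the latency
                      * elapses, which is then subtracted from the FIFO size below.
                      */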
1486                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1487                         pixel_size * hdisplay;
1488                 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
1489                 srwm = I965_FIFO_SIZE - entries;
1490                 if (srwm < 0)
1491                         srwm = 1;
1492                 srwm &= 0x1ff;
1493                 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
1494                               entries, srwm);
1495
1496                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1497                         pixel_size * 64;
1498                 entries = DIV_ROUND_UP(entries,
1499                                           i965_cursor_wm_info.cacheline_size);
1500                 cursor_sr = i965_cursor_wm_info.fifo_size -
1501                         (entries + i965_cursor_wm_info.guard_size);
1502
1503                 if (cursor_sr > i965_cursor_wm_info.max_wm)
1504                         cursor_sr = i965_cursor_wm_info.max_wm;
1505
1506                 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
1507                               "cursor %d\n", srwm, cursor_sr);
1508
1509                 if (IS_CRESTLINE(dev))
1510                         I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1511         } else {
1512                 /* Turn off self refresh if both pipes are enabled */
1513                 if (IS_CRESTLINE(dev))
1514                         I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
1515                                    & ~FW_BLC_SELF_EN);
1516         }
1517
1518         DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
1519                       srwm);
1520
1521         /* 965 has limitations... */
1522         I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
1523                    (8 << 16) | (8 << 8) | (8 << 0));
1524         I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1525         /* update cursor SR watermark */
1526         I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1527 }
1528
1529 static void i9xx_update_wm(struct drm_device *dev)
1530 {
1531         struct drm_i915_private *dev_priv = dev->dev_private;
1532         const struct intel_watermark_params *wm_info;
1533         uint32_t fwater_lo;
1534         uint32_t fwater_hi;
1535         int cwm, srwm = 1;
1536         int fifo_size;
1537         int planea_wm, planeb_wm;
1538         struct drm_crtc *crtc, *enabled = NULL;
1539
1540         if (IS_I945GM(dev))
1541                 wm_info = &i945_wm_info;
1542         else if (!IS_GEN2(dev))
1543                 wm_info = &i915_wm_info;
1544         else
1545                 wm_info = &i855_wm_info;
1546
1547         fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1548         crtc = intel_get_crtc_for_plane(dev, 0);
1549         if (intel_crtc_active(crtc)) {
1550                 int cpp = crtc->fb->bits_per_pixel / 8;
1551                 if (IS_GEN2(dev))
1552                         cpp = 4;
1553
1554                 planea_wm = intel_calculate_wm(crtc->mode.clock,
1555                                                wm_info, fifo_size, cpp,
1556                                                latency_ns);
1557                 enabled = crtc;
1558         } else
1559                 planea_wm = fifo_size - wm_info->guard_size;
1560
1561         fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1562         crtc = intel_get_crtc_for_plane(dev, 1);
1563         if (intel_crtc_active(crtc)) {
1564                 int cpp = crtc->fb->bits_per_pixel / 8;
1565                 if (IS_GEN2(dev))
1566                         cpp = 4;
1567
1568                 planeb_wm = intel_calculate_wm(crtc->mode.clock,
1569                                                wm_info, fifo_size, cpp,
1570                                                latency_ns);
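                     /*
                      * Self-refresh below needs exactly one active plane: if this
                      * is the second one, fall back to "no single enabled crtc"
                      * so self-refresh stays off.
                      */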
1571                 if (enabled == NULL)
1572                         enabled = crtc;
1573                 else
1574                         enabled = NULL;
1575         } else
1576                 planeb_wm = fifo_size - wm_info->guard_size;
1577
1578         DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1579
1580         /*
1581          * Overlay gets an aggressive default since video jitter is bad.
1582          */
1583         cwm = 2;
1584
1585         /* Play safe and disable self-refresh before adjusting watermarks. */
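             /*
              * FW_BLC_SELF appears to use a write-mask convention: setting
              * FW_BLC_SELF_EN_MASK lets the (here cleared) self-refresh enable
              * bit take effect, matching the enable path further down.
              */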
1586         if (IS_I945G(dev) || IS_I945GM(dev))
1587                 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
1588         else if (IS_I915GM(dev))
1589                 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
1590
1591         /* Calc sr entries for one plane configs */
1592         if (HAS_FW_BLC(dev) && enabled) {
1593                 /* self-refresh has much higher latency */
1594                 static const int sr_latency_ns = 6000;
1595                 int clock = enabled->mode.clock;
1596                 int htotal = enabled->mode.htotal;
1597                 int hdisplay = enabled->mode.hdisplay;
1598                 int pixel_size = enabled->fb->bits_per_pixel / 8;
1599                 unsigned long line_time_us;
1600                 int entries;
1601
1602                 line_time_us = (htotal * 1000) / clock;
1603
1604                 /* Use ns/us then divide to preserve precision */
1605                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1606                         pixel_size * hdisplay;
1607                 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
1608                 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
1609                 srwm = wm_info->fifo_size - entries;
1610                 if (srwm < 0)
1611                         srwm = 1;
1612
1613                 if (IS_I945G(dev) || IS_I945GM(dev))
1614                         I915_WRITE(FW_BLC_SELF,
1615                                    FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
1616                 else if (IS_I915GM(dev))
1617                         I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
1618         }
1619
1620         DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
1621                       planea_wm, planeb_wm, cwm, srwm);
1622
1623         fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
1624         fwater_hi = (cwm & 0x1f);
1625
1626         /* Set request length to 8 cachelines per fetch */
1627         fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
1628         fwater_hi = fwater_hi | (1 << 8);
1629
1630         I915_WRITE(FW_BLC, fwater_lo);
1631         I915_WRITE(FW_BLC2, fwater_hi);
1632
1633         if (HAS_FW_BLC(dev)) {
1634                 if (enabled) {
1635                         if (IS_I945G(dev) || IS_I945GM(dev))
1636                                 I915_WRITE(FW_BLC_SELF,
1637                                            FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
1638                         else if (IS_I915GM(dev))
1639                                 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
1640                         DRM_DEBUG_KMS("memory self refresh enabled\n");
1641                 } else
1642                         DRM_DEBUG_KMS("memory self refresh disabled\n");
1643         }
1644 }
1645
1646 static void i830_update_wm(struct drm_device *dev)
1647 {
1648         struct drm_i915_private *dev_priv = dev->dev_private;
1649         struct drm_crtc *crtc;
1650         uint32_t fwater_lo;
1651         int planea_wm;
1652
1653         crtc = single_enabled_crtc(dev);
1654         if (crtc == NULL)
1655                 return;
1656
1657         planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
1658                                        dev_priv->display.get_fifo_size(dev, 0),
1659                                        4, latency_ns);
1660         fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1661         fwater_lo |= (3<<8) | planea_wm;
1662
1663         DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
1664
1665         I915_WRITE(FW_BLC, fwater_lo);
1666 }
1667
1668 #define ILK_LP0_PLANE_LATENCY           700
1669 #define ILK_LP0_CURSOR_LATENCY          1300
1670
1671 /*
1672  * Check the wm result.
1673  *
1674  * If any calculated watermark value is larger than the maximum value that
1675  * can be programmed into the associated watermark register, that watermark
1676  * must be disabled.
1677  */
1678 static bool ironlake_check_srwm(struct drm_device *dev, int level,
1679                                 int fbc_wm, int display_wm, int cursor_wm,
1680                                 const struct intel_watermark_params *display,
1681                                 const struct intel_watermark_params *cursor)
1682 {
1683         struct drm_i915_private *dev_priv = dev->dev_private;
1684
1685         DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
1686                       " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
1687
1688         if (fbc_wm > SNB_FBC_MAX_SRWM) {
1689                 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
1690                               fbc_wm, SNB_FBC_MAX_SRWM, level);
1691
1692                 /* FBC has its own way to disable the FBC WM */
1693                 I915_WRITE(DISP_ARB_CTL,
1694                            I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
1695                 return false;
1696         } else if (INTEL_INFO(dev)->gen >= 6) {
1697                 /* enable FBC WM (except on ILK, where it must remain off) */
1698                 I915_WRITE(DISP_ARB_CTL,
1699                            I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
1700         }
1701
1702         if (display_wm > display->max_wm) {
1703                 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
1704                               display_wm, (int)display->max_wm, level);
1705                 return false;
1706         }
1707
1708         if (cursor_wm > cursor->max_wm) {
1709                 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
1710                               cursor_wm, (int)cursor->max_wm, level);
1711                 return false;
1712         }
1713
1714         if (!(fbc_wm || display_wm || cursor_wm)) {
1715                 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
1716                 return false;
1717         }
1718
1719         return true;
1720 }
1721
1722 /*
1723  * Compute the watermark values for WM[1-3].
1724  */
1725 static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1726                                   int latency_ns,
1727                                   const struct intel_watermark_params *display,
1728                                   const struct intel_watermark_params *cursor,
1729                                   int *fbc_wm, int *display_wm, int *cursor_wm)
1730 {
1731         struct drm_crtc *crtc;
1732         unsigned long line_time_us;
1733         int hdisplay, htotal, pixel_size, clock;
1734         int line_count, line_size;
1735         int small, large;
1736         int entries;
1737
1738         if (!latency_ns) {
1739                 *fbc_wm = *display_wm = *cursor_wm = 0;
1740                 return false;
1741         }
1742
1743         crtc = intel_get_crtc_for_plane(dev, plane);
1744         hdisplay = crtc->mode.hdisplay;
1745         htotal = crtc->mode.htotal;
1746         clock = crtc->mode.clock;
1747         pixel_size = crtc->fb->bits_per_pixel / 8;
1748
1749         line_time_us = (htotal * 1000) / clock;
1750         line_count = (latency_ns / line_time_us + 1000) / 1000;
1751         line_size = hdisplay * pixel_size;
1752
1753         /* Use the minimum of the small and large buffer method for primary */
1754         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1755         large = line_count * line_size;
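             /*
              * small: bytes fetched during the latency window at the raw pixel
              * rate (clock is in kHz and the latency in ns, hence the two /1000
              * steps); large: the same window rounded to whole display lines.
              */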
1756
1757         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1758         *display_wm = entries + display->guard_size;
1759
1760         /*
1761          * Spec says:
1762          * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
1763          */
1764         *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
1765
1766         /* calculate the self-refresh watermark for display cursor */
1767         entries = line_count * pixel_size * 64;
1768         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1769         *cursor_wm = entries + cursor->guard_size;
1770
1771         return ironlake_check_srwm(dev, level,
1772                                    *fbc_wm, *display_wm, *cursor_wm,
1773                                    display, cursor);
1774 }
1775
1776 static void ironlake_update_wm(struct drm_device *dev)
1777 {
1778         struct drm_i915_private *dev_priv = dev->dev_private;
1779         int fbc_wm, plane_wm, cursor_wm;
1780         unsigned int enabled;
1781
1782         enabled = 0;
1783         if (g4x_compute_wm0(dev, PIPE_A,
1784                             &ironlake_display_wm_info,
1785                             ILK_LP0_PLANE_LATENCY,
1786                             &ironlake_cursor_wm_info,
1787                             ILK_LP0_CURSOR_LATENCY,
1788                             &plane_wm, &cursor_wm)) {
1789                 I915_WRITE(WM0_PIPEA_ILK,
1790                            (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1791                 DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
1792                               " plane %d, cursor: %d\n",
1793                               plane_wm, cursor_wm);
1794                 enabled |= 1 << PIPE_A;
1795         }
1796
1797         if (g4x_compute_wm0(dev, PIPE_B,
1798                             &ironlake_display_wm_info,
1799                             ILK_LP0_PLANE_LATENCY,
1800                             &ironlake_cursor_wm_info,
1801                             ILK_LP0_CURSOR_LATENCY,
1802                             &plane_wm, &cursor_wm)) {
1803                 I915_WRITE(WM0_PIPEB_ILK,
1804                            (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1805                 DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
1806                               " plane %d, cursor: %d\n",
1807                               plane_wm, cursor_wm);
1808                 enabled |= 1 << PIPE_B;
1809         }
1810
1811         /*
1812          * Calculate and update the self-refresh watermark only when one
1813          * display plane is used.
1814          */
1815         I915_WRITE(WM3_LP_ILK, 0);
1816         I915_WRITE(WM2_LP_ILK, 0);
1817         I915_WRITE(WM1_LP_ILK, 0);
1818
1819         if (!single_plane_enabled(enabled))
1820                 return;
1821         enabled = ffs(enabled) - 1;
1822
1823         /* WM1 */
1824         if (!ironlake_compute_srwm(dev, 1, enabled,
1825                                    ILK_READ_WM1_LATENCY() * 500,
1826                                    &ironlake_display_srwm_info,
1827                                    &ironlake_cursor_srwm_info,
1828                                    &fbc_wm, &plane_wm, &cursor_wm))
1829                 return;
1830
1831         I915_WRITE(WM1_LP_ILK,
1832                    WM1_LP_SR_EN |
1833                    (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1834                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1835                    (plane_wm << WM1_LP_SR_SHIFT) |
1836                    cursor_wm);
1837
1838         /* WM2 */
1839         if (!ironlake_compute_srwm(dev, 2, enabled,
1840                                    ILK_READ_WM2_LATENCY() * 500,
1841                                    &ironlake_display_srwm_info,
1842                                    &ironlake_cursor_srwm_info,
1843                                    &fbc_wm, &plane_wm, &cursor_wm))
1844                 return;
1845
1846         I915_WRITE(WM2_LP_ILK,
1847                    WM2_LP_EN |
1848                    (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1849                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1850                    (plane_wm << WM1_LP_SR_SHIFT) |
1851                    cursor_wm);
1852
1853         /*
1854          * WM3 is unsupported on ILK, probably because we don't have latency
1855          * data for that power state
1856          */
1857 }
1858
1859 static void sandybridge_update_wm(struct drm_device *dev)
1860 {
1861         struct drm_i915_private *dev_priv = dev->dev_private;
1862         int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
1863         u32 val;
1864         int fbc_wm, plane_wm, cursor_wm;
1865         unsigned int enabled;
1866
1867         enabled = 0;
1868         if (g4x_compute_wm0(dev, PIPE_A,
1869                             &sandybridge_display_wm_info, latency,
1870                             &sandybridge_cursor_wm_info, latency,
1871                             &plane_wm, &cursor_wm)) {
1872                 val = I915_READ(WM0_PIPEA_ILK);
1873                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1874                 I915_WRITE(WM0_PIPEA_ILK, val |
1875                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1876                 DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
1877                               " plane %d, cursor: %d\n",
1878                               plane_wm, cursor_wm);
1879                 enabled |= 1 << PIPE_A;
1880         }
1881
1882         if (g4x_compute_wm0(dev, PIPE_B,
1883                             &sandybridge_display_wm_info, latency,
1884                             &sandybridge_cursor_wm_info, latency,
1885                             &plane_wm, &cursor_wm)) {
1886                 val = I915_READ(WM0_PIPEB_ILK);
1887                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1888                 I915_WRITE(WM0_PIPEB_ILK, val |
1889                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1890                 DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
1891                               " plane %d, cursor: %d\n",
1892                               plane_wm, cursor_wm);
1893                 enabled |= 1 << PIPE_B;
1894         }
1895
1896         /*
1897          * Calculate and update the self-refresh watermark only when one
1898          * display plane is used.
1899          *
1900          * SNB supports 3 levels of watermarks.
1901          *
1902          * WM1/WM2/WM3 watermarks have to be enabled in ascending order
1903          * and disabled in descending order.
1904          *
1905          */
1906         I915_WRITE(WM3_LP_ILK, 0);
1907         I915_WRITE(WM2_LP_ILK, 0);
1908         I915_WRITE(WM1_LP_ILK, 0);
1909
1910         if (!single_plane_enabled(enabled) ||
1911             dev_priv->sprite_scaling_enabled)
1912                 return;
1913         enabled = ffs(enabled) - 1;
1914
1915         /* WM1 */
1916         if (!ironlake_compute_srwm(dev, 1, enabled,
1917                                    SNB_READ_WM1_LATENCY() * 500,
1918                                    &sandybridge_display_srwm_info,
1919                                    &sandybridge_cursor_srwm_info,
1920                                    &fbc_wm, &plane_wm, &cursor_wm))
1921                 return;
1922
1923         I915_WRITE(WM1_LP_ILK,
1924                    WM1_LP_SR_EN |
1925                    (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1926                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1927                    (plane_wm << WM1_LP_SR_SHIFT) |
1928                    cursor_wm);
1929
1930         /* WM2 */
1931         if (!ironlake_compute_srwm(dev, 2, enabled,
1932                                    SNB_READ_WM2_LATENCY() * 500,
1933                                    &sandybridge_display_srwm_info,
1934                                    &sandybridge_cursor_srwm_info,
1935                                    &fbc_wm, &plane_wm, &cursor_wm))
1936                 return;
1937
1938         I915_WRITE(WM2_LP_ILK,
1939                    WM2_LP_EN |
1940                    (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1941                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1942                    (plane_wm << WM1_LP_SR_SHIFT) |
1943                    cursor_wm);
1944
1945         /* WM3 */
1946         if (!ironlake_compute_srwm(dev, 3, enabled,
1947                                    SNB_READ_WM3_LATENCY() * 500,
1948                                    &sandybridge_display_srwm_info,
1949                                    &sandybridge_cursor_srwm_info,
1950                                    &fbc_wm, &plane_wm, &cursor_wm))
1951                 return;
1952
1953         I915_WRITE(WM3_LP_ILK,
1954                    WM3_LP_EN |
1955                    (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1956                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1957                    (plane_wm << WM1_LP_SR_SHIFT) |
1958                    cursor_wm);
1959 }
1960
1961 static void ivybridge_update_wm(struct drm_device *dev)
1962 {
1963         struct drm_i915_private *dev_priv = dev->dev_private;
1964         int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
1965         u32 val;
1966         int fbc_wm, plane_wm, cursor_wm;
1967         int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
1968         unsigned int enabled;
1969
1970         enabled = 0;
1971         if (g4x_compute_wm0(dev, PIPE_A,
1972                             &sandybridge_display_wm_info, latency,
1973                             &sandybridge_cursor_wm_info, latency,
1974                             &plane_wm, &cursor_wm)) {
1975                 val = I915_READ(WM0_PIPEA_ILK);
1976                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1977                 I915_WRITE(WM0_PIPEA_ILK, val |
1978                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1979                 DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
1980                               " plane %d, cursor: %d\n",
1981                               plane_wm, cursor_wm);
1982                 enabled |= 1 << PIPE_A;
1983         }
1984
1985         if (g4x_compute_wm0(dev, PIPE_B,
1986                             &sandybridge_display_wm_info, latency,
1987                             &sandybridge_cursor_wm_info, latency,
1988                             &plane_wm, &cursor_wm)) {
1989                 val = I915_READ(WM0_PIPEB_ILK);
1990                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1991                 I915_WRITE(WM0_PIPEB_ILK, val |
1992                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1993                 DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
1994                               " plane %d, cursor: %d\n",
1995                               plane_wm, cursor_wm);
1996                 enabled |= 1 << PIPE_B;
1997         }
1998
1999         if (g4x_compute_wm0(dev, PIPE_C,
2000                             &sandybridge_display_wm_info, latency,
2001                             &sandybridge_cursor_wm_info, latency,
2002                             &plane_wm, &cursor_wm)) {
2003                 val = I915_READ(WM0_PIPEC_IVB);
2004                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
2005                 I915_WRITE(WM0_PIPEC_IVB, val |
2006                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
2007                 DRM_DEBUG_KMS("FIFO watermarks for pipe C -"
2008                               " plane %d, cursor: %d\n",
2009                               plane_wm, cursor_wm);
2010                 enabled |= 1 << PIPE_C;
2011         }
2012
2013         /*
2014          * Calculate and update the self-refresh watermark only when one
2015          * display plane is used.
2016          *
2017          * SNB supports 3 levels of watermarks.
2018          *
2019          * WM1/WM2/WM3 watermarks have to be enabled in ascending order
2020          * and disabled in descending order.
2021          *
2022          */
2023         I915_WRITE(WM3_LP_ILK, 0);
2024         I915_WRITE(WM2_LP_ILK, 0);
2025         I915_WRITE(WM1_LP_ILK, 0);
2026
2027         if (!single_plane_enabled(enabled) ||
2028             dev_priv->sprite_scaling_enabled)
2029                 return;
2030         enabled = ffs(enabled) - 1;
2031
2032         /* WM1 */
2033         if (!ironlake_compute_srwm(dev, 1, enabled,
2034                                    SNB_READ_WM1_LATENCY() * 500,
2035                                    &sandybridge_display_srwm_info,
2036                                    &sandybridge_cursor_srwm_info,
2037                                    &fbc_wm, &plane_wm, &cursor_wm))
2038                 return;
2039
2040         I915_WRITE(WM1_LP_ILK,
2041                    WM1_LP_SR_EN |
2042                    (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
2043                    (fbc_wm << WM1_LP_FBC_SHIFT) |
2044                    (plane_wm << WM1_LP_SR_SHIFT) |
2045                    cursor_wm);
2046
2047         /* WM2 */
2048         if (!ironlake_compute_srwm(dev, 2, enabled,
2049                                    SNB_READ_WM2_LATENCY() * 500,
2050                                    &sandybridge_display_srwm_info,
2051                                    &sandybridge_cursor_srwm_info,
2052                                    &fbc_wm, &plane_wm, &cursor_wm))
2053                 return;
2054
2055         I915_WRITE(WM2_LP_ILK,
2056                    WM2_LP_EN |
2057                    (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
2058                    (fbc_wm << WM1_LP_FBC_SHIFT) |
2059                    (plane_wm << WM1_LP_SR_SHIFT) |
2060                    cursor_wm);
2061
2062         /* WM3, note we have to correct the cursor latency */
2063         if (!ironlake_compute_srwm(dev, 3, enabled,
2064                                    SNB_READ_WM3_LATENCY() * 500,
2065                                    &sandybridge_display_srwm_info,
2066                                    &sandybridge_cursor_srwm_info,
2067                                    &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
2068             !ironlake_compute_srwm(dev, 3, enabled,
2069                                    2 * SNB_READ_WM3_LATENCY() * 500,
2070                                    &sandybridge_display_srwm_info,
2071                                    &sandybridge_cursor_srwm_info,
2072                                    &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
2073                 return;
2074
2075         I915_WRITE(WM3_LP_ILK,
2076                    WM3_LP_EN |
2077                    (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
2078                    (fbc_wm << WM1_LP_FBC_SHIFT) |
2079                    (plane_wm << WM1_LP_SR_SHIFT) |
2080                    cursor_wm);
2081 }
2082
2083 static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev,
2084                                       struct drm_crtc *crtc)
2085 {
2086         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2087         uint32_t pixel_rate, pfit_size;
2088
2089         pixel_rate = intel_crtc->config.adjusted_mode.clock;
2090
2091         /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
2092          * adjust the pixel_rate here. */
2093
2094         pfit_size = intel_crtc->config.pch_pfit.size;
2095         if (pfit_size) {
2096                 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
2097
2098                 pipe_w = intel_crtc->config.requested_mode.hdisplay;
2099                 pipe_h = intel_crtc->config.requested_mode.vdisplay;
2100                 pfit_w = (pfit_size >> 16) & 0xFFFF;
2101                 pfit_h = pfit_size & 0xFFFF;
2102                 if (pipe_w < pfit_w)
2103                         pipe_w = pfit_w;
2104                 if (pipe_h < pfit_h)
2105                         pipe_h = pfit_h;
2106
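                     /*
                      * When the PCH panel fitter is downscaling, the pipe has to
                      * fetch pipe_w x pipe_h source pixels in the time the fitter
                      * outputs pfit_w x pfit_h, so scale the pixel rate by the
                      * area ratio (kept >= 1 by the clamping above).
                      */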
2107                 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
2108                                      pfit_w * pfit_h);
2109         }
2110
2111         return pixel_rate;
2112 }
2113
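     /*
      * WM "method 1" (small buffer): bytes fetched while covering the memory
      * latency at the raw pixel rate. With pixel_rate in kHz and the latency
      * apparently in 0.1 us units, pixel_rate * latency / 10000 is the number
      * of pixels fetched in that window; the result is expressed in 64-byte
      * FIFO lines, plus 2 lines of guard.
      */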
2114 static uint32_t hsw_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
2115                                uint32_t latency)
2116 {
2117         uint64_t ret;
2118
2119         ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
2120         ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
2121
2122         return ret;
2123 }
2124
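     /*
      * WM "method 2" (large buffer): the latency window rounded up to whole
      * scanlines (latency * pixel_rate / (htotal * 10000), plus one), times
      * the plane width in bytes, again expressed in 64-byte lines plus 2 of
      * guard.
      */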
2125 static uint32_t hsw_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
2126                                uint32_t horiz_pixels, uint8_t bytes_per_pixel,
2127                                uint32_t latency)
2128 {
2129         uint32_t ret;
2130
2131         ret = (latency * pixel_rate) / (pipe_htotal * 10000);
2132         ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
2133         ret = DIV_ROUND_UP(ret, 64) + 2;
2134         return ret;
2135 }
2136
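     /*
      * FBC watermark, per the spec formula quoted in ironlake_compute_srwm():
      * ((primary WM * 64) / bytes per line) + 2.
      */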
2137 static uint32_t hsw_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
2138                            uint8_t bytes_per_pixel)
2139 {
2140         return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
2141 }
2142
2143 struct hsw_pipe_wm_parameters {
2144         bool active;
2145         bool sprite_enabled;
2146         uint8_t pri_bytes_per_pixel;
2147         uint8_t spr_bytes_per_pixel;
2148         uint8_t cur_bytes_per_pixel;
2149         uint32_t pri_horiz_pixels;
2150         uint32_t spr_horiz_pixels;
2151         uint32_t cur_horiz_pixels;
2152         uint32_t pipe_htotal;
2153         uint32_t pixel_rate;
2154 };
2155
2156 struct hsw_wm_maximums {
2157         uint16_t pri;
2158         uint16_t spr;
2159         uint16_t cur;
2160         uint16_t fbc;
2161 };
2162
2163 struct hsw_lp_wm_result {
2164         bool enable;
2165         bool fbc_enable;
2166         uint32_t pri_val;
2167         uint32_t spr_val;
2168         uint32_t cur_val;
2169         uint32_t fbc_val;
2170 };
2171
2172 struct hsw_wm_values {
2173         uint32_t wm_pipe[3];
2174         uint32_t wm_lp[3];
2175         uint32_t wm_lp_spr[3];
2176         uint32_t wm_linetime[3];
2177         bool enable_fbc_wm;
2178 };
2179
2180 enum hsw_data_buf_partitioning {
2181         HSW_DATA_BUF_PART_1_2,
2182         HSW_DATA_BUF_PART_5_6,
2183 };
2184
2185 /* For both WM_PIPE and WM_LP. */
2186 static uint32_t hsw_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
2187                                    uint32_t mem_value,
2188                                    bool is_lp)
2189 {
2190         uint32_t method1, method2;
2191
2192         /* TODO: for now, assume the primary plane is always enabled. */
2193         if (!params->active)
2194                 return 0;
2195
2196         method1 = hsw_wm_method1(params->pixel_rate,
2197                                  params->pri_bytes_per_pixel,
2198                                  mem_value);
2199
2200         if (!is_lp)
2201                 return method1;
2202
2203         method2 = hsw_wm_method2(params->pixel_rate,
2204                                  params->pipe_htotal,
2205                                  params->pri_horiz_pixels,
2206                                  params->pri_bytes_per_pixel,
2207                                  mem_value);
2208
2209         return min(method1, method2);
2210 }
2211
2212 /* For both WM_PIPE and WM_LP. */
2213 static uint32_t hsw_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
2214                                    uint32_t mem_value)
2215 {
2216         uint32_t method1, method2;
2217
2218         if (!params->active || !params->sprite_enabled)
2219                 return 0;
2220
2221         method1 = hsw_wm_method1(params->pixel_rate,
2222                                  params->spr_bytes_per_pixel,
2223                                  mem_value);
2224         method2 = hsw_wm_method2(params->pixel_rate,
2225                                  params->pipe_htotal,
2226                                  params->spr_horiz_pixels,
2227                                  params->spr_bytes_per_pixel,
2228                                  mem_value);
2229         return min(method1, method2);
2230 }
2231
2232 /* For both WM_PIPE and WM_LP. */
2233 static uint32_t hsw_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
2234                                    uint32_t mem_value)
2235 {
2236         if (!params->active)
2237                 return 0;
2238
2239         return hsw_wm_method2(params->pixel_rate,
2240                               params->pipe_htotal,
2241                               params->cur_horiz_pixels,
2242                               params->cur_bytes_per_pixel,
2243                               mem_value);
2244 }
2245
2246 /* Only for WM_LP. */
2247 static uint32_t hsw_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
2248                                    uint32_t pri_val,
2249                                    uint32_t mem_value)
2250 {
2251         if (!params->active)
2252                 return 0;
2253
2254         return hsw_wm_fbc(pri_val,
2255                           params->pri_horiz_pixels,
2256                           params->pri_bytes_per_pixel);
2257 }
2258
2259 static bool hsw_compute_lp_wm(uint32_t mem_value, struct hsw_wm_maximums *max,
2260                               struct hsw_pipe_wm_parameters *params,
2261                               struct hsw_lp_wm_result *result)
2262 {
2263         enum pipe pipe;
2264         uint32_t pri_val[3], spr_val[3], cur_val[3], fbc_val[3];
2265
2266         for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) {
2267                 struct hsw_pipe_wm_parameters *p = &params[pipe];
2268
2269                 pri_val[pipe] = hsw_compute_pri_wm(p, mem_value, true);
2270                 spr_val[pipe] = hsw_compute_spr_wm(p, mem_value);
2271                 cur_val[pipe] = hsw_compute_cur_wm(p, mem_value);
2272                 fbc_val[pipe] = hsw_compute_fbc_wm(p, pri_val[pipe], mem_value);
2273         }
2274
2275         result->pri_val = max3(pri_val[0], pri_val[1], pri_val[2]);
2276         result->spr_val = max3(spr_val[0], spr_val[1], spr_val[2]);
2277         result->cur_val = max3(cur_val[0], cur_val[1], cur_val[2]);
2278         result->fbc_val = max3(fbc_val[0], fbc_val[1], fbc_val[2]);
2279
2280         if (result->fbc_val > max->fbc) {
2281                 result->fbc_enable = false;
2282                 result->fbc_val = 0;
2283         } else {
2284                 result->fbc_enable = true;
2285         }
2286
2287         result->enable = result->pri_val <= max->pri &&
2288                          result->spr_val <= max->spr &&
2289                          result->cur_val <= max->cur;
2290         return result->enable;
2291 }
2292
2293 static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
2294                                     uint32_t mem_value, enum pipe pipe,
2295                                     struct hsw_pipe_wm_parameters *params)
2296 {
2297         uint32_t pri_val, cur_val, spr_val;
2298
2299         pri_val = hsw_compute_pri_wm(params, mem_value, false);
2300         spr_val = hsw_compute_spr_wm(params, mem_value);
2301         cur_val = hsw_compute_cur_wm(params, mem_value);
2302
2303         WARN(pri_val > 127,
2304              "Primary WM error, mode not supported for pipe %c\n",
2305              pipe_name(pipe));
2306         WARN(spr_val > 127,
2307              "Sprite WM error, mode not supported for pipe %c\n",
2308              pipe_name(pipe));
2309         WARN(cur_val > 63,
2310              "Cursor WM error, mode not supported for pipe %c\n",
2311              pipe_name(pipe));
2312
2313         return (pri_val << WM0_PIPE_PLANE_SHIFT) |
2314                (spr_val << WM0_PIPE_SPRITE_SHIFT) |
2315                cur_val;
2316 }
2317
2318 static uint32_t
2319 hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2320 {
2321         struct drm_i915_private *dev_priv = dev->dev_private;
2322         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2323         struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
2324         u32 linetime, ips_linetime;
2325
2326         if (!intel_crtc_active(crtc))
2327                 return 0;
2328
2329         /* The WMs are computed based on how long it takes to fill a single
2330          * row at the given clock rate, multiplied by 8.
2331          */
2332         linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);
2333         ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8,
2334                                          intel_ddi_get_cdclk_freq(dev_priv));
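             /*
              * mode->clock is in kHz, so htotal * 1000 / clock is the line time
              * in microseconds and the * 8 expresses it in eighths of a
              * microsecond; the IPS variant uses the CD clock instead of the
              * pixel clock.
              */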
2335
2336         return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2337                PIPE_WM_LINETIME_TIME(linetime);
2338 }
2339
2340 static void hsw_compute_wm_parameters(struct drm_device *dev,
2341                                       struct hsw_pipe_wm_parameters *params,
2342                                       uint32_t *wm,
2343                                       struct hsw_wm_maximums *lp_max_1_2,
2344                                       struct hsw_wm_maximums *lp_max_5_6)
2345 {
2346         struct drm_i915_private *dev_priv = dev->dev_private;
2347         struct drm_crtc *crtc;
2348         struct drm_plane *plane;
2349         uint64_t sskpd = I915_READ64(MCH_SSKPD);
2350         enum pipe pipe;
2351         int pipes_active = 0, sprites_enabled = 0;
2352
2353         if ((sskpd >> 56) & 0xFF)
2354                 wm[0] = (sskpd >> 56) & 0xFF;
2355         else
2356                 wm[0] = sskpd & 0xF;
2357         wm[1] = ((sskpd >> 4) & 0xFF) * 5;
2358         wm[2] = ((sskpd >> 12) & 0xFF) * 5;
2359         wm[3] = ((sskpd >> 20) & 0x1FF) * 5;
2360         wm[4] = ((sskpd >> 32) & 0x1FF) * 5;
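             /*
              * wm[0] is the WM0 (per-pipe) latency and wm[1..4] are the LP1-LP4
              * latencies, all read out of MCH_SSKPD. The * 5 scaling presumably
              * brings the LP fields into the same, finer units as wm[0]; the
              * /10000 factor in hsw_wm_method1/2 suggests those units are 0.1 us.
              */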
2361
2362         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2363                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2364                 struct hsw_pipe_wm_parameters *p;
2365
2366                 pipe = intel_crtc->pipe;
2367                 p = &params[pipe];
2368
2369                 p->active = intel_crtc_active(crtc);
2370                 if (!p->active)
2371                         continue;
2372
2373                 pipes_active++;
2374
2375                 p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
2376                 p->pixel_rate = hsw_wm_get_pixel_rate(dev, crtc);
2377                 p->pri_bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
2378                 p->cur_bytes_per_pixel = 4;
2379                 p->pri_horiz_pixels =
2380                         intel_crtc->config.requested_mode.hdisplay;
2381                 p->cur_horiz_pixels = 64;
2382         }
2383
2384         list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2385                 struct intel_plane *intel_plane = to_intel_plane(plane);
2386                 struct hsw_pipe_wm_parameters *p;
2387
2388                 pipe = intel_plane->pipe;
2389                 p = &params[pipe];
2390
2391                 p->sprite_enabled = intel_plane->wm.enable;
2392                 p->spr_bytes_per_pixel = intel_plane->wm.bytes_per_pixel;
2393                 p->spr_horiz_pixels = intel_plane->wm.horiz_pixels;
2394
2395                 if (p->sprite_enabled)
2396                         sprites_enabled++;
2397         }
2398
2399         if (pipes_active > 1) {
2400                 lp_max_1_2->pri = lp_max_5_6->pri = sprites_enabled ? 128 : 256;
2401                 lp_max_1_2->spr = lp_max_5_6->spr = 128;
2402                 lp_max_1_2->cur = lp_max_5_6->cur = 64;
2403         } else {
2404                 lp_max_1_2->pri = sprites_enabled ? 384 : 768;
2405                 lp_max_5_6->pri = sprites_enabled ? 128 : 768;
2406                 lp_max_1_2->spr = 384;
2407                 lp_max_5_6->spr = 640;
2408                 lp_max_1_2->cur = lp_max_5_6->cur = 255;
2409         }
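             /*
              * The values above describe a 768-entry data buffer in the single
              * pipe case: the 1/2 partitioning splits it 384/384 between primary
              * and sprite, while the 5/6 partitioning gives the sprite 640
              * entries and the primary 128. With more pipes active the per-plane
              * maximums are smaller.
              */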
2410         lp_max_1_2->fbc = lp_max_5_6->fbc = 15;
2411 }
2412
2413 static void hsw_compute_wm_results(struct drm_device *dev,
2414                                    struct hsw_pipe_wm_parameters *params,
2415                                    uint32_t *wm,
2416                                    struct hsw_wm_maximums *lp_maximums,
2417                                    struct hsw_wm_values *results)
2418 {
2419         struct drm_i915_private *dev_priv = dev->dev_private;
2420         struct drm_crtc *crtc;
2421         struct hsw_lp_wm_result lp_results[4] = {};
2422         enum pipe pipe;
2423         int level, max_level, wm_lp;
2424
2425         for (level = 1; level <= 4; level++)
2426                 if (!hsw_compute_lp_wm(wm[level], lp_maximums, params,
2427                                        &lp_results[level - 1]))
2428                         break;
2429         max_level = level - 1;
2430
2431         memset(results, 0, sizeof(*results));
2432         /* The spec says it is preferred to disable FBC WMs instead of disabling
2433          * a WM level. */
2434         results->enable_fbc_wm = true;
2435         for (level = 1; level <= max_level; level++) {
2436                 if (!lp_results[level - 1].fbc_enable) {
2437                         results->enable_fbc_wm = false;
2438                         break;
2439                 }
2440         }
2441
2442         for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2443                 const struct hsw_lp_wm_result *r;
2444
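                     /*
                      * Only three WM_LP registers exist, so when all four LP
                      * levels are usable (max_level == 4) the WM2/WM3 registers
                      * are programmed with the level 3 and level 4 results and
                      * level 2 is skipped.
                      */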
2445                 level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
2446                 if (level > max_level)
2447                         break;
2448
2449                 r = &lp_results[level - 1];
2450                 results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
2451                                                           r->fbc_val,
2452                                                           r->pri_val,
2453                                                           r->cur_val);
2454                 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2455         }
2456
2457         for_each_pipe(pipe)
2458                 results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, wm[0],
2459                                                              pipe,
2460                                                              &params[pipe]);
2461
2462         for_each_pipe(pipe) {
2463                 crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2464                 results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc);
2465         }
2466 }
2467
2468 /* Find the result with the highest LP level enabled. If both reach the same
2469  * level, prefer the one with the FBC WM enabled, and prefer r1 if they tie. */
2470 static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
2471                                                   struct hsw_wm_values *r2)
2472 {
2473         int i, val_r1 = 0, val_r2 = 0;
2474
2475         for (i = 0; i < 3; i++) {
2476                 if (r1->wm_lp[i] & WM3_LP_EN)
2477                         val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK;
2478                 if (r2->wm_lp[i] & WM3_LP_EN)
2479                         val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK;
2480         }
2481
2482         if (val_r1 == val_r2) {
2483                 if (r2->enable_fbc_wm && !r1->enable_fbc_wm)
2484                         return r2;
2485                 else
2486                         return r1;
2487         } else if (val_r1 > val_r2) {
2488                 return r1;
2489         } else {
2490                 return r2;
2491         }
2492 }
2493
2494 /*
2495  * The spec says we shouldn't write when we don't need to, because every write
2496  * causes the WMs to be re-evaluated, which costs some power.
2497  */
2498 static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
2499                                 struct hsw_wm_values *results,
2500                                 enum hsw_data_buf_partitioning partitioning)
2501 {
2502         struct hsw_wm_values previous;
2503         uint32_t val;
2504         enum hsw_data_buf_partitioning prev_partitioning;
2505         bool prev_enable_fbc_wm;
2506
2507         previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
2508         previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
2509         previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
2510         previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
2511         previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
2512         previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
2513         previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2514         previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2515         previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2516         previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
2517         previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
2518         previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
2519
2520         prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
2521                             HSW_DATA_BUF_PART_5_6 : HSW_DATA_BUF_PART_1_2;
2522
2523         prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
2524
2525         if (memcmp(results->wm_pipe, previous.wm_pipe,
2526                    sizeof(results->wm_pipe)) == 0 &&
2527             memcmp(results->wm_lp, previous.wm_lp,
2528                    sizeof(results->wm_lp)) == 0 &&
2529             memcmp(results->wm_lp_spr, previous.wm_lp_spr,
2530                    sizeof(results->wm_lp_spr)) == 0 &&
2531             memcmp(results->wm_linetime, previous.wm_linetime,
2532                    sizeof(results->wm_linetime)) == 0 &&
2533             partitioning == prev_partitioning &&
2534             results->enable_fbc_wm == prev_enable_fbc_wm)
2535                 return;
2536
2537         if (previous.wm_lp[2] != 0)
2538                 I915_WRITE(WM3_LP_ILK, 0);
2539         if (previous.wm_lp[1] != 0)
2540                 I915_WRITE(WM2_LP_ILK, 0);
2541         if (previous.wm_lp[0] != 0)
2542                 I915_WRITE(WM1_LP_ILK, 0);
2543
2544         if (previous.wm_pipe[0] != results->wm_pipe[0])
2545                 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2546         if (previous.wm_pipe[1] != results->wm_pipe[1])
2547                 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2548         if (previous.wm_pipe[2] != results->wm_pipe[2])
2549                 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2550
2551         if (previous.wm_linetime[0] != results->wm_linetime[0])
2552                 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2553         if (previous.wm_linetime[1] != results->wm_linetime[1])
2554                 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2555         if (previous.wm_linetime[2] != results->wm_linetime[2])
2556                 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2557
2558         if (prev_partitioning != partitioning) {
2559                 val = I915_READ(WM_MISC);
2560                 if (partitioning == HSW_DATA_BUF_PART_1_2)
2561                         val &= ~WM_MISC_DATA_PARTITION_5_6;
2562                 else
2563                         val |= WM_MISC_DATA_PARTITION_5_6;
2564                 I915_WRITE(WM_MISC, val);
2565         }
2566
2567         if (prev_enable_fbc_wm != results->enable_fbc_wm) {
2568                 val = I915_READ(DISP_ARB_CTL);
2569                 if (results->enable_fbc_wm)
2570                         val &= ~DISP_FBC_WM_DIS;
2571                 else
2572                         val |= DISP_FBC_WM_DIS;
2573                 I915_WRITE(DISP_ARB_CTL, val);
2574         }
2575
2576         if (previous.wm_lp_spr[0] != results->wm_lp_spr[0])
2577                 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2578         if (previous.wm_lp_spr[1] != results->wm_lp_spr[1])
2579                 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2580         if (previous.wm_lp_spr[2] != results->wm_lp_spr[2])
2581                 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2582
2583         if (results->wm_lp[0] != 0)
2584                 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2585         if (results->wm_lp[1] != 0)
2586                 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2587         if (results->wm_lp[2] != 0)
2588                 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2589 }
2590
2591 static void haswell_update_wm(struct drm_device *dev)
2592 {
2593         struct drm_i915_private *dev_priv = dev->dev_private;
2594         struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
2595         struct hsw_pipe_wm_parameters params[3];
2596         struct hsw_wm_values results_1_2, results_5_6, *best_results;
2597         uint32_t wm[5];
2598         enum hsw_data_buf_partitioning partitioning;
2599
2600         hsw_compute_wm_parameters(dev, params, wm, &lp_max_1_2, &lp_max_5_6);
2601
2602         hsw_compute_wm_results(dev, params, wm, &lp_max_1_2, &results_1_2);
2603         if (lp_max_1_2.pri != lp_max_5_6.pri) {
2604                 hsw_compute_wm_results(dev, params, wm, &lp_max_5_6,
2605                                        &results_5_6);
2606                 best_results = hsw_find_best_result(&results_1_2, &results_5_6);
2607         } else {
2608                 best_results = &results_1_2;
2609         }
2610
2611         partitioning = (best_results == &results_1_2) ?
2612                        HSW_DATA_BUF_PART_1_2 : HSW_DATA_BUF_PART_5_6;
2613
2614         hsw_write_wm_values(dev_priv, best_results, partitioning);
2615 }
2616
2617 static void haswell_update_sprite_wm(struct drm_device *dev, int pipe,
2618                                      uint32_t sprite_width, int pixel_size,
2619                                      bool enable)
2620 {
2621         struct drm_plane *plane;
2622
2623         list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2624                 struct intel_plane *intel_plane = to_intel_plane(plane);
2625
2626                 if (intel_plane->pipe == pipe) {
2627                         intel_plane->wm.enable = enable;
2628                         intel_plane->wm.horiz_pixels = sprite_width + 1;
2629                         intel_plane->wm.bytes_per_pixel = pixel_size;
2630                         break;
2631                 }
2632         }
2633
2634         haswell_update_wm(dev);
2635 }
2636
2637 static bool
2638 sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
2639                               uint32_t sprite_width, int pixel_size,
2640                               const struct intel_watermark_params *display,
2641                               int display_latency_ns, int *sprite_wm)
2642 {
2643         struct drm_crtc *crtc;
2644         int clock;
2645         int entries, tlb_miss;
2646
2647         crtc = intel_get_crtc_for_plane(dev, plane);
2648         if (!intel_crtc_active(crtc)) {
2649                 *sprite_wm = display->guard_size;
2650                 return false;
2651         }
2652
2653         clock = crtc->mode.clock;
2654
2655         /* Use the small buffer method to calculate the sprite watermark */
2656         entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
2657         tlb_miss = display->fifo_size*display->cacheline_size -
2658                 sprite_width * 8;
2659         if (tlb_miss > 0)
2660                 entries += tlb_miss;
2661         entries = DIV_ROUND_UP(entries, display->cacheline_size);
2662         *sprite_wm = entries + display->guard_size;
2663         if (*sprite_wm > (int)display->max_wm)
2664                 *sprite_wm = display->max_wm;
2665
2666         return true;
2667 }
2668
2669 static bool
2670 sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
2671                                 uint32_t sprite_width, int pixel_size,
2672                                 const struct intel_watermark_params *display,
2673                                 int latency_ns, int *sprite_wm)
2674 {
2675         struct drm_crtc *crtc;
2676         unsigned long line_time_us;
2677         int clock;
2678         int line_count, line_size;
2679         int small, large;
2680         int entries;
2681
2682         if (!latency_ns) {
2683                 *sprite_wm = 0;
2684                 return false;
2685         }
2686
2687         crtc = intel_get_crtc_for_plane(dev, plane);
2688         clock = crtc->mode.clock;
2689         if (!clock) {
2690                 *sprite_wm = 0;
2691                 return false;
2692         }
2693
2694         line_time_us = (sprite_width * 1000) / clock;
2695         if (!line_time_us) {
2696                 *sprite_wm = 0;
2697                 return false;
2698         }
2699
2700         line_count = (latency_ns / line_time_us + 1000) / 1000;
2701         line_size = sprite_width * pixel_size;
2702
2703         /* Use the minimum of the small and large buffer method for the sprite */
2704         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
2705         large = line_count * line_size;
2706
2707         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
2708         *sprite_wm = entries + display->guard_size;
2709
2710         return *sprite_wm <= 0x3ff;
2711 }
2712
2713 static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
2714                                          uint32_t sprite_width, int pixel_size,
2715                                          bool enable)
2716 {
2717         struct drm_i915_private *dev_priv = dev->dev_private;
2718         int latency = SNB_READ_WM0_LATENCY() * 100;     /* 0.1us units -> ns */
2719         u32 val;
2720         int sprite_wm, reg;
2721         int ret;
2722
2723         if (!enable)
2724                 return;
2725
2726         switch (pipe) {
2727         case 0:
2728                 reg = WM0_PIPEA_ILK;
2729                 break;
2730         case 1:
2731                 reg = WM0_PIPEB_ILK;
2732                 break;
2733         case 2:
2734                 reg = WM0_PIPEC_IVB;
2735                 break;
2736         default:
2737                 return; /* bad pipe */
2738         }
2739
2740         ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
2741                                             &sandybridge_display_wm_info,
2742                                             latency, &sprite_wm);
2743         if (!ret) {
2744                 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n",
2745                               pipe_name(pipe));
2746                 return;
2747         }
2748
2749         val = I915_READ(reg);
2750         val &= ~WM0_PIPE_SPRITE_MASK;
2751         I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
2752         DRM_DEBUG_KMS("sprite watermarks for pipe %c - %d\n", pipe_name(pipe), sprite_wm);
2753
2754
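             /* The LP1-3 latency values are reported in 0.5us units, hence
              * the "* 500" conversions to nanoseconds below. */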
2755         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2756                                               pixel_size,
2757                                               &sandybridge_display_srwm_info,
2758                                               SNB_READ_WM1_LATENCY() * 500,
2759                                               &sprite_wm);
2760         if (!ret) {
2761                 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
2762                               pipe_name(pipe));
2763                 return;
2764         }
2765         I915_WRITE(WM1S_LP_ILK, sprite_wm);
2766
2767         /* Only IVB has two more LP watermarks for sprite */
2768         if (!IS_IVYBRIDGE(dev))
2769                 return;
2770
2771         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2772                                               pixel_size,
2773                                               &sandybridge_display_srwm_info,
2774                                               SNB_READ_WM2_LATENCY() * 500,
2775                                               &sprite_wm);
2776         if (!ret) {
2777                 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
2778                               pipe_name(pipe));
2779                 return;
2780         }
2781         I915_WRITE(WM2S_LP_IVB, sprite_wm);
2782
2783         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2784                                               pixel_size,
2785                                               &sandybridge_display_srwm_info,
2786                                               SNB_READ_WM3_LATENCY() * 500,
2787                                               &sprite_wm);
2788         if (!ret) {
2789                 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
2790                               pipe_name(pipe));
2791                 return;
2792         }
2793         I915_WRITE(WM3S_LP_IVB, sprite_wm);
2794 }
2795
2796 /**
2797  * intel_update_watermarks - update FIFO watermark values based on current modes
2798  *
2799  * Calculate watermark values for the various WM regs based on current mode
2800  * and plane configuration.
2801  *
2802  * There are several cases to deal with here:
2803  *   - normal (i.e. non-self-refresh)
2804  *   - self-refresh (SR) mode
2805  *   - lines are large relative to FIFO size (buffer can hold up to 2)
2806  *   - lines are small relative to FIFO size (buffer can hold more than 2
2807  *     lines), so need to account for TLB latency
2808  *
2809  *   The normal calculation is:
2810  *     watermark = dotclock * bytes per pixel * latency
2811  *   where latency is platform & configuration dependent (we assume pessimal
2812  *   values here).
2813  *
2814  *   The SR calculation is:
2815  *     watermark = (trunc(latency/line time)+1) * surface width *
2816  *       bytes per pixel
2817  *   where
2818  *     line time = htotal / dotclock
2819  *     surface width = hdisplay for normal plane and 64 for cursor
2820  *   and latency is assumed to be high, as above.
2821  *
2822  * The final value programmed to the register should always be rounded up,
2823  * and include an extra 2 entries to account for clock crossings.
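      *
      * As an illustrative example (numbers made up, not taken from any
      * platform table): a 100 MHz dot clock at 4 bytes per pixel with 12us
      * of latency needs roughly 100000 kHz * 4 * 12000 ns / 10^6 = 4800
      * bytes of FIFO, i.e. 75 cachelines of 64 bytes, before the guard
      * entries are added.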
2824  *
2825  * We don't use the sprite, so we can ignore that.  And on Crestline we have
2826  * to set the non-SR watermarks to 8.
2827  */
2828 void intel_update_watermarks(struct drm_device *dev)
2829 {
2830         struct drm_i915_private *dev_priv = dev->dev_private;
2831
2832         if (dev_priv->display.update_wm)
2833                 dev_priv->display.update_wm(dev);
2834 }
2835
2836 void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
2837                                     uint32_t sprite_width, int pixel_size,
2838                                     bool enable)
2839 {
2840         struct drm_i915_private *dev_priv = dev->dev_private;
2841
2842         if (dev_priv->display.update_sprite_wm)
2843                 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
2844                                                    pixel_size, enable);
2845 }
2846
2847 static struct drm_i915_gem_object *
2848 intel_alloc_context_page(struct drm_device *dev)
2849 {
2850         struct drm_i915_gem_object *ctx;
2851         int ret;
2852
2853         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2854
2855         ctx = i915_gem_alloc_object(dev, 4096);
2856         if (!ctx) {
2857                 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
2858                 return NULL;
2859         }
2860
2861         ret = i915_gem_object_pin(ctx, 4096, true, false);
2862         if (ret) {
2863                 DRM_ERROR("failed to pin power context: %d\n", ret);
2864                 goto err_unref;
2865         }
2866
2867         ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
2868         if (ret) {
2869                 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
2870                 goto err_unpin;
2871         }
2872
2873         return ctx;
2874
2875 err_unpin:
2876         i915_gem_object_unpin(ctx);
2877 err_unref:
2878         drm_gem_object_unreference(&ctx->base);
2879         return NULL;
2880 }
2881
2882 /**
2883  * Lock protecting IPS related data structures
2884  */
2885 DEFINE_SPINLOCK(mchdev_lock);
2886
2887 /* Global for IPS driver to get at the current i915 device. Protected by
2888  * mchdev_lock. */
2889 static struct drm_i915_private *i915_mch_dev;
2890
2891 bool ironlake_set_drps(struct drm_device *dev, u8 val)
2892 {
2893         struct drm_i915_private *dev_priv = dev->dev_private;
2894         u16 rgvswctl;
2895
2896         assert_spin_locked(&mchdev_lock);
2897
2898         rgvswctl = I915_READ16(MEMSWCTL);
2899         if (rgvswctl & MEMCTL_CMD_STS) {
2900                 DRM_DEBUG("gpu busy, RCS change rejected\n");
2901                 return false; /* still busy with another command */
2902         }
2903
2904         rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
2905                 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
2906         I915_WRITE16(MEMSWCTL, rgvswctl);
2907         POSTING_READ16(MEMSWCTL);
2908
2909         rgvswctl |= MEMCTL_CMD_STS;
2910         I915_WRITE16(MEMSWCTL, rgvswctl);
2911
2912         return true;
2913 }
2914
2915 static void ironlake_enable_drps(struct drm_device *dev)
2916 {
2917         struct drm_i915_private *dev_priv = dev->dev_private;
2918         u32 rgvmodectl = I915_READ(MEMMODECTL);
2919         u8 fmax, fmin, fstart, vstart;
2920
2921         spin_lock_irq(&mchdev_lock);
2922
2923         /* Enable temp reporting */
2924         I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2925         I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
2926
2927         /* 100ms RC evaluation intervals */
2928         I915_WRITE(RCUPEI, 100000);
2929         I915_WRITE(RCDNEI, 100000);
2930
2931         /* Set max/min thresholds to 90ms and 80ms respectively */
2932         I915_WRITE(RCBMAXAVG, 90000);
2933         I915_WRITE(RCBMINAVG, 80000);
2934
2935         I915_WRITE(MEMIHYST, 1);
2936
2937         /* Set up min, max, and cur for interrupt handling */
2938         fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
2939         fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
2940         fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
2941                 MEMMODE_FSTART_SHIFT;
2942
2943         vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
2944                 PXVFREQ_PX_SHIFT;
2945
2946         dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
2947         dev_priv->ips.fstart = fstart;
2948
2949         dev_priv->ips.max_delay = fstart;
2950         dev_priv->ips.min_delay = fmin;
2951         dev_priv->ips.cur_delay = fstart;
2952
2953         DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
2954                          fmax, fmin, fstart);
2955
2956         I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
2957
2958         /*
2959          * Interrupts will be enabled in ironlake_irq_postinstall
2960          */
2961
2962         I915_WRITE(VIDSTART, vstart);
2963         POSTING_READ(VIDSTART);
2964
2965         rgvmodectl |= MEMMODE_SWMODE_EN;
2966         I915_WRITE(MEMMODECTL, rgvmodectl);
2967
2968         if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2969                 DRM_ERROR("stuck trying to change perf mode\n");
2970         mdelay(1);
2971
2972         ironlake_set_drps(dev, fstart);
2973
2974         dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2975                 I915_READ(0x112e0);
2976         dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
2977         dev_priv->ips.last_count2 = I915_READ(0x112f4);
2978         getrawmonotonic(&dev_priv->ips.last_time2);
2979
2980         spin_unlock_irq(&mchdev_lock);
2981 }
2982
2983 static void ironlake_disable_drps(struct drm_device *dev)
2984 {
2985         struct drm_i915_private *dev_priv = dev->dev_private;
2986         u16 rgvswctl;
2987
2988         spin_lock_irq(&mchdev_lock);
2989
2990         rgvswctl = I915_READ16(MEMSWCTL);
2991
2992         /* Ack interrupts, disable EFC interrupt */
2993         I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
2994         I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
2995         I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
2996         I915_WRITE(DEIIR, DE_PCU_EVENT);
2997         I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
2998
2999         /* Go back to the starting frequency */
3000         ironlake_set_drps(dev, dev_priv->ips.fstart);
3001         mdelay(1);
3002         rgvswctl |= MEMCTL_CMD_STS;
3003         I915_WRITE(MEMSWCTL, rgvswctl);
3004         mdelay(1);
3005
3006         spin_unlock_irq(&mchdev_lock);
3007 }
3008
3009 /* There's a funny hw issue where the hw returns all 0 when reading from
3010  * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
3011  * ourselves, instead of doing an rmw cycle (which might result in us clearing
3012  * all limits and the GPU getting stuck at whatever frequency it is currently at).
3013  */
3014 static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
3015 {
3016         u32 limits;
3017
3018         limits = 0;
3019
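             /* GEN6_RP_INTERRUPT_LIMITS wants the maximum frequency limit in
              * bits 31:24 and the minimum in bits 23:16, matching the shifts
              * below. */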
3020         if (*val >= dev_priv->rps.max_delay)
3021                 *val = dev_priv->rps.max_delay;
3022         limits |= dev_priv->rps.max_delay << 24;
3023
3024         /* Only set the down limit when we've reached the lowest level to avoid
3025          * getting more interrupts, otherwise leave this clear. This prevents a
3026          * race in the hw when coming out of rc6: There's a tiny window where
3027          * the hw runs at the minimal clock before selecting the desired
3028          * frequency, if the down threshold expires in that window we will not
3029          * receive a down interrupt. */
3030         if (*val <= dev_priv->rps.min_delay) {
3031                 *val = dev_priv->rps.min_delay;
3032                 limits |= dev_priv->rps.min_delay << 16;
3033         }
3034
3035         return limits;
3036 }
3037
3038 void gen6_set_rps(struct drm_device *dev, u8 val)
3039 {
3040         struct drm_i915_private *dev_priv = dev->dev_private;
3041         u32 limits = gen6_rps_limits(dev_priv, &val);
3042
3043         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3044         WARN_ON(val > dev_priv->rps.max_delay);
3045         WARN_ON(val < dev_priv->rps.min_delay);
3046
3047         if (val == dev_priv->rps.cur_delay)
3048                 return;
3049
3050         if (IS_HASWELL(dev))
3051                 I915_WRITE(GEN6_RPNSWREQ,
3052                            HSW_FREQUENCY(val));
3053         else
3054                 I915_WRITE(GEN6_RPNSWREQ,
3055                            GEN6_FREQUENCY(val) |
3056                            GEN6_OFFSET(0) |
3057                            GEN6_AGGRESSIVE_TURBO);
3058
3059         /* Make sure we continue to get interrupts
3060          * until we hit the minimum or maximum frequencies.
3061          */
3062         I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
3063
3064         POSTING_READ(GEN6_RPNSWREQ);
3065
3066         dev_priv->rps.cur_delay = val;
3067
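             /* RPS frequency steps are in units of 50 MHz (see
              * gen6_enable_rps), so scale the step for the tracepoint. */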
3068         trace_intel_gpu_freq_change(val * 50);
3069 }
3070
3071 /*
3072  * Wait until the previous freq change has completed,
3073  * or the timeout elapsed, and then update our notion
3074  * of the current GPU frequency.
3075  */
3076 static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
3077 {
3078         u32 pval;
3079
3080         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3081
3082         if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
3083                 DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
3084
3085         pval >>= 8;
3086
3087         if (pval != dev_priv->rps.cur_delay)
3088                 DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d MHz (%u)\n",
3089                                  vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay),
3090                                  dev_priv->rps.cur_delay,
3091                                  vlv_gpu_freq(dev_priv->mem_freq, pval), pval);
3092
3093         dev_priv->rps.cur_delay = pval;
3094 }
3095
3096 void valleyview_set_rps(struct drm_device *dev, u8 val)
3097 {
3098         struct drm_i915_private *dev_priv = dev->dev_private;
3099
3100         gen6_rps_limits(dev_priv, &val);
3101
3102         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3103         WARN_ON(val > dev_priv->rps.max_delay);
3104         WARN_ON(val < dev_priv->rps.min_delay);
3105
3106         vlv_update_rps_cur_delay(dev_priv);
3107
3108         DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3109                          vlv_gpu_freq(dev_priv->mem_freq,
3110                                       dev_priv->rps.cur_delay),
3111                          dev_priv->rps.cur_delay,
3112                          vlv_gpu_freq(dev_priv->mem_freq, val), val);
3113
3114         if (val == dev_priv->rps.cur_delay)
3115                 return;
3116
3117         vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3118
3119         dev_priv->rps.cur_delay = val;
3120
3121         trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
3122 }
3123
3124 static void gen6_disable_rps_interrupts(struct drm_device *dev)
3125 {
3126         struct drm_i915_private *dev_priv = dev->dev_private;
3127
3128         I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3129         I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
3130         /* Masking all PM interrupts here can't race with the rps work item
3131          * unmasking them again, because that path uses a different register
3132          * (PMIMR) to mask PM interrupts. The only risk is leaving stale bits
3133          * in PMIIR and PMIMR, which gen6_enable_rps will clean up. */
3134
3135         spin_lock_irq(&dev_priv->irq_lock);
3136         dev_priv->rps.pm_iir = 0;
3137         spin_unlock_irq(&dev_priv->irq_lock);
3138
3139         I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
3140 }
3141
3142 static void gen6_disable_rps(struct drm_device *dev)
3143 {
3144         struct drm_i915_private *dev_priv = dev->dev_private;
3145
3146         I915_WRITE(GEN6_RC_CONTROL, 0);
3147         I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
3148
3149         gen6_disable_rps_interrupts(dev);
3150 }
3151
3152 static void valleyview_disable_rps(struct drm_device *dev)
3153 {
3154         struct drm_i915_private *dev_priv = dev->dev_private;
3155
3156         I915_WRITE(GEN6_RC_CONTROL, 0);
3157
3158         gen6_disable_rps_interrupts(dev);
3159
3160         if (dev_priv->vlv_pctx) {
3161                 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
3162                 dev_priv->vlv_pctx = NULL;
3163         }
3164 }
3165
3166 int intel_enable_rc6(const struct drm_device *dev)
3167 {
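             /* The returned value is a mask of INTEL_RC6*_ENABLE bits; a
              * negative i915_enable_rc6 falls through to the per-platform
              * choice below. */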
3168         /* Respect the kernel parameter if it is set */
3169         if (i915_enable_rc6 >= 0)
3170                 return i915_enable_rc6;
3171
3172         /* Disable RC6 on Ironlake */
3173         if (INTEL_INFO(dev)->gen == 5)
3174                 return 0;
3175
3176         if (IS_HASWELL(dev)) {
3177                 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
3178                 return INTEL_RC6_ENABLE;
3179         }
3180
3181         /* snb/ivb have more than one rc6 state. */
3182         if (INTEL_INFO(dev)->gen == 6) {
3183                 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
3184                 return INTEL_RC6_ENABLE;
3185         }
3186
3187         DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
3188         return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3189 }
3190
3191 static void gen6_enable_rps_interrupts(struct drm_device *dev)
3192 {
3193         struct drm_i915_private *dev_priv = dev->dev_private;
3194
3195         spin_lock_irq(&dev_priv->irq_lock);
3196         WARN_ON(dev_priv->rps.pm_iir);
3197         I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
3198         I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
3199         spin_unlock_irq(&dev_priv->irq_lock);
3200         /* unmask all PM interrupts */
3201         I915_WRITE(GEN6_PMINTRMSK, 0);
3202 }
3203
3204 static void gen6_enable_rps(struct drm_device *dev)
3205 {
3206         struct drm_i915_private *dev_priv = dev->dev_private;
3207         struct intel_ring_buffer *ring;
3208         u32 rp_state_cap;
3209         u32 gt_perf_status;
3210         u32 rc6vids, pcu_mbox, rc6_mask = 0;
3211         u32 gtfifodbg;
3212         int rc6_mode;
3213         int i, ret;
3214
3215         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3216
3217         /* Here begins a magic sequence of register writes to enable
3218          * auto-downclocking.
3219          *
3220          * Perhaps there might be some value in exposing these to
3221          * userspace...
3222          */
3223         I915_WRITE(GEN6_RC_STATE, 0);
3224
3225         /* Clear the DBG now so we don't confuse earlier errors */
3226         if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3227                 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3228                 I915_WRITE(GTFIFODBG, gtfifodbg);
3229         }
3230
3231         gen6_gt_force_wake_get(dev_priv);
3232
3233         rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3234         gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
3235
3236         /* In units of 50MHz */
3237         dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
3238         dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
3239         dev_priv->rps.cur_delay = 0;
3240
3241         /* disable the counters and set deterministic thresholds */
3242         I915_WRITE(GEN6_RC_CONTROL, 0);
3243
3244         I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
3245         I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
3246         I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
3247         I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3248         I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3249
3250         for_each_ring(ring, dev_priv, i)
3251                 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3252
3253         I915_WRITE(GEN6_RC_SLEEP, 0);
3254         I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
3255         I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
3256         I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
3257         I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
3258
3259         /* Check if we are enabling RC6 */
3260         rc6_mode = intel_enable_rc6(dev_priv->dev);
3261         if (rc6_mode & INTEL_RC6_ENABLE)
3262                 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
3263
3264         /* We don't use those on Haswell */
3265         if (!IS_HASWELL(dev)) {
3266                 if (rc6_mode & INTEL_RC6p_ENABLE)
3267                         rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
3268
3269                 if (rc6_mode & INTEL_RC6pp_ENABLE)
3270                         rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3271         }
3272
3273         DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
3274                         (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3275                         (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3276                         (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3277
3278         I915_WRITE(GEN6_RC_CONTROL,
3279                    rc6_mask |
3280                    GEN6_RC_CTL_EI_MODE(1) |
3281                    GEN6_RC_CTL_HW_ENABLE);
3282
3283         if (IS_HASWELL(dev)) {
3284                 I915_WRITE(GEN6_RPNSWREQ,
3285                            HSW_FREQUENCY(10));
3286                 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3287                            HSW_FREQUENCY(12));
3288         } else {
3289                 I915_WRITE(GEN6_RPNSWREQ,
3290                            GEN6_FREQUENCY(10) |
3291                            GEN6_OFFSET(0) |
3292                            GEN6_AGGRESSIVE_TURBO);
3293                 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3294                            GEN6_FREQUENCY(12));
3295         }
3296
3297         I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
3298         I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3299                    dev_priv->rps.max_delay << 24 |
3300                    dev_priv->rps.min_delay << 16);
3301
3302         I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
3303         I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3304         I915_WRITE(GEN6_RP_UP_EI, 66000);
3305         I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3306
3307         I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3308         I915_WRITE(GEN6_RP_CONTROL,
3309                    GEN6_RP_MEDIA_TURBO |
3310                    GEN6_RP_MEDIA_HW_NORMAL_MODE |
3311                    GEN6_RP_MEDIA_IS_GFX |
3312                    GEN6_RP_ENABLE |
3313                    GEN6_RP_UP_BUSY_AVG |
3314                    (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
3315
3316         ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3317         if (!ret) {
3318                 pcu_mbox = 0;
3319                 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
3320                 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
3321                         DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
3322                                          (dev_priv->rps.max_delay & 0xff) * 50,
3323                                          (pcu_mbox & 0xff) * 50);
3324                         dev_priv->rps.hw_max = pcu_mbox & 0xff;
3325                 }
3326         } else {
3327                 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3328         }
3329
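             /* Start out at the frequency the hardware reports it is
              * currently running at (bits 15:8 of GT_PERF_STATUS). */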
3330         gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
3331
3332         gen6_enable_rps_interrupts(dev);
3333
3334         rc6vids = 0;
3335         ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
3336         if (IS_GEN6(dev) && ret) {
3337                 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
3338         } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
3339                 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
3340                           GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
3341                 rc6vids &= 0xffff00;
3342                 rc6vids |= GEN6_ENCODE_RC6_VID(450);
3343                 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
3344                 if (ret)
3345                         DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
3346         }
3347
3348         gen6_gt_force_wake_put(dev_priv);
3349 }
3350
3351 static void gen6_update_ring_freq(struct drm_device *dev)
3352 {
3353         struct drm_i915_private *dev_priv = dev->dev_private;
3354         int min_freq = 15;
3355         unsigned int gpu_freq;
3356         unsigned int max_ia_freq, min_ring_freq;
3357         int scaling_factor = 180;
3358
3359         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3360
3361         max_ia_freq = cpufreq_quick_get_max(0);
3362         /*
3363          * Default to measured freq if none found, PCU will ensure we don't go
3364          * over
3365          */
3366         if (!max_ia_freq)
3367                 max_ia_freq = tsc_khz;
3368
3369         /* Convert from kHz to MHz */
3370         max_ia_freq /= 1000;
3371
3372         min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK);
3373         /* convert DDR frequency from units of 133.3MHz to bandwidth */
3374         min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3;
3375
3376         /*
3377          * For each potential GPU frequency, load a ring frequency we'd like
3378          * to use for memory access.  We do this by specifying the IA frequency
3379          * the PCU should use as a reference to determine the ring frequency.
3380          */
3381         for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
3382              gpu_freq--) {
3383                 int diff = dev_priv->rps.max_delay - gpu_freq;
3384                 unsigned int ia_freq = 0, ring_freq = 0;
3385
3386                 if (IS_HASWELL(dev)) {
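                             /* Haswell has a separate ring clock domain;
                              * request roughly 1.25x the GPU clock (rounded
                              * up), but never below the DDR-derived minimum
                              * computed above. */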
3387                         ring_freq = (gpu_freq * 5 + 3) / 4;
3388                         ring_freq = max(min_ring_freq, ring_freq);
3389                         /* leave ia_freq as the default, chosen by cpufreq */
3390                 } else {
3391                         /* On older processors, there is no separate ring
3392                          * clock domain, so in order to boost the bandwidth
3393                          * of the ring, we need to upclock the CPU (ia_freq).
3394                          *
3395                          * For GPU frequencies less than 750MHz,
3396                          * just use the lowest ring freq.
3397                          */
3398                         if (gpu_freq < min_freq)
3399                                 ia_freq = 800;
3400                         else
3401                                 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
3402                         ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
3403                 }
3404
3405                 sandybridge_pcode_write(dev_priv,
3406                                         GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
3407                                         ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
3408                                         ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
3409                                         gpu_freq);
3410         }
3411 }
3412
3413 int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
3414 {
3415         u32 val, rp0;
3416
3417         val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
3418
3419         rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
3420         /* Clamp to max */
3421         rp0 = min_t(u32, rp0, 0xea);
3422
3423         return rp0;
3424 }
3425
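     /* The RPe frequency is read from fuses: the low bits come from
      * IOSF_NC_FB_GFX_FMAX_FUSE_LO and the upper bits from
      * IOSF_NC_FB_GFX_FMAX_FUSE_HI, which is why the second read is shifted
      * left by 5 before being OR'd in. */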
3426 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3427 {
3428         u32 val, rpe;
3429
3430         val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
3431         rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
3432         val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
3433         rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
3434
3435         return rpe;
3436 }
3437
3438 int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3439 {
3440         return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3441 }
3442
3443 static void vlv_rps_timer_work(struct work_struct *work)
3444 {
3445         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
3446                                                     rps.vlv_work.work);
3447
3448         /*
3449          * Timer fired, we must be idle.  Drop to min voltage state.
3450          * Note: we use RPe here since it should match the
3451          * Vmin we were shooting for.  That should give us better
3452          * perf when we come back out of RC6 than if we used the
3453          * min freq available.
3454          */
3455         mutex_lock(&dev_priv->rps.hw_lock);
3456         if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
3457                 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3458         mutex_unlock(&dev_priv->rps.hw_lock);
3459 }
3460
3461 static void valleyview_setup_pctx(struct drm_device *dev)
3462 {
3463         struct drm_i915_private *dev_priv = dev->dev_private;
3464         struct drm_i915_gem_object *pctx;
3465         unsigned long pctx_paddr;
3466         u32 pcbr;
3467         int pctx_size = 24*1024;
3468
3469         pcbr = I915_READ(VLV_PCBR);
3470         if (pcbr) {
3471                 /* BIOS set it up already, grab the pre-alloc'd space */
3472                 int pcbr_offset;
3473
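                     /* PCBR holds a 4K-aligned physical address inside stolen
                      * memory; turn it into an offset from the stolen base so
                      * the BIOS allocation can be wrapped in a GEM object
                      * below. */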
3474                 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
3475                 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
3476                                                                       pcbr_offset,
3477                                                                       I915_GTT_OFFSET_NONE,
3478                                                                       pctx_size);
3479                 goto out;
3480         }
3481
3482         /*
3483          * From the Gunit register HAS:
3484          * The Gfx driver is expected to program this register and ensure
3485          * proper allocation within Gfx stolen memory.  For example, this
3486          * register should be programmed such that the PCBR range does not
3487          * overlap with other ranges, such as the frame buffer, protected
3488          * memory, or any other relevant ranges.
3489          */
3490         pctx = i915_gem_object_create_stolen(dev, pctx_size);
3491         if (!pctx) {
3492                 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
3493                 return;
3494         }
3495
3496         pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
3497         I915_WRITE(VLV_PCBR, pctx_paddr);
3498
3499 out:
3500         dev_priv->vlv_pctx = pctx;
3501 }
3502
3503 static void valleyview_enable_rps(struct drm_device *dev)
3504 {
3505         struct drm_i915_private *dev_priv = dev->dev_private;
3506         struct intel_ring_buffer *ring;
3507         u32 gtfifodbg, val;
3508         int i;
3509
3510         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3511
3512         if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3513                 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3514                 I915_WRITE(GTFIFODBG, gtfifodbg);
3515         }
3516
3517         valleyview_setup_pctx(dev);
3518
3519         gen6_gt_force_wake_get(dev_priv);
3520
3521         I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
3522         I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3523         I915_WRITE(GEN6_RP_UP_EI, 66000);
3524         I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3525
3526         I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3527
3528         I915_WRITE(GEN6_RP_CONTROL,
3529                    GEN6_RP_MEDIA_TURBO |
3530                    GEN6_RP_MEDIA_HW_NORMAL_MODE |
3531                    GEN6_RP_MEDIA_IS_GFX |
3532                    GEN6_RP_ENABLE |
3533                    GEN6_RP_UP_BUSY_AVG |
3534                    GEN6_RP_DOWN_IDLE_CONT);
3535
3536         I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
3537         I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3538         I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3539
3540         for_each_ring(ring, dev_priv, i)
3541                 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3542
3543         I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
3544
3545         /* allows RC6 residency counter to work */
3546         I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
3547         I915_WRITE(GEN6_RC_CONTROL,
3548                    GEN7_RC_CTL_TO_MODE);
3549
3550         val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
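             /* Bits 7:6 of the Punit frequency status encode the DDR speed,
              * which vlv_gpu_freq() needs in order to convert raw steps into
              * MHz. */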
3551         switch ((val >> 6) & 3) {
3552         case 0:
3553         case 1:
3554                 dev_priv->mem_freq = 800;
3555                 break;
3556         case 2:
3557                 dev_priv->mem_freq = 1066;
3558                 break;
3559         case 3:
3560                 dev_priv->mem_freq = 1333;
3561                 break;
3562         }
3563         DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
3564
3565         DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
3566         DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
3567
3568         dev_priv->rps.cur_delay = (val >> 8) & 0xff;
3569         DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
3570                          vlv_gpu_freq(dev_priv->mem_freq,
3571                                       dev_priv->rps.cur_delay),
3572                          dev_priv->rps.cur_delay);
3573
3574         dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
3575         dev_priv->rps.hw_max = dev_priv->rps.max_delay;
3576         DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
3577                          vlv_gpu_freq(dev_priv->mem_freq,
3578                                       dev_priv->rps.max_delay),
3579                          dev_priv->rps.max_delay);
3580
3581         dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
3582         DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
3583                          vlv_gpu_freq(dev_priv->mem_freq,
3584                                       dev_priv->rps.rpe_delay),
3585                          dev_priv->rps.rpe_delay);
3586
3587         dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
3588         DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3589                          vlv_gpu_freq(dev_priv->mem_freq,
3590                                       dev_priv->rps.min_delay),
3591                          dev_priv->rps.min_delay);
3592
3593         DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
3594                          vlv_gpu_freq(dev_priv->mem_freq,
3595                                       dev_priv->rps.rpe_delay),
3596                          dev_priv->rps.rpe_delay);
3597
3598         INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
3599
3600         valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3601
3602         gen6_enable_rps_interrupts(dev);
3603
3604         gen6_gt_force_wake_put(dev_priv);
3605 }
3606
3607 void ironlake_teardown_rc6(struct drm_device *dev)
3608 {
3609         struct drm_i915_private *dev_priv = dev->dev_private;
3610
3611         if (dev_priv->ips.renderctx) {
3612                 i915_gem_object_unpin(dev_priv->ips.renderctx);
3613                 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
3614                 dev_priv->ips.renderctx = NULL;
3615         }
3616
3617         if (dev_priv->ips.pwrctx) {
3618                 i915_gem_object_unpin(dev_priv->ips.pwrctx);
3619                 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
3620                 dev_priv->ips.pwrctx = NULL;
3621         }
3622 }
3623
3624 static void ironlake_disable_rc6(struct drm_device *dev)
3625 {
3626         struct drm_i915_private *dev_priv = dev->dev_private;
3627
3628         if (I915_READ(PWRCTXA)) {
3629                 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
3630                 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
3631                 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
3632                          50);
3633
3634                 I915_WRITE(PWRCTXA, 0);
3635                 POSTING_READ(PWRCTXA);
3636
3637                 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
3638                 POSTING_READ(RSTDBYCTL);
3639         }
3640 }
3641
3642 static int ironlake_setup_rc6(struct drm_device *dev)
3643 {
3644         struct drm_i915_private *dev_priv = dev->dev_private;
3645
3646         if (dev_priv->ips.renderctx == NULL)
3647                 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
3648         if (!dev_priv->ips.renderctx)
3649                 return -ENOMEM;
3650
3651         if (dev_priv->ips.pwrctx == NULL)
3652                 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
3653         if (!dev_priv->ips.pwrctx) {
3654                 ironlake_teardown_rc6(dev);
3655                 return -ENOMEM;
3656         }
3657
3658         return 0;
3659 }
3660
3661 static void ironlake_enable_rc6(struct drm_device *dev)
3662 {
3663         struct drm_i915_private *dev_priv = dev->dev_private;
3664         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
3665         bool was_interruptible;
3666         int ret;
3667
3668         /* rc6 disabled by default due to repeated reports of hanging during
3669          * boot and resume.
3670          */
3671         if (!intel_enable_rc6(dev))
3672                 return;
3673
3674         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3675
3676         ret = ironlake_setup_rc6(dev);
3677         if (ret)
3678                 return;
3679
3680         was_interruptible = dev_priv->mm.interruptible;
3681         dev_priv->mm.interruptible = false;
3682
3683         /*
3684          * GPU can automatically power down the render unit if given a page
3685          * to save state.
3686          */
3687         ret = intel_ring_begin(ring, 6);
3688         if (ret) {
3689                 ironlake_teardown_rc6(dev);
3690                 dev_priv->mm.interruptible = was_interruptible;
3691                 return;
3692         }
3693
3694         intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
3695         intel_ring_emit(ring, MI_SET_CONTEXT);
3696         intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
3697                         MI_MM_SPACE_GTT |
3698                         MI_SAVE_EXT_STATE_EN |
3699                         MI_RESTORE_EXT_STATE_EN |
3700                         MI_RESTORE_INHIBIT);
3701         intel_ring_emit(ring, MI_SUSPEND_FLUSH);
3702         intel_ring_emit(ring, MI_NOOP);
3703         intel_ring_emit(ring, MI_FLUSH);
3704         intel_ring_advance(ring);
3705
3706         /*
3707          * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
3708          * does an implicit flush; combined with the MI_FLUSH above, it should be
3709          * safe to assume that renderctx is valid.
3710          */
3711         ret = intel_ring_idle(ring);
3712         dev_priv->mm.interruptible = was_interruptible;
3713         if (ret) {
3714                 DRM_ERROR("failed to enable ironlake power savings\n");
3715                 ironlake_teardown_rc6(dev);
3716                 return;
3717         }
3718
3719         I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
3720         I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
3721 }
3722
3723 static unsigned long intel_pxfreq(u32 vidfreq)
3724 {
3725         unsigned long freq;
3726         int div = (vidfreq & 0x3f0000) >> 16;
3727         int post = (vidfreq & 0x3000) >> 12;
3728         int pre = (vidfreq & 0x7);
3729
3730         if (!pre)
3731                 return 0;
3732
3733         freq = ((div * 133333) / ((1<<post) * pre));
3734
3735         return freq;
3736 }
3737
3738 static const struct cparams {
3739         u16 i;
3740         u16 t;
3741         u16 m;
3742         u16 c;
3743 } cparams[] = {
3744         { 1, 1333, 301, 28664 },
3745         { 1, 1066, 294, 24460 },
3746         { 1, 800, 294, 25192 },
3747         { 0, 1333, 276, 27605 },
3748         { 0, 1066, 276, 27605 },
3749         { 0, 800, 231, 23784 },
3750 };
3751
3752 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
3753 {
3754         u64 total_count, diff, ret;
3755         u32 count1, count2, count3, m = 0, c = 0;
3756         unsigned long now = jiffies_to_msecs(jiffies), diff1;
3757         int i;
3758
3759         assert_spin_locked(&mchdev_lock);
3760
3761         diff1 = now - dev_priv->ips.last_time1;
3762
3763         /* Prevent division-by-zero if we are asking too fast.
3764          * Also, we don't get interesting results if we are polling
3765          * faster than once in 10ms, so just return the saved value
3766          * in such cases.
3767          */
3768         if (diff1 <= 10)
3769                 return dev_priv->ips.chipset_power;
3770
3771         count1 = I915_READ(DMIEC);
3772         count2 = I915_READ(DDREC);
3773         count3 = I915_READ(CSIEC);
3774
3775         total_count = count1 + count2 + count3;
3776
3777         /* FIXME: handle per-counter overflow */
3778         if (total_count < dev_priv->ips.last_count1) {
3779                 diff = ~0UL - dev_priv->ips.last_count1;
3780                 diff += total_count;
3781         } else {
3782                 diff = total_count - dev_priv->ips.last_count1;
3783         }
3784
3785         for (i = 0; i < ARRAY_SIZE(cparams); i++) {
3786                 if (cparams[i].i == dev_priv->ips.c_m &&
3787                     cparams[i].t == dev_priv->ips.r_t) {
3788                         m = cparams[i].m;
3789                         c = cparams[i].c;
3790                         break;
3791                 }
3792         }
3793
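             /* Turn the energy-counter delta into a rate over the elapsed
              * milliseconds, then apply the (m, c) linear fit chosen from
              * cparams[] above to estimate chipset power. */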
3794         diff = div_u64(diff, diff1);
3795         ret = ((m * diff) + c);
3796         ret = div_u64(ret, 10);
3797
3798         dev_priv->ips.last_count1 = total_count;
3799         dev_priv->ips.last_time1 = now;
3800
3801         dev_priv->ips.chipset_power = ret;
3802
3803         return ret;
3804 }
3805
3806 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
3807 {
3808         unsigned long val;
3809
3810         if (dev_priv->info->gen != 5)
3811                 return 0;
3812
3813         spin_lock_irq(&mchdev_lock);
3814
3815         val = __i915_chipset_val(dev_priv);
3816
3817         spin_unlock_irq(&mchdev_lock);
3818
3819         return val;
3820 }
3821
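     /* Combine the TSFS slope/intercept with the TR1 reading; __i915_gfx_val()
      * below treats the result as a temperature when picking its correction
      * factor (interpretation inferred from how the value is used). */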
3822 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
3823 {
3824         unsigned long m, x, b;
3825         u32 tsfs;
3826
3827         tsfs = I915_READ(TSFS);
3828
3829         m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
3830         x = I915_READ8(TR1);
3831
3832         b = tsfs & TSFS_INTR_MASK;
3833
3834         return ((m * x) / 127) - b;
3835 }
3836
3837 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
3838 {
3839         static const struct v_table {
3840                 u16 vd; /* in .1 mil */
3841                 u16 vm; /* in .1 mil */
3842         } v_table[] = {
3843                 { 0, 0, },
3844                 { 375, 0, },
3845                 { 500, 0, },
3846                 { 625, 0, },
3847                 { 750, 0, },
3848                 { 875, 0, },
3849                 { 1000, 0, },
3850                 { 1125, 0, },
3851                 { 4125, 3000, },
3852                 { 4125, 3000, },
3853                 { 4125, 3000, },
3854                 { 4125, 3000, },
3855                 { 4125, 3000, },
3856                 { 4125, 3000, },
3857                 { 4125, 3000, },
3858                 { 4125, 3000, },
3859                 { 4125, 3000, },
3860                 { 4125, 3000, },
3861                 { 4125, 3000, },
3862                 { 4125, 3000, },
3863                 { 4125, 3000, },
3864                 { 4125, 3000, },
3865                 { 4125, 3000, },
3866                 { 4125, 3000, },
3867                 { 4125, 3000, },
3868                 { 4125, 3000, },
3869                 { 4125, 3000, },
3870                 { 4125, 3000, },
3871                 { 4125, 3000, },
3872                 { 4125, 3000, },
3873                 { 4125, 3000, },
3874                 { 4125, 3000, },
3875                 { 4250, 3125, },
3876                 { 4375, 3250, },
3877                 { 4500, 3375, },
3878                 { 4625, 3500, },
3879                 { 4750, 3625, },
3880                 { 4875, 3750, },
3881                 { 5000, 3875, },
3882                 { 5125, 4000, },
3883                 { 5250, 4125, },
3884                 { 5375, 4250, },
3885                 { 5500, 4375, },
3886                 { 5625, 4500, },
3887                 { 5750, 4625, },
3888                 { 5875, 4750, },
3889                 { 6000, 4875, },
3890                 { 6125, 5000, },
3891                 { 6250, 5125, },
3892                 { 6375, 5250, },
3893                 { 6500, 5375, },
3894                 { 6625, 5500, },
3895                 { 6750, 5625, },
3896                 { 6875, 5750, },
3897                 { 7000, 5875, },
3898                 { 7125, 6000, },
3899                 { 7250, 6125, },
3900                 { 7375, 6250, },
3901                 { 7500, 6375, },
3902                 { 7625, 6500, },
3903                 { 7750, 6625, },
3904                 { 7875, 6750, },
3905                 { 8000, 6875, },
3906                 { 8125, 7000, },
3907                 { 8250, 7125, },
3908                 { 8375, 7250, },
3909                 { 8500, 7375, },
3910                 { 8625, 7500, },
3911                 { 8750, 7625, },
3912                 { 8875, 7750, },
3913                 { 9000, 7875, },
3914                 { 9125, 8000, },
3915                 { 9250, 8125, },
3916                 { 9375, 8250, },
3917                 { 9500, 8375, },
3918                 { 9625, 8500, },
3919                 { 9750, 8625, },
3920                 { 9875, 8750, },
3921                 { 10000, 8875, },
3922                 { 10125, 9000, },
3923                 { 10250, 9125, },
3924                 { 10375, 9250, },
3925                 { 10500, 9375, },
3926                 { 10625, 9500, },
3927                 { 10750, 9625, },
3928                 { 10875, 9750, },
3929                 { 11000, 9875, },
3930                 { 11125, 10000, },
3931                 { 11250, 10125, },
3932                 { 11375, 10250, },
3933                 { 11500, 10375, },
3934                 { 11625, 10500, },
3935                 { 11750, 10625, },
3936                 { 11875, 10750, },
3937                 { 12000, 10875, },
3938                 { 12125, 11000, },
3939                 { 12250, 11125, },
3940                 { 12375, 11250, },
3941                 { 12500, 11375, },
3942                 { 12625, 11500, },
3943                 { 12750, 11625, },
3944                 { 12875, 11750, },
3945                 { 13000, 11875, },
3946                 { 13125, 12000, },
3947                 { 13250, 12125, },
3948                 { 13375, 12250, },
3949                 { 13500, 12375, },
3950                 { 13625, 12500, },
3951                 { 13750, 12625, },
3952                 { 13875, 12750, },
3953                 { 14000, 12875, },
3954                 { 14125, 13000, },
3955                 { 14250, 13125, },
3956                 { 14375, 13250, },
3957                 { 14500, 13375, },
3958                 { 14625, 13500, },
3959                 { 14750, 13625, },
3960                 { 14875, 13750, },
3961                 { 15000, 13875, },
3962                 { 15125, 14000, },
3963                 { 15250, 14125, },
3964                 { 15375, 14250, },
3965                 { 15500, 14375, },
3966                 { 15625, 14500, },
3967                 { 15750, 14625, },
3968                 { 15875, 14750, },
3969                 { 16000, 14875, },
3970                 { 16125, 15000, },
3971         };
3972         if (dev_priv->info->is_mobile)
3973                 return v_table[pxvid].vm;
3974         else
3975                 return v_table[pxvid].vd;
3976 }
3977
3978 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
3979 {
3980         struct timespec now, diff1;
3981         u64 diff;
3982         unsigned long diffms;
3983         u32 count;
3984
3985         assert_spin_locked(&mchdev_lock);
3986
3987         getrawmonotonic(&now);
3988         diff1 = timespec_sub(now, dev_priv->ips.last_time2);
3989
3990         /* Don't divide by 0 */
3991         diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
3992         if (!diffms)
3993                 return;
3994
3995         count = I915_READ(GFXEC);
3996
3997         if (count < dev_priv->ips.last_count2) {
3998                 diff = ~0UL - dev_priv->ips.last_count2;
3999                 diff += count;
4000         } else {
4001                 diff = count - dev_priv->ips.last_count2;
4002         }
4003
4004         dev_priv->ips.last_count2 = count;
4005         dev_priv->ips.last_time2 = now;
4006
4007         /* More magic constants... */
4008         diff = diff * 1181;
4009         diff = div_u64(diff, diffms * 10);
4010         dev_priv->ips.gfx_power = diff;
4011 }
4012
4013 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
4014 {
4015         if (dev_priv->info->gen != 5)
4016                 return;
4017
4018         spin_lock_irq(&mchdev_lock);
4019
4020         __i915_update_gfx_val(dev_priv);
4021
4022         spin_unlock_irq(&mchdev_lock);
4023 }
4024
4025 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
4026 {
4027         unsigned long t, corr, state1, corr2, state2;
4028         u32 pxvid, ext_v;
4029
4030         assert_spin_locked(&mchdev_lock);
4031
4032         pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
4033         pxvid = (pxvid >> 24) & 0x7f;
4034         ext_v = pvid_to_extvid(dev_priv, pxvid);
4035
4036         state1 = ext_v;
4037
4038         t = i915_mch_val(dev_priv);
4039
4040         /* Revel in the empirically derived constants */
4041
4042         /* Correction factor in 1/100000 units */
4043         if (t > 80)
4044                 corr = ((t * 2349) + 135940);
4045         else if (t >= 50)
4046                 corr = ((t * 964) + 29317);
4047         else /* < 50 */
4048                 corr = ((t * 301) + 1004);
4049
4050         corr = corr * ((150142 * state1) / 10000 - 78642);
4051         corr /= 100000;
4052         corr2 = (corr * dev_priv->ips.corr);
4053
4054         state2 = (corr2 * state1) / 10000;
4055         state2 /= 100; /* convert to mW */
4056
4057         __i915_update_gfx_val(dev_priv);
4058
4059         return dev_priv->ips.gfx_power + state2;
4060 }
4061
4062 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
4063 {
4064         unsigned long val;
4065
4066         if (dev_priv->info->gen != 5)
4067                 return 0;
4068
4069         spin_lock_irq(&mchdev_lock);
4070
4071         val = __i915_gfx_val(dev_priv);
4072
4073         spin_unlock_irq(&mchdev_lock);
4074
4075         return val;
4076 }
4077
4078 /**
4079  * i915_read_mch_val - return value for IPS use
4080  *
4081  * Calculate and return a value for the IPS driver to use when deciding whether
4082  * we have thermal and power headroom to increase CPU or GPU power budget.
4083  */
4084 unsigned long i915_read_mch_val(void)
4085 {
4086         struct drm_i915_private *dev_priv;
4087         unsigned long chipset_val, graphics_val, ret = 0;
4088
4089         spin_lock_irq(&mchdev_lock);
4090         if (!i915_mch_dev)
4091                 goto out_unlock;
4092         dev_priv = i915_mch_dev;
4093
4094         chipset_val = __i915_chipset_val(dev_priv);
4095         graphics_val = __i915_gfx_val(dev_priv);
4096
4097         ret = chipset_val + graphics_val;
4098
4099 out_unlock:
4100         spin_unlock_irq(&mchdev_lock);
4101
4102         return ret;
4103 }
4104 EXPORT_SYMBOL_GPL(i915_read_mch_val);
4105
4106 /**
4107  * i915_gpu_raise - raise GPU frequency limit
4108  *
4109  * Raise the limit; IPS indicates we have thermal headroom.
4110  */
4111 bool i915_gpu_raise(void)
4112 {
4113         struct drm_i915_private *dev_priv;
4114         bool ret = true;
4115
4116         spin_lock_irq(&mchdev_lock);
4117         if (!i915_mch_dev) {
4118                 ret = false;
4119                 goto out_unlock;
4120         }
4121         dev_priv = i915_mch_dev;
4122
4123         if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
4124                 dev_priv->ips.max_delay--;
4125
4126 out_unlock:
4127         spin_unlock_irq(&mchdev_lock);
4128
4129         return ret;
4130 }
4131 EXPORT_SYMBOL_GPL(i915_gpu_raise);
4132
4133 /**
4134  * i915_gpu_lower - lower GPU frequency limit
4135  *
4136  * IPS indicates we're close to a thermal limit, so throttle back the GPU
4137  * frequency maximum.
4138  */
4139 bool i915_gpu_lower(void)
4140 {
4141         struct drm_i915_private *dev_priv;
4142         bool ret = true;
4143
4144         spin_lock_irq(&mchdev_lock);
4145         if (!i915_mch_dev) {
4146                 ret = false;
4147                 goto out_unlock;
4148         }
4149         dev_priv = i915_mch_dev;
4150
4151         if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
4152                 dev_priv->ips.max_delay++;
4153
4154 out_unlock:
4155         spin_unlock_irq(&mchdev_lock);
4156
4157         return ret;
4158 }
4159 EXPORT_SYMBOL_GPL(i915_gpu_lower);
4160
4161 /**
4162  * i915_gpu_busy - indicate GPU business to IPS
4163  *
4164  * Tell the IPS driver whether or not the GPU is busy.
4165  */
4166 bool i915_gpu_busy(void)
4167 {
4168         struct drm_i915_private *dev_priv;
4169         struct intel_ring_buffer *ring;
4170         bool ret = false;
4171         int i;
4172
4173         spin_lock_irq(&mchdev_lock);
4174         if (!i915_mch_dev)
4175                 goto out_unlock;
4176         dev_priv = i915_mch_dev;
4177
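             /* Any ring with outstanding requests counts as busy. */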
4178         for_each_ring(ring, dev_priv, i)
4179                 ret |= !list_empty(&ring->request_list);
4180
4181 out_unlock:
4182         spin_unlock_irq(&mchdev_lock);
4183
4184         return ret;
4185 }
4186 EXPORT_SYMBOL_GPL(i915_gpu_busy);
4187
4188 /**
4189  * i915_gpu_turbo_disable - disable graphics turbo
4190  *
4191  * Disable graphics turbo by resetting the max frequency and setting the
4192  * current frequency to the default.
4193  */
4194 bool i915_gpu_turbo_disable(void)
4195 {
4196         struct drm_i915_private *dev_priv;
4197         bool ret = true;
4198
4199         spin_lock_irq(&mchdev_lock);
4200         if (!i915_mch_dev) {
4201                 ret = false;
4202                 goto out_unlock;
4203         }
4204         dev_priv = i915_mch_dev;
4205
4206         dev_priv->ips.max_delay = dev_priv->ips.fstart;
4207
4208         if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
4209                 ret = false;
4210
4211 out_unlock:
4212         spin_unlock_irq(&mchdev_lock);
4213
4214         return ret;
4215 }
4216 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
4217
4218 /**
4219  * Tells the intel_ips driver that the i915 driver is now loaded, if
4220  * IPS got loaded first.
4221  *
4222  * This awkward dance is so that neither module has to depend on the
4223  * other in order for IPS to do the appropriate communication of
4224  * GPU turbo limits to i915.
4225  */
4226 static void
4227 ips_ping_for_i915_load(void)
4228 {
4229         void (*link)(void);
4230
4231         link = symbol_get(ips_link_to_i915_driver);
4232         if (link) {
4233                 link();
4234                 symbol_put(ips_link_to_i915_driver);
4235         }
4236 }
4237
4238 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
4239 {
4240         /* We only register the i915 ips part with intel-ips once everything is
4241          * set up, to avoid intel-ips sneaking in and reading bogus values. */
4242         spin_lock_irq(&mchdev_lock);
4243         i915_mch_dev = dev_priv;
4244         spin_unlock_irq(&mchdev_lock);
4245
4246         ips_ping_for_i915_load();
4247 }
4248
4249 void intel_gpu_ips_teardown(void)
4250 {
4251         spin_lock_irq(&mchdev_lock);
4252         i915_mch_dev = NULL;
4253         spin_unlock_irq(&mchdev_lock);
4254 }

4255 static void intel_init_emon(struct drm_device *dev)
4256 {
4257         struct drm_i915_private *dev_priv = dev->dev_private;
4258         u32 lcfuse;
4259         u8 pxw[16];
4260         int i;
4261
4262         /* Disable PMON while programming the energy weights */
4263         I915_WRITE(ECR, 0);
4264         POSTING_READ(ECR);
4265
4266         /* Program energy weights for various events */
4267         I915_WRITE(SDEW, 0x15040d00);
4268         I915_WRITE(CSIEW0, 0x007f0000);
4269         I915_WRITE(CSIEW1, 0x1e220004);
4270         I915_WRITE(CSIEW2, 0x04000004);
4271
4272         for (i = 0; i < 5; i++)
4273                 I915_WRITE(PEW + (i * 4), 0);
4274         for (i = 0; i < 3; i++)
4275                 I915_WRITE(DEW + (i * 4), 0);
4276
4277         /* Program P-state weights to account for frequency power adjustment */
4278         for (i = 0; i < 16; i++) {
4279                 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
4280                 unsigned long freq = intel_pxfreq(pxvidfreq);
4281                 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
4282                         PXVFREQ_PX_SHIFT;
4283                 unsigned long val;
4284
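                     /*
                      * Weight scales as vid^2 * freq, normalized into a byte
                      * against a vid of 127 and a 900 MHz reference (freq from
                      * intel_pxfreq() appears to be in kHz, hence the /1000).
                      */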
4285                 val = vid * vid;
4286                 val *= (freq / 1000);
4287                 val *= 255;
4288                 val /= (127*127*900);
4289                 if (val > 0xff)
4290                         DRM_ERROR("bad pxval: %ld\n", val);
4291                 pxw[i] = val;
4292         }
4293         /* Render standby states get 0 weight */
4294         pxw[14] = 0;
4295         pxw[15] = 0;
4296
4297         for (i = 0; i < 4; i++) {
4298                 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
4299                         (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
4300                 I915_WRITE(PXW + (i * 4), val);
4301         }
4302
4303         /* Adjust magic regs to magic values (more experimental results) */
4304         I915_WRITE(OGW0, 0);
4305         I915_WRITE(OGW1, 0);
4306         I915_WRITE(EG0, 0x00007f00);
4307         I915_WRITE(EG1, 0x0000000e);
4308         I915_WRITE(EG2, 0x000e0000);
4309         I915_WRITE(EG3, 0x68000300);
4310         I915_WRITE(EG4, 0x42000000);
4311         I915_WRITE(EG5, 0x00140031);
4312         I915_WRITE(EG6, 0);
4313         I915_WRITE(EG7, 0);
4314
4315         for (i = 0; i < 8; i++)
4316                 I915_WRITE(PXWL + (i * 4), 0);
4317
4318         /* Enable PMON + select events */
4319         I915_WRITE(ECR, 0x80000019);
4320
4321         lcfuse = I915_READ(LCFUSE02);
4322
4323         dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
4324 }
4325
4326 void intel_disable_gt_powersave(struct drm_device *dev)
4327 {
4328         struct drm_i915_private *dev_priv = dev->dev_private;
4329
4330         /* Interrupts should be disabled already to avoid re-arming. */
4331         WARN_ON(dev->irq_enabled);
4332
4333         if (IS_IRONLAKE_M(dev)) {
4334                 ironlake_disable_drps(dev);
4335                 ironlake_disable_rc6(dev);
4336         } else if (INTEL_INFO(dev)->gen >= 6) {
4337                 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
4338                 cancel_work_sync(&dev_priv->rps.work);
4339                 if (IS_VALLEYVIEW(dev))
4340                         cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
4341                 mutex_lock(&dev_priv->rps.hw_lock);
4342                 if (IS_VALLEYVIEW(dev))
4343                         valleyview_disable_rps(dev);
4344                 else
4345                         gen6_disable_rps(dev);
4346                 mutex_unlock(&dev_priv->rps.hw_lock);
4347         }
4348 }
4349
4350 static void intel_gen6_powersave_work(struct work_struct *work)
4351 {
4352         struct drm_i915_private *dev_priv =
4353                 container_of(work, struct drm_i915_private,
4354                              rps.delayed_resume_work.work);
4355         struct drm_device *dev = dev_priv->dev;
4356
4357         mutex_lock(&dev_priv->rps.hw_lock);
4358
4359         if (IS_VALLEYVIEW(dev)) {
4360                 valleyview_enable_rps(dev);
4361         } else {
4362                 gen6_enable_rps(dev);
4363                 gen6_update_ring_freq(dev);
4364         }
4365         mutex_unlock(&dev_priv->rps.hw_lock);
4366 }
4367
4368 void intel_enable_gt_powersave(struct drm_device *dev)
4369 {
4370         struct drm_i915_private *dev_priv = dev->dev_private;
4371
4372         if (IS_IRONLAKE_M(dev)) {
4373                 ironlake_enable_drps(dev);
4374                 ironlake_enable_rc6(dev);
4375                 intel_init_emon(dev);
4376         } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
4377                 /*
4378                  * PCU communication is slow and this doesn't need to be
4379                  * done at any specific time, so do this out of our fast path
4380                  * to make resume and init faster.
4381                  */
4382                 schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
4383                                       round_jiffies_up_relative(HZ));
4384         }
4385 }
4386
4387 static void ibx_init_clock_gating(struct drm_device *dev)
4388 {
4389         struct drm_i915_private *dev_priv = dev->dev_private;
4390
4391         /*
4392          * On Ibex Peak and Cougar Point, we need to disable clock
4393          * gating for the panel power sequencer or it will fail to
4394          * start up when no ports are active.
4395          */
4396         I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
4397 }
4398
4399 static void g4x_disable_trickle_feed(struct drm_device *dev)
4400 {
4401         struct drm_i915_private *dev_priv = dev->dev_private;
4402         int pipe;
4403
4404         for_each_pipe(pipe) {
4405                 I915_WRITE(DSPCNTR(pipe),
4406                            I915_READ(DSPCNTR(pipe)) |
4407                            DISPPLANE_TRICKLE_FEED_DISABLE);
4408                 intel_flush_display_plane(dev_priv, pipe);
4409         }
4410 }
4411
4412 static void ironlake_init_clock_gating(struct drm_device *dev)
4413 {
4414         struct drm_i915_private *dev_priv = dev->dev_private;
4415         uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
4416
4417         /* Required for FBC */
4418         dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
4419                    ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
4420                    ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
4421
4422         I915_WRITE(PCH_3DCGDIS0,
4423                    MARIUNIT_CLOCK_GATE_DISABLE |
4424                    SVSMUNIT_CLOCK_GATE_DISABLE);
4425         I915_WRITE(PCH_3DCGDIS1,
4426                    VFMUNIT_CLOCK_GATE_DISABLE);
4427
4428         /*
4429          * According to the spec, the following bits should be set in
4430          * order to enable memory self-refresh:
4431          *   - bits 22/21 of 0x42004
4432          *   - bit 5 of 0x42020
4433          *   - bit 15 of 0x45000
4434          */
4435         I915_WRITE(ILK_DISPLAY_CHICKEN2,
4436                    (I915_READ(ILK_DISPLAY_CHICKEN2) |
4437                     ILK_DPARB_GATE | ILK_VSDPFD_FULL));
4438         dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
4439         I915_WRITE(DISP_ARB_CTL,
4440                    (I915_READ(DISP_ARB_CTL) |
4441                     DISP_FBC_WM_DIS));
4442         I915_WRITE(WM3_LP_ILK, 0);
4443         I915_WRITE(WM2_LP_ILK, 0);
4444         I915_WRITE(WM1_LP_ILK, 0);
4445
4446         /*
4447          * Per the hardware documentation, the following bits should be
4448          * set unconditionally in order to enable FBC:
4449          *   - bit 22 of 0x42000
4450          *   - bit 22 of 0x42004
4451          *   - bits 7, 8 and 9 of 0x42020.
4452          */
4453         if (IS_IRONLAKE_M(dev)) {
4454                 I915_WRITE(ILK_DISPLAY_CHICKEN1,
4455                            I915_READ(ILK_DISPLAY_CHICKEN1) |
4456                            ILK_FBCQ_DIS);
4457                 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4458                            I915_READ(ILK_DISPLAY_CHICKEN2) |
4459                            ILK_DPARB_GATE);
4460         }
4461
4462         I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
4463
4464         I915_WRITE(ILK_DISPLAY_CHICKEN2,
4465                    I915_READ(ILK_DISPLAY_CHICKEN2) |
4466                    ILK_ELPIN_409_SELECT);
4467         I915_WRITE(_3D_CHICKEN2,
4468                    _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
4469                    _3D_CHICKEN2_WM_READ_PIPELINED);
4470
4471         /* WaDisableRenderCachePipelinedFlush:ilk */
4472         I915_WRITE(CACHE_MODE_0,
4473                    _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
4474
4475         g4x_disable_trickle_feed(dev);
4476
4477         ibx_init_clock_gating(dev);
4478 }
4479
4480 static void cpt_init_clock_gating(struct drm_device *dev)
4481 {
4482         struct drm_i915_private *dev_priv = dev->dev_private;
4483         int pipe;
4484         uint32_t val;
4485
4486         /*
4487          * On Ibex Peak and Cougar Point, we need to disable clock
4488          * gating for the panel power sequencer or it will fail to
4489          * start up when no ports are active.
4490          */
4491         I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
4492         I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
4493                    DPLS_EDP_PPS_FIX_DIS);
4494         /* The below fixes a display corruption (a few pixels shifted
4495          * downward) seen only on the LVDS panels of some Ivy Bridge HP laptops.
4496          */
4497         for_each_pipe(pipe) {
4498                 val = I915_READ(TRANS_CHICKEN2(pipe));
4499                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
4500                 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
4501                 if (dev_priv->vbt.fdi_rx_polarity_inverted)
4502                         val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
4503                 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
4504                 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
4505                 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
4506                 I915_WRITE(TRANS_CHICKEN2(pipe), val);
4507         }
4508         /* WADP0ClockGatingDisable */
4509         for_each_pipe(pipe) {
4510                 I915_WRITE(TRANS_CHICKEN1(pipe),
4511                            TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
4512         }
4513 }
4514
4515 static void gen6_check_mch_setup(struct drm_device *dev)
4516 {
4517         struct drm_i915_private *dev_priv = dev->dev_private;
4518         uint32_t tmp;
4519
4520         tmp = I915_READ(MCH_SSKPD);
4521         if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
4522                 DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
4523                 DRM_INFO("This can cause pipe underruns and display issues.\n");
4524                 DRM_INFO("Please upgrade your BIOS to fix this.\n");
4525         }
4526 }
4527
4528 static void gen6_init_clock_gating(struct drm_device *dev)
4529 {
4530         struct drm_i915_private *dev_priv = dev->dev_private;
4531         uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
4532
4533         I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
4534
4535         I915_WRITE(ILK_DISPLAY_CHICKEN2,
4536                    I915_READ(ILK_DISPLAY_CHICKEN2) |
4537                    ILK_ELPIN_409_SELECT);
4538
4539         /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
4540         I915_WRITE(_3D_CHICKEN,
4541                    _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
4542
4543         /* WaSetupGtModeTdRowDispatch:snb */
4544         if (IS_SNB_GT1(dev))
4545                 I915_WRITE(GEN6_GT_MODE,
4546                            _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
4547
4548         I915_WRITE(WM3_LP_ILK, 0);
4549         I915_WRITE(WM2_LP_ILK, 0);
4550         I915_WRITE(WM1_LP_ILK, 0);
4551
4552         I915_WRITE(CACHE_MODE_0,
4553                    _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
4554
4555         I915_WRITE(GEN6_UCGCTL1,
4556                    I915_READ(GEN6_UCGCTL1) |
4557                    GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
4558                    GEN6_CSUNIT_CLOCK_GATE_DISABLE);
4559
4560         /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
4561          * gating disable must be set.  Failure to set it results in
4562          * flickering pixels due to Z write ordering failures after
4563          * some amount of runtime in the Mesa "fire" demo, and Unigine
4564          * Sanctuary and Tropics, and apparently anything else with
4565          * alpha test or pixel discard.
4566          *
4567          * According to the spec, bit 11 (RCCUNIT) must also be set,
4568          * but we didn't debug actual testcases to find it out.
4569          *
4570          * Also apply WaDisableVDSUnitClockGating:snb and
4571          * WaDisableRCPBUnitClockGating:snb.
4572          */
4573         I915_WRITE(GEN6_UCGCTL2,
4574                    GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
4575                    GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
4576                    GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4577
4578         /* Bspec says we need to always set all mask bits. */
4579         I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
4580                    _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
4581
4582          * According to the spec, the following bits should be
4583          * set in order to enable memory self-refresh and FBC:
4584          *   - bits 21 and 22 of 0x42000
4585          *   - bits 21 and 22 of 0x42004
4586          *   - bits 5 and 7 of 0x42020
4587          *   - bit 14 of 0x70180
4588          *   - bit 14 of 0x71180
4589          * The bit14 of 0x71180
4590          */
4591         I915_WRITE(ILK_DISPLAY_CHICKEN1,
4592                    I915_READ(ILK_DISPLAY_CHICKEN1) |
4593                    ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
4594         I915_WRITE(ILK_DISPLAY_CHICKEN2,
4595                    I915_READ(ILK_DISPLAY_CHICKEN2) |
4596                    ILK_DPARB_GATE | ILK_VSDPFD_FULL);
4597         I915_WRITE(ILK_DSPCLK_GATE_D,
4598                    I915_READ(ILK_DSPCLK_GATE_D) |
4599                    ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
4600                    ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
4601
4602         /* WaMbcDriverBootEnable:snb */
4603         I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
4604                    GEN6_MBCTL_ENABLE_BOOT_FETCH);
4605
4606         g4x_disable_trickle_feed(dev);
4607
4608         /* The default value should be 0x200 according to docs, but the two
4609          * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
4610         I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
4611         I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
4612
4613         cpt_init_clock_gating(dev);
4614
4615         gen6_check_mch_setup(dev);
4616 }
4617
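     /*
      * Helper used for WaVSRefCountFullforceMissDisable (see the callers):
      * select hardware scheduling for the TS/VS/DS fixed-function threads and,
      * on Haswell, also clear the VS reference-count full-force-miss-enable bit.
      */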
4618 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
4619 {
4620         uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
4621
4622         reg &= ~GEN7_FF_SCHED_MASK;
4623         reg |= GEN7_FF_TS_SCHED_HW;
4624         reg |= GEN7_FF_VS_SCHED_HW;
4625         reg |= GEN7_FF_DS_SCHED_HW;
4626
4627         if (IS_HASWELL(dev_priv->dev))
4628                 reg &= ~GEN7_FF_VS_REF_CNT_FFME;
4629
4630         I915_WRITE(GEN7_FF_THREAD_MODE, reg);
4631 }
4632
4633 static void lpt_init_clock_gating(struct drm_device *dev)
4634 {
4635         struct drm_i915_private *dev_priv = dev->dev_private;
4636
4637         /*
4638          * TODO: this bit should only be enabled when really needed, then
4639          * disabled when not needed anymore in order to save power.
4640          */
4641         if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
4642                 I915_WRITE(SOUTH_DSPCLK_GATE_D,
4643                            I915_READ(SOUTH_DSPCLK_GATE_D) |
4644                            PCH_LP_PARTITION_LEVEL_DISABLE);
4645
4646         /* WADPOClockGatingDisable:hsw */
4647         I915_WRITE(_TRANSA_CHICKEN1,
4648                    I915_READ(_TRANSA_CHICKEN1) |
4649                    TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
4650 }
4651
4652 static void lpt_suspend_hw(struct drm_device *dev)
4653 {
4654         struct drm_i915_private *dev_priv = dev->dev_private;
4655
4656         if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
4657                 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
4658
4659                 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4660                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4661         }
4662 }
4663
4664 static void haswell_init_clock_gating(struct drm_device *dev)
4665 {
4666         struct drm_i915_private *dev_priv = dev->dev_private;
4667
4668         I915_WRITE(WM3_LP_ILK, 0);
4669         I915_WRITE(WM2_LP_ILK, 0);
4670         I915_WRITE(WM1_LP_ILK, 0);
4671
4672         /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
4673          * This implements the WaDisableRCZUnitClockGating:hsw workaround.
4674          */
4675         I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
4676
4677         /* Apply the WaDisableRHWOOptimizationForRenderHang:hsw workaround. */
4678         I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
4679                    GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
4680
4681         /* WaApplyL3ControlAndL3ChickenMode:hsw */
4682         I915_WRITE(GEN7_L3CNTLREG1,
4683                         GEN7_WA_FOR_GEN7_L3_CONTROL);
4684         I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
4685                         GEN7_WA_L3_CHICKEN_MODE);
4686
4687         /* This is required by WaCatErrorRejectionIssue:hsw */
4688         I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
4689                         I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
4690                         GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
4691
4692         g4x_disable_trickle_feed(dev);
4693
4694         /* WaVSRefCountFullforceMissDisable:hsw */
4695         gen7_setup_fixed_func_scheduler(dev_priv);
4696
4697         /* WaDisable4x2SubspanOptimization:hsw */
4698         I915_WRITE(CACHE_MODE_1,
4699                    _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
4700
4701         /* WaMbcDriverBootEnable:hsw */
4702         I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
4703                    GEN6_MBCTL_ENABLE_BOOT_FETCH);
4704
4705         /* WaSwitchSolVfFArbitrationPriority:hsw */
4706         I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
4707
4708         /* WaRsPkgCStateDisplayPMReq:hsw */
4709         I915_WRITE(CHICKEN_PAR1_1,
4710                    I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
4711
4712         lpt_init_clock_gating(dev);
4713 }
4714
4715 static void ivybridge_init_clock_gating(struct drm_device *dev)
4716 {
4717         struct drm_i915_private *dev_priv = dev->dev_private;
4718         uint32_t snpcr;
4719
4720         I915_WRITE(WM3_LP_ILK, 0);
4721         I915_WRITE(WM2_LP_ILK, 0);
4722         I915_WRITE(WM1_LP_ILK, 0);
4723
4724         I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
4725
4726         /* WaDisableEarlyCull:ivb */
4727         I915_WRITE(_3D_CHICKEN3,
4728                    _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
4729
4730         /* WaDisableBackToBackFlipFix:ivb */
4731         I915_WRITE(IVB_CHICKEN3,
4732                    CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
4733                    CHICKEN3_DGMG_DONE_FIX_DISABLE);
4734
4735         /* WaDisablePSDDualDispatchEnable:ivb */
4736         if (IS_IVB_GT1(dev))
4737                 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
4738                            _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
4739         else
4740                 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
4741                            _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
4742
4743         /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
4744         I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
4745                    GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
4746
4747         /* WaApplyL3ControlAndL3ChickenMode:ivb */
4748         I915_WRITE(GEN7_L3CNTLREG1,
4749                         GEN7_WA_FOR_GEN7_L3_CONTROL);
4750         I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
4751                    GEN7_WA_L3_CHICKEN_MODE);
4752         if (IS_IVB_GT1(dev))
4753                 I915_WRITE(GEN7_ROW_CHICKEN2,
4754                            _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
4755         else
4756                 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
4757                            _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
4758
4760         /* WaForceL3Serialization:ivb */
4761         I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
4762                    ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
4763
4764         /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
4765          * gating disable must be set.  Failure to set it results in
4766          * flickering pixels due to Z write ordering failures after
4767          * some amount of runtime in the Mesa "fire" demo, and Unigine
4768          * Sanctuary and Tropics, and apparently anything else with
4769          * alpha test or pixel discard.
4770          *
4771          * According to the spec, bit 11 (RCCUNIT) must also be set,
4772          * but we didn't debug actual testcases to find it out.
4773          *
4774          * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
4775          * This implements the WaDisableRCZUnitClockGating:ivb workaround.
4776          */
4777         I915_WRITE(GEN6_UCGCTL2,
4778                    GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
4779                    GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4780
4781         /* This is required by WaCatErrorRejectionIssue:ivb */
4782         I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
4783                         I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
4784                         GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
4785
4786         g4x_disable_trickle_feed(dev);
4787
4788         /* WaMbcDriverBootEnable:ivb */
4789         I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
4790                    GEN6_MBCTL_ENABLE_BOOT_FETCH);
4791
4792         /* WaVSRefCountFullforceMissDisable:ivb */
4793         gen7_setup_fixed_func_scheduler(dev_priv);
4794
4795         /* WaDisable4x2SubspanOptimization:ivb */
4796         I915_WRITE(CACHE_MODE_1,
4797                    _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
4798
4799         snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4800         snpcr &= ~GEN6_MBC_SNPCR_MASK;
4801         snpcr |= GEN6_MBC_SNPCR_MED;
4802         I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4803
4804         if (!HAS_PCH_NOP(dev))
4805                 cpt_init_clock_gating(dev);
4806
4807         gen6_check_mch_setup(dev);
4808 }
4809
4810 static void valleyview_init_clock_gating(struct drm_device *dev)
4811 {
4812         struct drm_i915_private *dev_priv = dev->dev_private;
4813
4814         I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
4815
4816         /* WaDisableEarlyCull:vlv */
4817         I915_WRITE(_3D_CHICKEN3,
4818                    _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
4819
4820         /* WaDisableBackToBackFlipFix:vlv */
4821         I915_WRITE(IVB_CHICKEN3,
4822                    CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
4823                    CHICKEN3_DGMG_DONE_FIX_DISABLE);
4824
4825         /* WaDisablePSDDualDispatchEnable:vlv */
4826         I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
4827                    _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
4828                                       GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
4829
4830         /* Apply the WaDisableRHWOOptimizationForRenderHang:vlv workaround. */
4831         I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
4832                    GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
4833
4834         /* WaApplyL3ControlAndL3ChickenMode:vlv */
4835         I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
4836         I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
4837
4838         /* WaForceL3Serialization:vlv */
4839         I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
4840                    ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
4841
4842         /* WaDisableDopClockGating:vlv */
4843         I915_WRITE(GEN7_ROW_CHICKEN2,
4844                    _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
4845
4846         /* This is required by WaCatErrorRejectionIssue:vlv */
4847         I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
4848                    I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
4849                    GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
4850
4851         /* WaMbcDriverBootEnable:vlv */
4852         I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
4853                    GEN6_MBCTL_ENABLE_BOOT_FETCH);
4854
4856         /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
4857          * gating disable must be set.  Failure to set it results in
4858          * flickering pixels due to Z write ordering failures after
4859          * some amount of runtime in the Mesa "fire" demo, and Unigine
4860          * Sanctuary and Tropics, and apparently anything else with
4861          * alpha test or pixel discard.
4862          *
4863          * According to the spec, bit 11 (RCCUNIT) must also be set,
4864          * but we didn't debug actual testcases to find it out.
4865          *
4866          * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
4867          * This implements the WaDisableRCZUnitClockGating:vlv workaround.
4868          *
4869          * Also apply WaDisableVDSUnitClockGating:vlv and
4870          * WaDisableRCPBUnitClockGating:vlv.
4871          */
4872         I915_WRITE(GEN6_UCGCTL2,
4873                    GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
4874                    GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
4875                    GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
4876                    GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
4877                    GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4878
4879         I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
4880
4881         I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
4882
4883         I915_WRITE(CACHE_MODE_1,
4884                    _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
4885
4886         /*
4887          * WaDisableVLVClockGating_VBIIssue:vlv
4888          * Disable clock gating on the GCFG unit to prevent a delay
4889          * in the reporting of vblank events.
4890          */
4891         I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff);
4892
4893         /* Conservative clock gating settings for now */
4894         I915_WRITE(0x9400, 0xffffffff);
4895         I915_WRITE(0x9404, 0xffffffff);
4896         I915_WRITE(0x9408, 0xffffffff);
4897         I915_WRITE(0x940c, 0xffffffff);
4898         I915_WRITE(0x9410, 0xffffffff);
4899         I915_WRITE(0x9414, 0xffffffff);
4900         I915_WRITE(0x9418, 0xffffffff);
4901 }
4902
4903 static void g4x_init_clock_gating(struct drm_device *dev)
4904 {
4905         struct drm_i915_private *dev_priv = dev->dev_private;
4906         uint32_t dspclk_gate;
4907
4908         I915_WRITE(RENCLK_GATE_D1, 0);
4909         I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
4910                    GS_UNIT_CLOCK_GATE_DISABLE |
4911                    CL_UNIT_CLOCK_GATE_DISABLE);
4912         I915_WRITE(RAMCLK_GATE_D, 0);
4913         dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
4914                 OVRUNIT_CLOCK_GATE_DISABLE |
4915                 OVCUNIT_CLOCK_GATE_DISABLE;
4916         if (IS_GM45(dev))
4917                 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
4918         I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
4919
4920         /* WaDisableRenderCachePipelinedFlush */
4921         I915_WRITE(CACHE_MODE_0,
4922                    _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
4923
4924         g4x_disable_trickle_feed(dev);
4925 }
4926
4927 static void crestline_init_clock_gating(struct drm_device *dev)
4928 {
4929         struct drm_i915_private *dev_priv = dev->dev_private;
4930
4931         I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
4932         I915_WRITE(RENCLK_GATE_D2, 0);
4933         I915_WRITE(DSPCLK_GATE_D, 0);
4934         I915_WRITE(RAMCLK_GATE_D, 0);
4935         I915_WRITE16(DEUC, 0);
4936         I915_WRITE(MI_ARB_STATE,
4937                    _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
4938 }
4939
4940 static void broadwater_init_clock_gating(struct drm_device *dev)
4941 {
4942         struct drm_i915_private *dev_priv = dev->dev_private;
4943
4944         I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
4945                    I965_RCC_CLOCK_GATE_DISABLE |
4946                    I965_RCPB_CLOCK_GATE_DISABLE |
4947                    I965_ISC_CLOCK_GATE_DISABLE |
4948                    I965_FBC_CLOCK_GATE_DISABLE);
4949         I915_WRITE(RENCLK_GATE_D2, 0);
4950         I915_WRITE(MI_ARB_STATE,
4951                    _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
4952 }
4953
4954 static void gen3_init_clock_gating(struct drm_device *dev)
4955 {
4956         struct drm_i915_private *dev_priv = dev->dev_private;
4957         u32 dstate = I915_READ(D_STATE);
4958
4959         dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
4960                 DSTATE_DOT_CLOCK_GATING;
4961         I915_WRITE(D_STATE, dstate);
4962
4963         if (IS_PINEVIEW(dev))
4964                 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
4965
4966         /* IIR "flip pending" means done if this bit is set */
4967         I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
4968 }
4969
4970 static void i85x_init_clock_gating(struct drm_device *dev)
4971 {
4972         struct drm_i915_private *dev_priv = dev->dev_private;
4973
4974         I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
4975 }
4976
4977 static void i830_init_clock_gating(struct drm_device *dev)
4978 {
4979         struct drm_i915_private *dev_priv = dev->dev_private;
4980
4981         I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
4982 }
4983
4984 void intel_init_clock_gating(struct drm_device *dev)
4985 {
4986         struct drm_i915_private *dev_priv = dev->dev_private;
4987
4988         dev_priv->display.init_clock_gating(dev);
4989 }
4990
4991 void intel_suspend_hw(struct drm_device *dev)
4992 {
4993         if (HAS_PCH_LPT(dev))
4994                 lpt_suspend_hw(dev);
4995 }
4996
4997 /**
4998  * We should only use the power well if we explicitly asked the hardware to
4999  * enable it, so check if it's enabled and also check if we've requested it to
5000  * be enabled.
5001  */
5002 bool intel_display_power_enabled(struct drm_device *dev,
5003                                  enum intel_display_power_domain domain)
5004 {
5005         struct drm_i915_private *dev_priv = dev->dev_private;
5006
5007         if (!HAS_POWER_WELL(dev))
5008                 return true;
5009
5010         switch (domain) {
5011         case POWER_DOMAIN_PIPE_A:
5012         case POWER_DOMAIN_TRANSCODER_EDP:
5013                 return true;
5014         case POWER_DOMAIN_PIPE_B:
5015         case POWER_DOMAIN_PIPE_C:
5016         case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
5017         case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
5018         case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
5019         case POWER_DOMAIN_TRANSCODER_A:
5020         case POWER_DOMAIN_TRANSCODER_B:
5021         case POWER_DOMAIN_TRANSCODER_C:
5022                 return I915_READ(HSW_PWR_WELL_DRIVER) ==
5023                        (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE);
5024         default:
5025                 BUG();
5026         }
5027 }
5028
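     /*
      * HSW_PWR_WELL_DRIVER carries both the driver's request bit
      * (HSW_PWR_WELL_ENABLE) and the hardware's status bit (HSW_PWR_WELL_STATE);
      * enabling writes the request and then polls the status bit.
      */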
5029 static void __intel_set_power_well(struct drm_device *dev, bool enable)
5030 {
5031         struct drm_i915_private *dev_priv = dev->dev_private;
5032         bool is_enabled, enable_requested;
5033         uint32_t tmp;
5034
5035         tmp = I915_READ(HSW_PWR_WELL_DRIVER);
5036         is_enabled = tmp & HSW_PWR_WELL_STATE;
5037         enable_requested = tmp & HSW_PWR_WELL_ENABLE;
5038
5039         if (enable) {
5040                 if (!enable_requested)
5041                         I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);
5042
5043                 if (!is_enabled) {
5044                         DRM_DEBUG_KMS("Enabling power well\n");
5045                         if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
5046                                       HSW_PWR_WELL_STATE), 20))
5047                                 DRM_ERROR("Timeout enabling power well\n");
5048                 }
5049         } else {
5050                 if (enable_requested) {
5051                         I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
5052                         DRM_DEBUG_KMS("Requesting to disable the power well\n");
5053                 }
5054         }
5055 }
5056
5057 static struct i915_power_well *hsw_pwr;
5058
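     /*
      * hsw_pwr->count tracks requests from the display audio driver while
      * i915_request records the i915 driver's own request; the well is only
      * powered down once both are clear.
      */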
5059 /* Display audio driver power well request */
5060 void i915_request_power_well(void)
5061 {
5062         if (WARN_ON(!hsw_pwr))
5063                 return;
5064
5065         spin_lock_irq(&hsw_pwr->lock);
5066         if (!hsw_pwr->count++ &&
5067                         !hsw_pwr->i915_request)
5068                 __intel_set_power_well(hsw_pwr->device, true);
5069         spin_unlock_irq(&hsw_pwr->lock);
5070 }
5071 EXPORT_SYMBOL_GPL(i915_request_power_well);
5072
5073 /* Display audio driver power well release */
5074 void i915_release_power_well(void)
5075 {
5076         if (WARN_ON(!hsw_pwr))
5077                 return;
5078
5079         spin_lock_irq(&hsw_pwr->lock);
5080         WARN_ON(!hsw_pwr->count);
5081         if (!--hsw_pwr->count &&
5082                        !hsw_pwr->i915_request)
5083                 __intel_set_power_well(hsw_pwr->device, false);
5084         spin_unlock_irq(&hsw_pwr->lock);
5085 }
5086 EXPORT_SYMBOL_GPL(i915_release_power_well);
5087
5088 int i915_init_power_well(struct drm_device *dev)
5089 {
5090         struct drm_i915_private *dev_priv = dev->dev_private;
5091
5092         hsw_pwr = &dev_priv->power_well;
5093
5094         hsw_pwr->device = dev;
5095         spin_lock_init(&hsw_pwr->lock);
5096         hsw_pwr->count = 0;
5097
5098         return 0;
5099 }
5100
5101 void i915_remove_power_well(struct drm_device *dev)
5102 {
5103         hsw_pwr = NULL;
5104 }
5105
5106 void intel_set_power_well(struct drm_device *dev, bool enable)
5107 {
5108         struct drm_i915_private *dev_priv = dev->dev_private;
5109         struct i915_power_well *power_well = &dev_priv->power_well;
5110
5111         if (!HAS_POWER_WELL(dev))
5112                 return;
5113
5114         if (!i915_disable_power_well && !enable)
5115                 return;
5116
5117         spin_lock_irq(&power_well->lock);
5118         power_well->i915_request = enable;
5119
5120         /* only reject "disable" power well request */
5121         if (power_well->count && !enable) {
5122                 spin_unlock_irq(&power_well->lock);
5123                 return;
5124         }
5125
5126         __intel_set_power_well(dev, enable);
5127         spin_unlock_irq(&power_well->lock);
5128 }
5129
5130 /*
5131  * Starting with Haswell, we have a "Power Down Well" that can be turned off
5132  * when not needed anymore. We have 4 registers that can request the power well
5133  * to be enabled, and it will only be disabled if none of the registers is
5134  * requesting it to be enabled.
5135  */
5136 void intel_init_power_well(struct drm_device *dev)
5137 {
5138         struct drm_i915_private *dev_priv = dev->dev_private;
5139
5140         if (!HAS_POWER_WELL(dev))
5141                 return;
5142
5143         /* For now, we need the power well to be always enabled. */
5144         intel_set_power_well(dev, true);
5145
5146         /* We're taking over the BIOS, so clear any requests made by it since
5147          * the driver is in charge now. */
5148         if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE)
5149                 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
5150 }
5151
5152 /* Set up chip specific power management-related functions */
5153 void intel_init_pm(struct drm_device *dev)
5154 {
5155         struct drm_i915_private *dev_priv = dev->dev_private;
5156
5157         if (I915_HAS_FBC(dev)) {
5158                 if (HAS_PCH_SPLIT(dev)) {
5159                         dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
5160                         if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
5161                                 dev_priv->display.enable_fbc =
5162                                         gen7_enable_fbc;
5163                         else
5164                                 dev_priv->display.enable_fbc =
5165                                         ironlake_enable_fbc;
5166                         dev_priv->display.disable_fbc = ironlake_disable_fbc;
5167                 } else if (IS_GM45(dev)) {
5168                         dev_priv->display.fbc_enabled = g4x_fbc_enabled;
5169                         dev_priv->display.enable_fbc = g4x_enable_fbc;
5170                         dev_priv->display.disable_fbc = g4x_disable_fbc;
5171                 } else if (IS_CRESTLINE(dev)) {
5172                         dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
5173                         dev_priv->display.enable_fbc = i8xx_enable_fbc;
5174                         dev_priv->display.disable_fbc = i8xx_disable_fbc;
5175                 }
5176                 /* 855GM needs testing */
5177         }
5178
5179         /* For CxSR */
5180         if (IS_PINEVIEW(dev))
5181                 i915_pineview_get_mem_freq(dev);
5182         else if (IS_GEN5(dev))
5183                 i915_ironlake_get_mem_freq(dev);
5184
5185         /* For FIFO watermark updates */
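             /*
              * Note: when a platform's latency values cannot be read below,
              * update_wm is left NULL so watermarks (and CxSR) are never
              * touched on that platform.
              */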
5186         if (HAS_PCH_SPLIT(dev)) {
5187                 if (IS_GEN5(dev)) {
5188                         if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
5189                                 dev_priv->display.update_wm = ironlake_update_wm;
5190                         else {
5191                                 DRM_DEBUG_KMS("Failed to get proper latency. "
5192                                               "Disable CxSR\n");
5193                                 dev_priv->display.update_wm = NULL;
5194                         }
5195                         dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
5196                 } else if (IS_GEN6(dev)) {
5197                         if (SNB_READ_WM0_LATENCY()) {
5198                                 dev_priv->display.update_wm = sandybridge_update_wm;
5199                                 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
5200                         } else {
5201                                 DRM_DEBUG_KMS("Failed to read display plane latency. "
5202                                               "Disable CxSR\n");
5203                                 dev_priv->display.update_wm = NULL;
5204                         }
5205                         dev_priv->display.init_clock_gating = gen6_init_clock_gating;
5206                 } else if (IS_IVYBRIDGE(dev)) {
5207                         if (SNB_READ_WM0_LATENCY()) {
5208                                 dev_priv->display.update_wm = ivybridge_update_wm;
5209                                 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
5210                         } else {
5211                                 DRM_DEBUG_KMS("Failed to read display plane latency. "
5212                                               "Disable CxSR\n");
5213                                 dev_priv->display.update_wm = NULL;
5214                         }
5215                         dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
5216                 } else if (IS_HASWELL(dev)) {
5217                         if (I915_READ64(MCH_SSKPD)) {
5218                                 dev_priv->display.update_wm = haswell_update_wm;
5219                                 dev_priv->display.update_sprite_wm =
5220                                         haswell_update_sprite_wm;
5221                         } else {
5222                                 DRM_DEBUG_KMS("Failed to read display plane latency. "
5223                                               "Disable CxSR\n");
5224                                 dev_priv->display.update_wm = NULL;
5225                         }
5226                         dev_priv->display.init_clock_gating = haswell_init_clock_gating;
5227                 } else
5228                         dev_priv->display.update_wm = NULL;
5229         } else if (IS_VALLEYVIEW(dev)) {
5230                 dev_priv->display.update_wm = valleyview_update_wm;
5231                 dev_priv->display.init_clock_gating =
5232                         valleyview_init_clock_gating;
5233         } else if (IS_PINEVIEW(dev)) {
5234                 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
5235                                             dev_priv->is_ddr3,
5236                                             dev_priv->fsb_freq,
5237                                             dev_priv->mem_freq)) {
5238                         DRM_INFO("failed to find known CxSR latency "
5239                                  "(found ddr%s fsb freq %d, mem freq %d), "
5240                                  "disabling CxSR\n",
5241                                  (dev_priv->is_ddr3 == 1) ? "3" : "2",
5242                                  dev_priv->fsb_freq, dev_priv->mem_freq);
5243                         /* Disable CxSR and never update its watermark again */
5244                         pineview_disable_cxsr(dev);
5245                         dev_priv->display.update_wm = NULL;
5246                 } else
5247                         dev_priv->display.update_wm = pineview_update_wm;
5248                 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
5249         } else if (IS_G4X(dev)) {
5250                 dev_priv->display.update_wm = g4x_update_wm;
5251                 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
5252         } else if (IS_GEN4(dev)) {
5253                 dev_priv->display.update_wm = i965_update_wm;
5254                 if (IS_CRESTLINE(dev))
5255                         dev_priv->display.init_clock_gating = crestline_init_clock_gating;
5256                 else if (IS_BROADWATER(dev))
5257                         dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
5258         } else if (IS_GEN3(dev)) {
5259                 dev_priv->display.update_wm = i9xx_update_wm;
5260                 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
5261                 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
5262         } else if (IS_I865G(dev)) {
5263                 dev_priv->display.update_wm = i830_update_wm;
5264                 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
5265                 dev_priv->display.get_fifo_size = i830_get_fifo_size;
5266         } else if (IS_I85X(dev)) {
5267                 dev_priv->display.update_wm = i9xx_update_wm;
5268                 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
5269                 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
5270         } else {
5271                 dev_priv->display.update_wm = i830_update_wm;
5272                 dev_priv->display.init_clock_gating = i830_init_clock_gating;
5273                 if (IS_845G(dev))
5274                         dev_priv->display.get_fifo_size = i845_get_fifo_size;
5275                 else
5276                         dev_priv->display.get_fifo_size = i830_get_fifo_size;
5277         }
5278 }
5279
5280 static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
5281 {
5282         u32 gt_thread_status_mask;
5283
5284         if (IS_HASWELL(dev_priv->dev))
5285                 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
5286         else
5287                 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
5288
5289         /* Workaround for a sporadic read returning 0: wait for the GT
5290          * thread to wake up.
5291          */
5292         if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
5293                 DRM_ERROR("GT thread status wait timed out\n");
5294 }
5295
5296 static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
5297 {
5298         I915_WRITE_NOTRACE(FORCEWAKE, 0);
5299         POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
5300 }
5301
5302 static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
5303 {
5304         if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
5305                             FORCEWAKE_ACK_TIMEOUT_MS))
5306                 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
5307
5308         I915_WRITE_NOTRACE(FORCEWAKE, 1);
5309         POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
5310
5311         if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
5312                             FORCEWAKE_ACK_TIMEOUT_MS))
5313                 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
5314
5315         /* WaRsForcewakeWaitTC0:snb */
5316         __gen6_gt_wait_for_thread_c0(dev_priv);
5317 }
5318
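     /*
      * FORCEWAKE_MT is a masked register: the upper 16 bits select which of
      * the lower 16 bits take effect, so _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE()
      * can flip individual bits without a read-modify-write cycle.
      */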
5319 static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
5320 {
5321         I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
5322         /* something from same cacheline, but !FORCEWAKE_MT */
5323         POSTING_READ(ECOBUS);
5324 }
5325
5326 static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
5327 {
5328         u32 forcewake_ack;
5329
5330         if (IS_HASWELL(dev_priv->dev))
5331                 forcewake_ack = FORCEWAKE_ACK_HSW;
5332         else
5333                 forcewake_ack = FORCEWAKE_MT_ACK;
5334
5335         if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
5336                             FORCEWAKE_ACK_TIMEOUT_MS))
5337                 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
5338
5339         I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
5340         /* something from same cacheline, but !FORCEWAKE_MT */
5341         POSTING_READ(ECOBUS);
5342
5343         if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
5344                             FORCEWAKE_ACK_TIMEOUT_MS))
5345                 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
5346
5347         /* WaRsForcewakeWaitTC0:ivb,hsw */
5348         __gen6_gt_wait_for_thread_c0(dev_priv);
5349 }
5350
5351 /*
5352  * Generally this is called implicitly by the register read function. However,
5353  * if some sequence requires the GT to not power down then this function should
5354  * be called at the beginning of the sequence followed by a call to
5355  * gen6_gt_force_wake_put() at the end of the sequence.
5356  */
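     /*
      * A typical explicit sequence (illustrative sketch only):
      *
      *     gen6_gt_force_wake_get(dev_priv);
      *     ... a run of register accesses that must not race with the GT
      *     ... powering down ...
      *     gen6_gt_force_wake_put(dev_priv);
      */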
5357 void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
5358 {
5359         unsigned long irqflags;
5360
5361         spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
5362         if (dev_priv->forcewake_count++ == 0)
5363                 dev_priv->gt.force_wake_get(dev_priv);
5364         spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
5365 }
5366
5367 void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
5368 {
5369         u32 gtfifodbg;
5370         gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
5371         if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
5372              "MMIO read or write has been dropped %x\n", gtfifodbg))
5373                 I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
5374 }
5375
5376 static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
5377 {
5378         I915_WRITE_NOTRACE(FORCEWAKE, 0);
5379         /* something from same cacheline, but !FORCEWAKE */
5380         POSTING_READ(ECOBUS);
5381         gen6_gt_check_fifodbg(dev_priv);
5382 }
5383
5384 static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
5385 {
5386         I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
5387         /* something from same cacheline, but !FORCEWAKE_MT */
5388         POSTING_READ(ECOBUS);
5389         gen6_gt_check_fifodbg(dev_priv);
5390 }
5391
5392 /*
5393  * see gen6_gt_force_wake_get()
5394  */
5395 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
5396 {
5397         unsigned long irqflags;
5398
5399         spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
5400         if (--dev_priv->forcewake_count == 0)
5401                 dev_priv->gt.force_wake_put(dev_priv);
5402         spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
5403 }
5404
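     /*
      * Every posted write consumes a GT FIFO entry; once the number of free
      * entries drops to the reserved threshold, spin (with udelay) until the
      * hardware drains the FIFO.  A non-zero return flags a timeout so callers
      * can check GTFIFODBG afterwards.
      */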
5405 int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
5406 {
5407         int ret = 0;
5408
5409         if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
5410                 int loop = 500;
5411                 u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
5412                 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
5413                         udelay(10);
5414                         fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
5415                 }
5416                 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
5417                         ++ret;
5418                 dev_priv->gt_fifo_count = fifo;
5419         }
5420         dev_priv->gt_fifo_count--;
5421
5422         return ret;
5423 }
5424
5425 static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
5426 {
5427         I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
5428         /* something from same cacheline, but !FORCEWAKE_VLV */
5429         POSTING_READ(FORCEWAKE_ACK_VLV);
5430 }
5431
5432 static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
5433 {
5434         if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
5435                             FORCEWAKE_ACK_TIMEOUT_MS))
5436                 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
5437
5438         I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
5439         I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
5440                            _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
5441
5442         if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
5443                             FORCEWAKE_ACK_TIMEOUT_MS))
5444                 DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
5445
5446         if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
5447                              FORCEWAKE_KERNEL),
5448                             FORCEWAKE_ACK_TIMEOUT_MS))
5449                 DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
5450
5451         /* WaRsForcewakeWaitTC0:vlv */
5452         __gen6_gt_wait_for_thread_c0(dev_priv);
5453 }
5454
5455 static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
5456 {
5457         I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
5458         I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
5459                            _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
5460         /* The below doubles as a POSTING_READ */
5461         gen6_gt_check_fifodbg(dev_priv);
5462 }
5463
5464 void intel_gt_reset(struct drm_device *dev)
5465 {
5466         struct drm_i915_private *dev_priv = dev->dev_private;
5467
5468         if (IS_VALLEYVIEW(dev)) {
5469                 vlv_force_wake_reset(dev_priv);
5470         } else if (INTEL_INFO(dev)->gen >= 6) {
5471                 __gen6_gt_force_wake_reset(dev_priv);
5472                 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
5473                         __gen6_gt_force_wake_mt_reset(dev_priv);
5474         }
5475 }
5476
5477 void intel_gt_init(struct drm_device *dev)
5478 {
5479         struct drm_i915_private *dev_priv = dev->dev_private;
5480
5481         spin_lock_init(&dev_priv->gt_lock);
5482
5483         intel_gt_reset(dev);
5484
5485         if (IS_VALLEYVIEW(dev)) {
5486                 dev_priv->gt.force_wake_get = vlv_force_wake_get;
5487                 dev_priv->gt.force_wake_put = vlv_force_wake_put;
5488         } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
5489                 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
5490                 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
5491         } else if (IS_GEN6(dev)) {
5492                 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
5493                 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
5494         }
5495         INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5496                           intel_gen6_powersave_work);
5497 }
5498
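     /*
      * PCU mailbox handshake: place the operand in GEN6_PCODE_DATA, write the
      * command to GEN6_PCODE_MAILBOX with GEN6_PCODE_READY set, then wait for
      * the PCU to clear READY; for reads the reply is then fetched back out of
      * GEN6_PCODE_DATA.
      */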
5499 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
5500 {
5501         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5502
5503         if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
5504                 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
5505                 return -EAGAIN;
5506         }
5507
5508         I915_WRITE(GEN6_PCODE_DATA, *val);
5509         I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
5510
5511         if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
5512                      500)) {
5513                 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
5514                 return -ETIMEDOUT;
5515         }
5516
5517         *val = I915_READ(GEN6_PCODE_DATA);
5518         I915_WRITE(GEN6_PCODE_DATA, 0);
5519
5520         return 0;
5521 }
5522
5523 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
5524 {
5525         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5526
5527         if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
5528                 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
5529                 return -EAGAIN;
5530         }
5531
5532         I915_WRITE(GEN6_PCODE_DATA, val);
5533         I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
5534
5535         if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
5536                      500)) {
5537                 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
5538                 return -ETIMEDOUT;
5539         }
5540
5541         I915_WRITE(GEN6_PCODE_DATA, 0);
5542
5543         return 0;
5544 }
5545
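     /*
      * Convert a Punit frequency opcode to MHz (and back, in vlv_freq_opcode()
      * below).  The slope/offset pair depends on the DDR clock; for example,
      * with ddr_freq 1066 an opcode of 0xd2 maps to
      * (0xd2 - 0xbd) * 22 + 133 = 595 MHz.  The inverse uses integer division
      * and clamps the result to an opcode of 0xea.
      */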
5546 int vlv_gpu_freq(int ddr_freq, int val)
5547 {
5548         int mult, base;
5549
5550         switch (ddr_freq) {
5551         case 800:
5552                 mult = 20;
5553                 base = 120;
5554                 break;
5555         case 1066:
5556                 mult = 22;
5557                 base = 133;
5558                 break;
5559         case 1333:
5560                 mult = 21;
5561                 base = 125;
5562                 break;
5563         default:
5564                 return -1;
5565         }
5566
5567         return ((val - 0xbd) * mult) + base;
5568 }
5569
5570 int vlv_freq_opcode(int ddr_freq, int val)
5571 {
5572         int mult, base;
5573
5574         switch (ddr_freq) {
5575         case 800:
5576                 mult = 20;
5577                 base = 120;
5578                 break;
5579         case 1066:
5580                 mult = 22;
5581                 base = 133;
5582                 break;
5583         case 1333:
5584                 mult = 21;
5585                 base = 125;
5586                 break;
5587         default:
5588                 return -1;
5589         }
5590
5591         val /= mult;
5592         val -= base / mult;
5593         val += 0xbd;
5594
5595         if (val > 0xea)
5596                 val = 0xea;
5597
5598         return val;
5599 }
5600