/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <drm/drm_atomic_helper.h>

/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ from
 * each other in the latency required to enter and leave RC6, and in the
 * voltage consumed by the GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */

#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)

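/*
 * Illustrative example (not from the original source): a platform that wants
 * plain RC6 plus deep RC6, but not the deepest state, would use the mask
 * (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE).
 */
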
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	I915_WRITE(GEN8_CONFIG0,
		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);

	/* WaEnableChickenDCPR:skl,bxt,kbl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
	/* WaFbcWakeMemOn:skl,bxt,kbl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}

static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   PWM1_GATING_DIS | PWM2_GATING_DIS);
}

static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

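/*
 * Each row above holds, in order, is_desktop, is_ddr3, the FSB frequency
 * (MHz) and the memory frequency (MHz) used as lookup keys by
 * intel_get_cxsr_latency() below, followed by four CxSR latency values in
 * ns; the exact field order of the latter is given by struct cxsr_latency
 * in i915_drv.h.
 */
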
static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
							 bool is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}

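/*
 * Note: the Punit signals completion of the DDR DVFS request above by
 * clearing FORCE_DDR_FREQ_REQ_ACK; the wait_for() gives it 3 ms before
 * declaring the request timed out.
 */
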
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)

void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
		dev_priv->wm.vlv.cxsr = enable;
	} else if (IS_G4X(dev_priv) || IS_CRESTLINE(dev_priv)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n", enableddisabled(enable));
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))

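/*
 * Worked example (illustrative values): with dsparb = 0x4040 and
 * dsparb2 = 0x1, VLV_FIFO_START(dsparb, dsparb2, 0, 0) = 0x40 | (0x1 << 8)
 * = 320. The low eight bits of each FIFO start point live in DSPARB and
 * the ninth bit in DSPARB2.
 */
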
static int vlv_get_fifo_size(struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int sprite0_start, sprite1_start, size;

	if (plane->id == PLANE_CURSOR)
		return 63;

	switch (plane->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return 0;
	}

	switch (plane->id) {
	case PLANE_PRIMARY:
		size = sprite0_start;
		break;
	case PLANE_SPRITE0:
		size = sprite1_start - sprite0_start;
		break;
	case PLANE_SPRITE1:
		size = 512 - 1 - sprite1_start;
		break;
	default:
		return 0;
	}

	DRM_DEBUG_KMS("%s FIFO size: %d\n", plane->base.name, size);

	return size;
}

static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i830_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills past the
 * watermark point. If the FIFO drains completely, a FIFO underrun will
 * occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size, int cpp,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand.
	 */
	entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}

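/*
 * Worked example (illustrative numbers): a 100000 kHz pixel clock at 4 bytes
 * per pixel with a 5000 ns latency drains (100000 / 1000) * 4 * 5000 / 1000
 * = 2000 bytes while a fetch is outstanding; with 64-byte cachelines that is
 * DIV_ROUND_UP(2000, 64) = 32 FIFO entries, so a 96-entry FIFO with a guard
 * of 2 yields a watermark of 96 - (32 + 2) = 62.
 */
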
static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc, *enabled = NULL;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

static bool g4x_compute_wm0(struct drm_i915_private *dev_priv,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	const struct drm_framebuffer *fb;
	int htotal, hdisplay, clock, cpp;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev_priv, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &crtc->config->base.adjusted_mode;
	fb = crtc->base.primary->state->fb;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = crtc->config->pipe_src_w;
	cpp = drm_format_plane_cpp(fb->pixel_format, 0);

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->base.cursor->state->crtc_w * cpp;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_i915_private *dev_priv,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%u), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%u), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_i915_private *dev_priv,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	const struct drm_framebuffer *fb;
	int hdisplay, htotal, cpp, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev_priv, plane);
	adjusted_mode = &crtc->config->base.adjusted_mode;
	fb = crtc->base.primary->state->fb;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = crtc->config->pipe_src_w;
	cpp = drm_format_plane_cpp(fb->pixel_format, 0);

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * cpp;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * cpp / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * cpp * crtc->base.cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev_priv,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct intel_crtc *crtc,
				const struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(VLV_DDL(pipe),
		   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
		   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
		   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
		   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	}

	/* zero (unused) WM1 watermarks */
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);
	I915_WRITE(DSPHOWM1, 0);

	POSTING_READ(DSPFW1);
}

enum vlv_wm_level {
	VLV_WM_LEVEL_PM2,
	VLV_WM_LEVEL_PM5,
	VLV_WM_LEVEL_DDR_DVFS,
};

/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}

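/*
 * Worked example (illustrative numbers): latency = 30 (3us), pixel_rate =
 * 200000 kHz, pipe_htotal = 2000 gives 30 * 200000 / (2000 * 10000) = 0
 * full lines, rounded up to 1 line; at 1920 pixels and 4 bytes per pixel
 * that is 7680 bytes, or DIV_ROUND_UP(7680, 64) = 120 FIFO-sized blocks.
 */
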
static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}

static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
				     struct intel_crtc *crtc,
				     const struct intel_plane_state *state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!state->base.visible)
		return 0;

	cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
	clock = crtc->config->base.adjusted_mode.crtc_clock;
	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
	width = crtc->config->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}

static void vlv_compute_fifo(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	unsigned int total_rate = 0;
	const int fifo_size = 512 - 1;
	int fifo_extra, fifo_left = fifo_size;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (state->base.visible) {
			wm_state->num_active_planes++;
			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);
		unsigned int rate;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			plane->wm.fifo_size = 63;
			continue;
		}

		if (!state->base.visible) {
			plane->wm.fifo_size = 0;
			continue;
		}

		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		plane->wm.fifo_size = fifo_size * rate / total_rate;
		fifo_left -= plane->wm.fifo_size;
	}

	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* give it all to the first plane if none are active */
		if (plane->wm.fifo_size == 0 &&
		    wm_state->num_active_planes)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		plane->wm.fifo_size += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(fifo_left != 0);
}

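/*
 * Worked example (illustrative): two visible non-cursor planes, both 4 bytes
 * per pixel, share the 511-entry FIFO. Each gets 511 * 4 / 8 = 255 entries,
 * leaving one entry over; DIV_ROUND_UP(1, 2) = 1, so the first plane absorbs
 * the remainder for a final 256/255 split.
 */
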
static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
{
	if (wm > fifo_size)
		return USHRT_MAX;
	else
		return fifo_size - wm;
}

static void vlv_invert_wms(struct intel_crtc *crtc)
{
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	int level;

	for (level = 0; level < wm_state->num_levels; level++) {
		struct drm_device *dev = crtc->base.dev;
		const int sr_fifo_size =
			INTEL_INFO(to_i915(dev))->num_pipes * 512 - 1;
		struct intel_plane *plane;

		wm_state->sr[level].plane =
			vlv_invert_wm_value(wm_state->sr[level].plane,
					    sr_fifo_size);
		wm_state->sr[level].cursor =
			vlv_invert_wm_value(wm_state->sr[level].cursor,
					    63);

		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			wm_state->wm[level].plane[plane->id] =
				vlv_invert_wm_value(wm_state->wm[level].plane[plane->id],
						    plane->wm.fifo_size);
		}
	}
}

static void vlv_compute_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	int level;

	memset(wm_state, 0, sizeof(*wm_state));

	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
	wm_state->num_levels = dev_priv->wm.max_level + 1;

	wm_state->num_active_planes = 0;

	vlv_compute_fifo(crtc);

	if (wm_state->num_active_planes != 1)
		wm_state->cxsr = false;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (!state->base.visible)
			continue;

		/* normal watermarks */
		for (level = 0; level < wm_state->num_levels; level++) {
			int wm = vlv_compute_wm_level(plane, crtc, state, level);
			int max_wm = plane->wm.fifo_size;

			/* hack */
			if (WARN_ON(level == 0 && wm > max_wm))
				wm = max_wm;

			if (wm > max_wm)
				break;

			wm_state->wm[level].plane[plane->id] = wm;
		}

		wm_state->num_levels = level;

		if (!wm_state->cxsr)
			continue;

		/* maxfifo watermarks */
		if (plane->id == PLANE_CURSOR) {
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].cursor =
					wm_state->wm[level].plane[PLANE_CURSOR];
		} else {
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					max(wm_state->sr[level].plane,
					    wm_state->wm[level].plane[plane->id]);
		}
	}

	/* clear any (partially) filled invalid levels */
	for (level = wm_state->num_levels; level < dev_priv->wm.max_level + 1; level++) {
		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
	}

	vlv_invert_wms(crtc);
}

#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)

static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane;
	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		switch (plane->id) {
		case PLANE_PRIMARY:
			sprite0_start = plane->wm.fifo_size;
			break;
		case PLANE_SPRITE0:
			sprite1_start = sprite0_start + plane->wm.fifo_size;
			break;
		case PLANE_SPRITE1:
			fifo_size = sprite1_start + plane->wm.fifo_size;
			break;
		case PLANE_CURSOR:
			WARN_ON(plane->wm.fifo_size != 63);
			break;
		default:
			MISSING_CASE(plane->id);
			break;
		}
	}

	WARN_ON(fifo_size != 512 - 1);

	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
		      pipe_name(crtc->pipe), sprite0_start,
		      sprite1_start, fifo_size);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ(DSPARB3);
		dsparb2 = I915_READ(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB3, dsparb3);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	default:
		break;
	}
}

static void vlv_merge_wm(struct drm_device *dev,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = to_i915(dev)->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(dev, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm_state;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(dev, crtc) {
		struct vlv_wm_state *wm_state = &crtc->wm_state;
		enum pipe pipe = crtc->pipe;

		if (!crtc->active)
			continue;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
	}
}

static void vlv_update_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	struct vlv_wm_values wm = {};

	vlv_compute_wm(crtc);
	vlv_merge_wm(dev, &wm);

	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
		/* FIXME should be part of crtc atomic commit */
		vlv_pipe_set_fifo_size(crtc);
		return;
	}

	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, false);

	if (wm.level < VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, false);

	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, false);

	/* FIXME should be part of crtc atomic commit */
	vlv_pipe_set_fifo_size(crtc);

	vlv_write_wm_values(crtc, &wm);

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
		      pipe_name(pipe), wm.pipe[pipe].plane[PLANE_PRIMARY], wm.pipe[pipe].plane[PLANE_CURSOR],
		      wm.pipe[pipe].plane[PLANE_SPRITE0], wm.pipe[pipe].plane[PLANE_SPRITE1],
		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);

	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, true);

	dev_priv->wm.vlv = wm;
}

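/*
 * Note the ordering in vlv_update_wm() above: the deeper power states
 * (DDR DVFS, PM5, CxSR) are dropped before the new, lower watermarks are
 * written, and only re-enabled afterwards, so the hardware never runs a
 * deep state against watermarks that do not support it.
 */
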
#define single_plane_enabled(mask) is_power_of_2(mask)

static void g4x_update_wm(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	static const int sr_latency_ns = 12000;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev_priv, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev_priv, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev_priv, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i965_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = crtc->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * crtc->base.cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct intel_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev_priv))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev_priv))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, 0);
	crtc = intel_get_crtc_for_plane(dev_priv, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN2(dev_priv))
			cpp = 4;
		else
			cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev_priv))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, 1);
	crtc = intel_get_crtc_for_plane(dev_priv, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN2(dev_priv))
			cpp = 4;
		else
			cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev_priv) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->base.primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (!i915_gem_object_is_tiled(obj))
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev_priv) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&enabled->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			enabled->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = enabled->config->pipe_src_w;
		int cpp;
		unsigned long line_time_us;
		int entries;

		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
			cpp = 4;
		else
			cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i845_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev_priv);
	if (crtc == NULL)
		return;

	adjusted_mode = &crtc->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev_priv, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}

uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	uint32_t pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (pipe_config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * cpp * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t cpp,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;
	if (WARN_ON(!pipe_htotal))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}

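/*
 * Note the difference between the two methods above: ilk_wm_method1()
 * counts the raw bytes fetched during the latency window, while
 * ilk_wm_method2() first rounds the latency up to a whole number of
 * scanlines and charges a full line's worth of bytes per scanline.
 */
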
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t cpp)
{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible. But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
	if (WARN_ON(!cpp))
		return 0;
	if (WARN_ON(!horiz_pixels))
		return 0;

	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value,
				   bool is_lp)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->base.visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->base.dst),
				 cpp, mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->base.visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->base.dst),
				 cpp, mem_value);
	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	/*
	 * We treat the cursor plane as always-on for the purposes of watermark
	 * calculation. Until we have two-stage watermark programming merged,
	 * this is necessary to avoid flickering.
	 */
	int cpp = 4;
	int width = pstate->base.visible ? pstate->base.crtc_w : 64;

	if (!cstate->base.active)
		return 0;

	return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
			      cstate->base.adjusted_mode.crtc_htotal,
			      width, cpp, mem_value);
}

/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;

	if (!cstate->base.active || !pstate->base.visible)
		return 0;

	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
}

static unsigned int
ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 8)
		return 3072;
	else if (INTEL_GEN(dev_priv) >= 7)
		return 768;
	else
		return 512;
}

static unsigned int
ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
		     int level, bool is_sprite)
{
	if (INTEL_GEN(dev_priv) >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_GEN(dev_priv) >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int
ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
{
	if (INTEL_GEN(dev_priv) >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 8)
		return 31;
	else
		return 15;
}

/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned int fifo_size = ilk_display_fifo_size(dev_priv);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev_priv)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_GEN(dev_priv) <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
}

/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(to_i915(dev), level);
}

static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(to_i915(dev));
}

static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
	max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}

static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}

static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *intel_crtc,
				 int level,
				 struct intel_crtc_state *cstate,
				 struct intel_plane_state *pristate,
				 struct intel_plane_state *sprstate,
				 struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	if (pristate) {
		result->pri_val = ilk_compute_pri_wm(cstate, pristate,
						     pri_latency, level);
		result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
	}

	if (sprstate)
		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);

	if (curstate)
		result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);

	result->enable = true;
}

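/*
 * Note: the "*= 5" above converts the stored 0.5us units into the 0.1us
 * units that ilk_compute_pri_wm(), ilk_compute_spr_wm() and
 * ilk_compute_cur_wm() expect (see their "mem_value must be in 0.1us
 * units" comments).
 */
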
static uint32_t
hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
	const struct intel_atomic_state *intel_state =
		to_intel_atomic_state(cstate->base.state);
	const struct drm_display_mode *adjusted_mode =
		&cstate->base.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!cstate->base.active)
		return 0;
	if (WARN_ON(adjusted_mode->crtc_clock == 0))
		return 0;
	if (WARN_ON(intel_state->cdclk == 0))
		return 0;

	/* The watermarks are computed based on how long it takes to fill a
	 * single row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
				     adjusted_mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
					 intel_state->cdclk);

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}

static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
				  uint16_t wm[8])
{
	if (IS_GEN9(dev_priv)) {
		uint32_t val;
		int ret, i;
		int level, max_level = ilk_wm_max_level(dev_priv);

		/* read the first set of memory latencies[0:3] */
		val = 0; /* data0 to be programmed to 0 for first set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);

		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/* read the second set of memory latencies[4:7] */
		val = 1; /* data0 to be programmed to 1 for second set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);
		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/*
		 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
		 * need to be disabled. We make sure to sanitize the values out
		 * of the punit to satisfy this requirement.
		 */
		for (level = 1; level <= max_level; level++) {
			if (wm[level] == 0) {
				for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;
				break;
			}
		}

		/*
		 * WaWmMemoryReadLatency:skl
		 *
		 * punit doesn't take into account the read latency so we need
		 * to add 2us to the various latency levels we retrieve from
		 * the punit when level 0 response data is 0us.
		 */
		if (wm[0] == 0) {
			wm[0] += 2;
			for (level = 1; level <= max_level; level++) {
				if (wm[level] == 0)
					break;
				wm[level] += 2;
			}
		}

	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		uint64_t sskpd = I915_READ64(MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		uint32_t sskpd = I915_READ(MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		uint32_t mltr = I915_READ(MLTR_ILK);

		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	}
}

static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
				       uint16_t wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (IS_GEN5(dev_priv))
		wm[0] = 13;
}

static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
				       uint16_t wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (IS_GEN5(dev_priv))
		wm[0] = 13;

	/* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev_priv))
		wm[3] *= 2;
}

int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
{
	/* how many WM levels are we expecting */
	if (INTEL_GEN(dev_priv) >= 9)
		return 7;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return 4;
	else if (INTEL_GEN(dev_priv) >= 6)
		return 3;
	else
		return 2;
}

static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
				   const char *name,
				   const uint16_t wm[8])
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			DRM_ERROR("%s WM%d latency not provided\n",
				  name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (IS_GEN9(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
			      name, level, wm[level],
			      latency / 10, latency % 10);
	}
}

static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    uint16_t wm[5], uint16_t min)
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}

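/*
 * Unit sanity check (illustrative): "min" is in the same 0.1 us units as
 * WM0, while WM1+ values are stored in 0.5 us units. With min = 12
 * (i.e. 1.2 us), WM0 is raised to at least 12, and WM1+ levels are raised
 * to at least DIV_ROUND_UP(12, 5) = 3, i.e. 1.5 us -- the smallest 0.5 us
 * value that is not below the requested minimum.
 */
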
static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
{
	bool changed;

	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);

	if (!changed)
		return;

	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}

static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);

	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);

	if (IS_GEN6(dev_priv))
		snb_wm_latency_quirk(dev_priv);
}

static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
	intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
}

static bool ilk_validate_pipe_wm(struct drm_device *dev,
				 struct intel_pipe_wm *pipe_wm)
{
	/* LP0 watermark maximums depend on this pipe alone */
	const struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = pipe_wm->sprites_enabled,
		.sprites_scaled = pipe_wm->sprites_scaled,
	};
	struct ilk_wm_maximums max;

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
		DRM_DEBUG_KMS("LP0 watermark invalid\n");
		return false;
	}

	return true;
}

/* Compute new watermarks for the pipe */
static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct intel_pipe_wm *pipe_wm;
	struct drm_device *dev = state->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *intel_plane;
	struct intel_plane_state *pristate = NULL;
	struct intel_plane_state *sprstate = NULL;
	struct intel_plane_state *curstate = NULL;
	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
	struct ilk_wm_maximums max;

	pipe_wm = &cstate->wm.ilk.optimal;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct intel_plane_state *ps;

		ps = intel_atomic_get_existing_plane_state(state,
							   intel_plane);
		if (!ps)
			continue;

		if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			pristate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
			sprstate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
			curstate = ps;
	}

	pipe_wm->pipe_enabled = cstate->base.active;
	if (sprstate) {
		pipe_wm->sprites_enabled = sprstate->base.visible;
		pipe_wm->sprites_scaled = sprstate->base.visible &&
			(drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
			 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
	}

	usable_level = max_level;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
		usable_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (pipe_wm->sprites_scaled)
		usable_level = 0;

	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
			     pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);

	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
	pipe_wm->wm[0] = pipe_wm->raw_wm[0];

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		pipe_wm->linetime = hsw_compute_linetime_wm(cstate);

	if (!ilk_validate_pipe_wm(dev, pipe_wm))
		return -EINVAL;

	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);

	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &pipe_wm->raw_wm[level];

		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
				     pristate, sprstate, curstate, wm);

		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (level > usable_level)
			continue;

		if (ilk_validate_wm_level(level, &max, wm))
			pipe_wm->wm[level] = *wm;
		else
			usable_level = level;
	}

	return 0;
}

/*
 * Build a set of 'intermediate' watermark values that satisfy both the
 * old state and the new state.  These can be programmed to the hardware
 * immediately.
 */
static int ilk_compute_intermediate_wm(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate)
{
	struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
	struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
	int level, max_level = ilk_wm_max_level(to_i915(dev));

	/*
	 * Start with the final, target watermarks, then combine with the
	 * currently active watermarks to get values that are safe both before
	 * and after the vblank.
	 */
	*a = newstate->wm.ilk.optimal;
	a->pipe_enabled |= b->pipe_enabled;
	a->sprites_enabled |= b->sprites_enabled;
	a->sprites_scaled |= b->sprites_scaled;

	for (level = 0; level <= max_level; level++) {
		struct intel_wm_level *a_wm = &a->wm[level];
		const struct intel_wm_level *b_wm = &b->wm[level];

		a_wm->enable &= b_wm->enable;
		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
	}

	/*
	 * We need to make sure that these merged watermark values are
	 * actually a valid configuration themselves. If they're not,
	 * there's no safe way to transition from the old state to
	 * the new state, so we need to fail the atomic transaction.
	 */
	if (!ilk_validate_pipe_wm(dev, a))
		return -EINVAL;

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
		newstate->wm.need_postvbl_update = false;

	return 0;
}

/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}

/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_device *dev,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	int last_enabled_level = max_level;

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
	    config->num_pipes_active > 1)
		last_enabled_level = 0;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev, level, wm);

		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
	if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
	    intel_fbc_is_active(dev_priv)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}

static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}

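/*
 * Mapping example: on a platform where wm[4] is enabled (HSW/BDW with all
 * five levels valid), LP1/LP2/LP3 map to levels 1/3/4, skipping level 2;
 * otherwise they map straight to 1/2/3. E.g. ilk_wm_lp_to_level(2, pipe_wm)
 * returns 3 when pipe_wm->wm[4].enable is set, and 2 when it is not.
 */
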
/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return 2 * level;
	else
		return dev_priv->wm.pri_latency[level];
}

static void ilk_compute_wm_results(struct drm_device *dev,
				   const struct intel_pipe_wm *merged,
				   enum intel_ddb_partitioning partitioning,
				   struct ilk_wm_values *results)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc;
	int level, wm_lp;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];

		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
		results->wm_lp[wm_lp - 1] =
			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;

		if (INTEL_GEN(dev_priv) >= 8)
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;

		/*
		 * Always set WM1S_LP_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
		if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
			WARN_ON(wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
	}

	/* LP0 register values */
	for_each_intel_crtc(dev, intel_crtc) {
		enum pipe pipe = intel_crtc->pipe;
		const struct intel_wm_level *r =
			&intel_crtc->wm.active.ilk.wm[0];

		if (WARN_ON(!r->enable))
			continue;

		results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;

		results->wm_pipe[pipe] =
			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
	}
}

/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
						  struct intel_pipe_wm *r1,
						  struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(to_i915(dev));
	int level1 = 0, level2 = 0;

	for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}

/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)

static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
			dirty |= WM_DIRTY_LINETIME(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}

		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}

static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
}

/*
 * The spec says we shouldn't write when we don't need, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (INTEL_GEN(dev_priv) >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}

bool ilk_disable_lp_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}

#define SKL_SAGV_BLOCK_TIME	30 /* µs */

/*
 * FIXME: We still don't have the proper code to detect if we need to apply
 * the WA, so assume we'll always need it in order to avoid underruns.
 */
static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
	    IS_KABYLAKE(dev_priv))
		return true;

	return false;
}

static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
	if (IS_KABYLAKE(dev_priv))
		return true;

	if (IS_SKYLAKE(dev_priv) &&
	    dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
		return true;

	return false;
}

/*
 * SAGV dynamically adjusts the system agent voltage and clock frequencies
 * depending on power and performance requirements. The display engine access
 * to system memory is blocked during the adjustment time. Because of the
 * blocking time, having this enabled can cause full system hangs and/or pipe
 * underruns if we don't meet all of the following requirements:
 *
 *  - <= 1 pipe enabled
 *  - All planes can enable watermarks for latencies >= SAGV engine block time
 *  - We're not using an interlaced display configuration
 */
int
intel_enable_sagv(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!intel_has_sagv(dev_priv))
		return 0;

	if (dev_priv->sagv_status == I915_SAGV_ENABLED)
		return 0;

	DRM_DEBUG_KMS("Enabling the SAGV\n");
	mutex_lock(&dev_priv->rps.hw_lock);

	ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				      GEN9_SAGV_ENABLE);

	/* We don't need to wait for the SAGV when enabling */
	mutex_unlock(&dev_priv->rps.hw_lock);

	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have an SAGV.
	 */
	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
		return 0;
	} else if (ret < 0) {
		DRM_ERROR("Failed to enable the SAGV\n");
		return ret;
	}

	dev_priv->sagv_status = I915_SAGV_ENABLED;
	return 0;
}

static int
intel_do_sagv_disable(struct drm_i915_private *dev_priv)
{
	int ret;
	uint32_t temp = GEN9_SAGV_DISABLE;

	ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				     &temp);
	if (ret)
		return ret;
	else
		return temp & GEN9_SAGV_IS_DISABLED;
}

int
intel_disable_sagv(struct drm_i915_private *dev_priv)
{
	int ret, result;

	if (!intel_has_sagv(dev_priv))
		return 0;

	if (dev_priv->sagv_status == I915_SAGV_DISABLED)
		return 0;

	DRM_DEBUG_KMS("Disabling the SAGV\n");
	mutex_lock(&dev_priv->rps.hw_lock);

	/* bspec says to keep retrying for at least 1 ms */
	ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (ret == -ETIMEDOUT) {
		DRM_ERROR("Request to disable SAGV timed out\n");
		return -ETIMEDOUT;
	}

	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have an SAGV.
	 */
	if (IS_SKYLAKE(dev_priv) && result == -ENXIO) {
		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
		return 0;
	} else if (result < 0) {
		DRM_ERROR("Failed to disable the SAGV\n");
		return result;
	}

	dev_priv->sagv_status = I915_SAGV_DISABLED;
	return 0;
}

bool intel_can_enable_sagv(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct intel_crtc *crtc;
	struct intel_plane *plane;
	struct intel_crtc_state *cstate;
	enum pipe pipe;
	int level, latency;

	if (!intel_has_sagv(dev_priv))
		return false;

	/*
	 * SKL workaround: bspec recommends we disable the SAGV when we have
	 * more than one pipe enabled
	 *
	 * If there are no active CRTCs, no additional checks need be performed
	 */
	if (hweight32(intel_state->active_crtcs) == 0)
		return true;
	else if (hweight32(intel_state->active_crtcs) > 1)
		return false;

	/* Since we're now guaranteed to only have one active CRTC... */
	pipe = ffs(intel_state->active_crtcs) - 1;
	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	cstate = to_intel_crtc_state(crtc->base.state);

	if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		return false;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct skl_plane_wm *wm =
			&cstate->wm.skl.optimal.planes[plane->id];

		/* Skip this plane if it's not enabled */
		if (!wm->wm[0].plane_en)
			continue;

		/* Find the highest enabled wm level for this plane */
		for (level = ilk_wm_max_level(dev_priv);
		     !wm->wm[level].plane_en; --level)
			{ }

		latency = dev_priv->wm.skl_latency[level];

		if (skl_needs_memory_bw_wa(intel_state) &&
		    plane->base.state->fb->modifier ==
		    I915_FORMAT_MOD_X_TILED)
			latency += 15;

		/*
		 * If any of the planes on this pipe don't enable wm levels
		 * that incur memory latencies higher than 30µs we can't enable
		 * the SAGV
		 */
		if (latency < SKL_SAGV_BLOCK_TIME)
			return false;
	}

	return true;
}

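/*
 * Worked example (hypothetical latencies): with SKL_SAGV_BLOCK_TIME = 30 µs
 * and a plane whose highest enabled WM level has a 20 µs latency, the plane
 * cannot absorb a 30 µs memory blackout, so intel_can_enable_sagv() returns
 * false. When the memory bandwidth WA applies to an X-tiled plane, the
 * 15 µs adjustment can lift an otherwise-failing 20 µs plane to 35 µs and
 * let the SAGV stay enabled.
 */
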
static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
				   const struct intel_crtc_state *cstate,
				   struct skl_ddb_entry *alloc, /* out */
				   int *num_active /* out */)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *for_crtc = cstate->base.crtc;
	unsigned int pipe_size, ddb_size;
	int nth_active_pipe;

	if (WARN_ON(!state) || !cstate->base.active) {
		alloc->start = 0;
		alloc->end = 0;
		*num_active = hweight32(dev_priv->active_crtcs);
		return;
	}

	if (intel_state->active_pipe_changes)
		*num_active = hweight32(intel_state->active_crtcs);
	else
		*num_active = hweight32(dev_priv->active_crtcs);

	ddb_size = INTEL_INFO(dev_priv)->ddb_size;
	WARN_ON(ddb_size == 0);

	ddb_size -= 4; /* 4 blocks for bypass path allocation */

	/*
	 * If the state doesn't change the active CRTC's, then there's
	 * no need to recalculate; the existing pipe allocation limits
	 * should remain unchanged.  Note that we're safe from racing
	 * commits since any racing commit that changes the active CRTC
	 * list would need to grab _all_ crtc locks, including the one
	 * we currently hold.
	 */
	if (!intel_state->active_pipe_changes) {
		/*
		 * alloc may be cleared by clear_intel_crtc_state,
		 * copy from old state to be sure
		 */
		*alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
		return;
	}

	nth_active_pipe = hweight32(intel_state->active_crtcs &
				    (drm_crtc_mask(for_crtc) - 1));
	pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
	alloc->start = nth_active_pipe * ddb_size / *num_active;
	alloc->end = alloc->start + pipe_size;
}

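/*
 * Worked example (illustrative): assuming a hypothetical 896-block DDB,
 * 4 blocks are reserved for the bypass path, leaving ddb_size = 892.
 * With pipes A and B active, each gets pipe_size = 892 / 2 = 446 blocks:
 * pipe A (nth_active_pipe = 0) is assigned [0, 446) and pipe B
 * (nth_active_pipe = 1) is assigned [446, 892).
 */
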
static unsigned int skl_cursor_allocation(int num_active)
{
	if (num_active == 1)
		return 32;

	return 8;
}

static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
{
	entry->start = reg & 0x3ff;
	entry->end = (reg >> 16) & 0x3ff;
	if (entry->end)
		entry->end += 1;
}

void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
			  struct skl_ddb_allocation *ddb /* out */)
{
	struct intel_crtc *crtc;

	memset(ddb, 0, sizeof(*ddb));

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		enum intel_display_power_domain power_domain;
		enum plane_id plane_id;
		enum pipe pipe = crtc->pipe;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			u32 val;

			if (plane_id != PLANE_CURSOR)
				val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
			else
				val = I915_READ(CUR_BUF_CFG(pipe));

			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val);
		}

		intel_display_power_put(dev_priv, power_domain);
	}
}

/*
 * Determines the downscale amount of a plane for the purposes of watermark
 * calculations. The bspec defines downscale amount as:
 *
 * """
 * Horizontal down scale amount = maximum[1, Horizontal source size /
 *                                           Horizontal destination size]
 * Vertical down scale amount = maximum[1, Vertical source size /
 *                                         Vertical destination size]
 * Total down scale amount = Horizontal down scale amount *
 *                           Vertical down scale amount
 * """
 *
 * Return value is provided in 16.16 fixed point form to retain fractional part.
 * Caller should take care of dividing & rounding off the value.
 */
static uint32_t
skl_plane_downscale_amount(const struct intel_plane_state *pstate)
{
	uint32_t downscale_h, downscale_w;
	uint32_t src_w, src_h, dst_w, dst_h;

	if (WARN_ON(!pstate->base.visible))
		return DRM_PLANE_HELPER_NO_SCALING;

	/* n.b., src is 16.16 fixed point, dst is whole integer */
	src_w = drm_rect_width(&pstate->base.src);
	src_h = drm_rect_height(&pstate->base.src);
	dst_w = drm_rect_width(&pstate->base.dst);
	dst_h = drm_rect_height(&pstate->base.dst);
	if (drm_rotation_90_or_270(pstate->base.rotation))
		swap(dst_w, dst_h);

	downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
	downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);

	/* Provide result in 16.16 fixed point */
	return (uint64_t)downscale_w * downscale_h >> 16;
}

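/*
 * Fixed point worked example (illustrative): downscaling a 3840x2160
 * source onto a 1920x1080 destination gives
 * downscale_w = (3840 << 16) / 1920 = 2 << 16 and likewise
 * downscale_h = 2 << 16. The product (2 << 16) * (2 << 16) >> 16
 * = 4 << 16, i.e. a total downscale amount of 4.0 in 16.16 form.
 */
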
static unsigned int
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
			     const struct drm_plane_state *pstate,
			     int y)
{
	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
	struct drm_framebuffer *fb = pstate->fb;
	uint32_t down_scale_amount, data_rate;
	uint32_t width = 0, height = 0;
	unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;

	if (!intel_pstate->base.visible)
		return 0;
	if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
		return 0;
	if (y && format != DRM_FORMAT_NV12)
		return 0;

	width = drm_rect_width(&intel_pstate->base.src) >> 16;
	height = drm_rect_height(&intel_pstate->base.src) >> 16;

	if (drm_rotation_90_or_270(pstate->rotation))
		swap(width, height);

	/* for planar format */
	if (format == DRM_FORMAT_NV12) {
		if (y)  /* y-plane data rate */
			data_rate = width * height *
				drm_format_plane_cpp(format, 0);
		else    /* uv-plane data rate */
			data_rate = (width / 2) * (height / 2) *
				drm_format_plane_cpp(format, 1);
	} else {
		/* for packed formats */
		data_rate = width * height * drm_format_plane_cpp(format, 0);
	}

	down_scale_amount = skl_plane_downscale_amount(intel_pstate);

	return (uint64_t)data_rate * down_scale_amount >> 16;
}

/*
 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
 * a 8192x4096@32bpp framebuffer:
 *   3 * 4096 * 8192 * 4 < 2^32
 */
static unsigned int
skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
				 unsigned *plane_data_rate,
				 unsigned *plane_y_data_rate)
{
	struct drm_crtc_state *cstate = &intel_cstate->base;
	struct drm_atomic_state *state = cstate->state;
	struct drm_plane *plane;
	const struct drm_plane_state *pstate;
	unsigned int total_data_rate = 0;

	if (WARN_ON(!state))
		return 0;

	/* Calculate and cache data rate for each plane */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
		enum plane_id plane_id = to_intel_plane(plane)->id;
		unsigned int rate;

		/* packed/uv */
		rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 0);
		plane_data_rate[plane_id] = rate;

		total_data_rate += rate;

		/* y-plane */
		rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 1);
		plane_y_data_rate[plane_id] = rate;

		total_data_rate += rate;
	}

	return total_data_rate;
}

static uint16_t
skl_ddb_min_alloc(const struct drm_plane_state *pstate,
		  int y)
{
	struct drm_framebuffer *fb = pstate->fb;
	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
	uint32_t src_w, src_h;
	uint32_t min_scanlines = 8;
	uint8_t plane_bpp;

	if (WARN_ON(!fb))
		return 0;

	/* For packed formats, no y-plane, return 0 */
	if (y && fb->pixel_format != DRM_FORMAT_NV12)
		return 0;

	/* For Non Y-tile return 8-blocks */
	if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
		return 8;

	src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
	src_h = drm_rect_height(&intel_pstate->base.src) >> 16;

	if (drm_rotation_90_or_270(pstate->rotation))
		swap(src_w, src_h);

	/* Halve UV plane width and height for NV12 */
	if (fb->pixel_format == DRM_FORMAT_NV12 && !y) {
		src_w /= 2;
		src_h /= 2;
	}

	if (fb->pixel_format == DRM_FORMAT_NV12 && !y)
		plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1);
	else
		plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);

	if (drm_rotation_90_or_270(pstate->rotation)) {
		switch (plane_bpp) {
		case 1:
			min_scanlines = 32;
			break;
		case 2:
			min_scanlines = 16;
			break;
		case 4:
			min_scanlines = 8;
			break;
		case 8:
			min_scanlines = 4;
			break;
		default:
			WARN(1, "Unsupported pixel depth %u for rotation",
			     plane_bpp);
			min_scanlines = 4;
		}
	}

	return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
}

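/*
 * Worked example (illustrative): a 1920-wide, 4 bpp, Y-tiled, unrotated
 * plane uses min_scanlines = 8, so the minimum allocation is
 * DIV_ROUND_UP(4 * 1920 * 4, 512) * 8 / 4 + 3 = 60 * 2 + 3 = 123 blocks.
 */
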
static void
skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
		 uint16_t *minimum, uint16_t *y_minimum)
{
	const struct drm_plane_state *pstate;
	struct drm_plane *plane;

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
		enum plane_id plane_id = to_intel_plane(plane)->id;

		if (plane_id == PLANE_CURSOR)
			continue;

		if (!pstate->visible)
			continue;

		minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
		y_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
	}

	minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
}

static int
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
		      struct skl_ddb_allocation *ddb /* out */)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct drm_crtc *crtc = cstate->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
	uint16_t alloc_size, start;
	uint16_t minimum[I915_MAX_PLANES] = {};
	uint16_t y_minimum[I915_MAX_PLANES] = {};
	unsigned int total_data_rate;
	enum plane_id plane_id;
	int num_active;
	unsigned plane_data_rate[I915_MAX_PLANES] = {};
	unsigned plane_y_data_rate[I915_MAX_PLANES] = {};

	/* Clear the partitioning for disabled planes. */
	memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
	memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));

	if (WARN_ON(!state))
		return 0;

	if (!cstate->base.active) {
		alloc->start = alloc->end = 0;
		return 0;
	}

	skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
	alloc_size = skl_ddb_entry_size(alloc);
	if (alloc_size == 0) {
		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
		return 0;
	}

	skl_ddb_calc_min(cstate, num_active, minimum, y_minimum);

	/*
	 * 1. Allocate the minimum required blocks for each active plane
	 * and allocate the cursor, it doesn't require extra allocation
	 * proportional to the data rate.
	 */

	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
		alloc_size -= minimum[plane_id];
		alloc_size -= y_minimum[plane_id];
	}

	ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
	ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;

	/*
	 * 2. Distribute the remaining space in proportion to the amount of
	 * data each plane needs to fetch from memory.
	 *
	 * FIXME: we may not allocate every single block here.
	 */
	total_data_rate = skl_get_total_relative_data_rate(cstate,
							   plane_data_rate,
							   plane_y_data_rate);
	if (total_data_rate == 0)
		return 0;

	start = alloc->start;
	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
		unsigned int data_rate, y_data_rate;
		uint16_t plane_blocks, y_plane_blocks = 0;

		if (plane_id == PLANE_CURSOR)
			continue;

		data_rate = plane_data_rate[plane_id];

		/*
		 * allocation for (packed formats) or (uv-plane part of planar format):
		 * promote the expression to 64 bits to avoid overflowing, the
		 * result is < available as data_rate / total_data_rate < 1
		 */
		plane_blocks = minimum[plane_id];
		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
					total_data_rate);

		/* Leave disabled planes at (0,0) */
		if (data_rate) {
			ddb->plane[pipe][plane_id].start = start;
			ddb->plane[pipe][plane_id].end = start + plane_blocks;
		}

		start += plane_blocks;

		/*
		 * allocation for y_plane part of planar format:
		 */
		y_data_rate = plane_y_data_rate[plane_id];

		y_plane_blocks = y_minimum[plane_id];
		y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
					  total_data_rate);

		if (y_data_rate) {
			ddb->y_plane[pipe][plane_id].start = start;
			ddb->y_plane[pipe][plane_id].end = start + y_plane_blocks;
		}

		start += y_plane_blocks;
	}

	return 0;
}

/*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
 * for the read latency) and cpp should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
	uint32_t wm_intermediate_val, ret;

	if (latency == 0)
		return UINT_MAX;

	wm_intermediate_val = latency * pixel_rate * cpp / 512;
	ret = DIV_ROUND_UP(wm_intermediate_val, 1000);

	return ret;
}

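/*
 * Worked example (illustrative): pixel_rate = 148500 kHz, cpp = 4 and
 * latency = 15 us give wm_intermediate_val = 15 * 148500 * 4 / 512
 * = 17402, and DIV_ROUND_UP(17402, 1000) = 18 -- the number of 512-byte
 * DDB blocks the plane consumes during the latency window.
 */
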
static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t latency, uint32_t plane_blocks_per_line)
{
	uint32_t ret;
	uint32_t wm_intermediate_val;

	if (latency == 0)
		return UINT_MAX;

	wm_intermediate_val = latency * pixel_rate;
	ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
		plane_blocks_per_line;

	return ret;
}

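/*
 * Worked example (illustrative): with pixel_rate = 148500 kHz,
 * pipe_htotal = 2200 and latency = 15 us, the plane scans out
 * DIV_ROUND_UP(15 * 148500, 2200 * 1000) = 2 lines during the latency
 * window, so method2 charges 2 * plane_blocks_per_line blocks.
 */
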
static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
					      struct intel_plane_state *pstate)
{
	uint64_t adjusted_pixel_rate;
	uint64_t downscale_amount;
	uint64_t pixel_rate;

	/* Shouldn't reach here on disabled planes... */
	if (WARN_ON(!pstate->base.visible))
		return 0;

	/*
	 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
	 * with additional adjustments for plane-specific scaling.
	 */
	adjusted_pixel_rate = ilk_pipe_pixel_rate(cstate);
	downscale_amount = skl_plane_downscale_amount(pstate);

	pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
	WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));

	return pixel_rate;
}

static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
				struct intel_crtc_state *cstate,
				struct intel_plane_state *intel_pstate,
				uint16_t ddb_allocation,
				int level,
				uint16_t *out_blocks, /* out */
				uint8_t *out_lines, /* out */
				bool *enabled /* out */)
{
	struct drm_plane_state *pstate = &intel_pstate->base;
	struct drm_framebuffer *fb = pstate->fb;
	uint32_t latency = dev_priv->wm.skl_latency[level];
	uint32_t method1, method2;
	uint32_t plane_bytes_per_line, plane_blocks_per_line;
	uint32_t res_blocks, res_lines;
	uint32_t selected_result;
	uint8_t cpp;
	uint32_t width = 0, height = 0;
	uint32_t plane_pixel_rate;
	uint32_t y_tile_minimum, y_min_scanlines;
	struct intel_atomic_state *state =
		to_intel_atomic_state(cstate->base.state);
	bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);

	if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
		*enabled = false;
		return 0;
	}

	if (apply_memory_bw_wa && fb->modifier == I915_FORMAT_MOD_X_TILED)
		latency += 15;

	width = drm_rect_width(&intel_pstate->base.src) >> 16;
	height = drm_rect_height(&intel_pstate->base.src) >> 16;

	if (drm_rotation_90_or_270(pstate->rotation))
		swap(width, height);

	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);

	if (drm_rotation_90_or_270(pstate->rotation)) {
		int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
			drm_format_plane_cpp(fb->pixel_format, 1) :
			drm_format_plane_cpp(fb->pixel_format, 0);

		switch (cpp) {
		case 1:
			y_min_scanlines = 16;
			break;
		case 2:
			y_min_scanlines = 8;
			break;
		case 4:
			y_min_scanlines = 4;
			break;
		default:
			MISSING_CASE(cpp);
			return -EINVAL;
		}
	} else {
		y_min_scanlines = 4;
	}

	if (apply_memory_bw_wa)
		y_min_scanlines *= 2;

	plane_bytes_per_line = width * cpp;
	if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
	    fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
		plane_blocks_per_line =
		      DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512);
		plane_blocks_per_line /= y_min_scanlines;
	} else if (fb->modifier == DRM_FORMAT_MOD_NONE) {
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512)
					+ 1;
	} else {
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
	}

	method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
	method2 = skl_wm_method2(plane_pixel_rate,
				 cstate->base.adjusted_mode.crtc_htotal,
				 latency,
				 plane_blocks_per_line);

	y_tile_minimum = plane_blocks_per_line * y_min_scanlines;

	if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
	    fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
		selected_result = max(method2, y_tile_minimum);
	} else {
		if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
		    (plane_bytes_per_line / 512 < 1))
			selected_result = method2;
		else if ((ddb_allocation / plane_blocks_per_line) >= 1)
			selected_result = min(method1, method2);
		else
			selected_result = method1;
	}

	res_blocks = selected_result + 1;
	res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);

	if (level >= 1 && level <= 7) {
		if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
		    fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
			res_blocks += y_tile_minimum;
			res_lines += y_min_scanlines;
		} else {
			res_blocks++;
		}
	}

	if (res_blocks >= ddb_allocation || res_lines > 31) {
		*enabled = false;

		/*
		 * If there are no valid level 0 watermarks, then we can't
		 * support this display configuration.
		 */
		if (level) {
			return 0;
		} else {
			struct drm_plane *plane = pstate->plane;

			DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
			DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n",
				      plane->base.id, plane->name,
				      res_blocks, ddb_allocation, res_lines);
			return -EINVAL;
		}
	}

	*out_blocks = res_blocks;
	*out_lines = res_lines;
	*enabled = true;

	return 0;
}

static int
skl_compute_wm_level(const struct drm_i915_private *dev_priv,
		     struct skl_ddb_allocation *ddb,
		     struct intel_crtc_state *cstate,
		     struct intel_plane *intel_plane,
		     int level,
		     struct skl_wm_level *result)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_plane *plane = &intel_plane->base;
	struct intel_plane_state *intel_pstate = NULL;
	uint16_t ddb_blocks;
	enum pipe pipe = intel_crtc->pipe;
	int ret;

	if (state)
		intel_pstate =
			intel_atomic_get_existing_plane_state(state,
							      intel_plane);

	/*
	 * Note: If we start supporting multiple pending atomic commits against
	 * the same planes/CRTC's in the future, plane->state will no longer be
	 * the correct pre-state to use for the calculations here and we'll
	 * need to change where we get the 'unchanged' plane data from.
	 *
	 * For now this is fine because we only allow one queued commit against
	 * a CRTC.  Even if the plane isn't modified by this transaction and we
	 * don't have a plane lock, we still have the CRTC's lock, so we know
	 * that no other transactions are racing with us to update it.
	 */
	if (!intel_pstate)
		intel_pstate = to_intel_plane_state(plane->state);

	WARN_ON(!intel_pstate->base.fb);

	ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][intel_plane->id]);

	ret = skl_compute_plane_wm(dev_priv,
				   cstate,
				   intel_pstate,
				   ddb_blocks,
				   level,
				   &result->plane_res_b,
				   &result->plane_res_l,
				   &result->plane_en);
	if (ret)
		return ret;

	return 0;
}

static uint32_t
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
	uint32_t pixel_rate;

	if (!cstate->base.active)
		return 0;

	pixel_rate = ilk_pipe_pixel_rate(cstate);

	if (WARN_ON(pixel_rate == 0))
		return 0;

	return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
			    pixel_rate);
}

static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
				      struct skl_wm_level *trans_wm /* out */)
{
	if (!cstate->base.active)
		return;

	/* Until we know more, just disable transition WMs */
	trans_wm->plane_en = false;
}

static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
			     struct skl_ddb_allocation *ddb,
			     struct skl_pipe_wm *pipe_wm)
{
	struct drm_device *dev = cstate->base.crtc->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *intel_plane;
	struct skl_plane_wm *wm;
	int level, max_level = ilk_wm_max_level(dev_priv);
	int ret;

	/*
	 * We'll only calculate watermarks for planes that are actually
	 * enabled, so make sure all other planes are set as disabled.
	 */
	memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));

	for_each_intel_plane_mask(&dev_priv->drm,
				  intel_plane,
				  cstate->base.plane_mask) {
		wm = &pipe_wm->planes[intel_plane->id];

		for (level = 0; level <= max_level; level++) {
			ret = skl_compute_wm_level(dev_priv, ddb, cstate,
						   intel_plane, level,
						   &wm->wm[level]);
			if (ret)
				return ret;
		}
		skl_compute_transition_wm(cstate, &wm->trans_wm);
	}
	pipe_wm->linetime = skl_compute_linetime_wm(cstate);

	return 0;
}

static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
				i915_reg_t reg,
				const struct skl_ddb_entry *entry)
{
	if (entry->end)
		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
	else
		I915_WRITE(reg, 0);
}

static void skl_write_wm_level(struct drm_i915_private *dev_priv,
			       i915_reg_t reg,
			       const struct skl_wm_level *level)
{
	uint32_t val = 0;

	if (level->plane_en) {
		val |= PLANE_WM_EN;
		val |= level->plane_res_b;
		val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
	}

	I915_WRITE(reg, val);
}

static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
			       const struct skl_plane_wm *wm,
			       const struct skl_ddb_allocation *ddb,
			       enum plane_id plane_id)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum pipe pipe = intel_crtc->pipe;

	for (level = 0; level <= max_level; level++) {
		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
				   &wm->wm[level]);
	}
	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
			   &wm->trans_wm);

	skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
			    &ddb->plane[pipe][plane_id]);
	skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane_id),
			    &ddb->y_plane[pipe][plane_id]);
}

static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
				const struct skl_plane_wm *wm,
				const struct skl_ddb_allocation *ddb)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum pipe pipe = intel_crtc->pipe;

	for (level = 0; level <= max_level; level++) {
		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
				   &wm->wm[level]);
	}
	skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);

	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
			    &ddb->plane[pipe][PLANE_CURSOR]);
}

bool skl_wm_level_equals(const struct skl_wm_level *l1,
			 const struct skl_wm_level *l2)
{
	if (l1->plane_en != l2->plane_en)
		return false;

	/* If both planes aren't enabled, the rest shouldn't matter */
	if (!l1->plane_en)
		return true;

	return (l1->plane_res_l == l2->plane_res_l &&
		l1->plane_res_b == l2->plane_res_b);
}

static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
					   const struct skl_ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}

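/*
 * Overlap example: DDB entries behave as half-open [start, end) ranges, so
 * [0, 446) and [446, 892) do not overlap, while [0, 446) and [400, 500)
 * do -- both conditions (a->start < b->end and b->start < a->end) hold.
 */
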
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
				 const struct skl_ddb_entry *ddb,
				 int ignore)
{
	int i;

	for (i = 0; i < I915_MAX_PIPES; i++)
		if (i != ignore && entries[i] &&
		    skl_ddb_entries_overlap(ddb, entries[i]))
			return true;

	return false;
}

static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
			      const struct skl_pipe_wm *old_pipe_wm,
			      struct skl_pipe_wm *pipe_wm, /* out */
			      struct skl_ddb_allocation *ddb, /* out */
			      bool *changed /* out */)
{
	struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
	int ret;

	ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
	if (ret)
		return ret;

	if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm)))
		*changed = false;
	else
		*changed = true;

	return 0;
}

static uint32_t
pipes_modified(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	uint32_t i, ret = 0;

	for_each_crtc_in_state(state, crtc, cstate, i)
		ret |= drm_crtc_mask(crtc);

	return ret;
}

static int
skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct drm_device *dev = state->dev;
	struct drm_crtc *crtc = cstate->base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
	struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
	struct drm_plane_state *plane_state;
	struct drm_plane *plane;
	enum pipe pipe = intel_crtc->pipe;

	WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));

	drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
		enum plane_id plane_id = to_intel_plane(plane)->id;

		if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
					&new_ddb->plane[pipe][plane_id]) &&
		    skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][plane_id],
					&new_ddb->y_plane[pipe][plane_id]))
			continue;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}

static int
skl_compute_ddb(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct intel_crtc *intel_crtc;
	struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
	uint32_t realloc_pipes = pipes_modified(state);
	int ret;

	/*
	 * If this is our first atomic update following hardware readout,
	 * we can't trust the DDB that the BIOS programmed for us.  Let's
	 * pretend that all pipes switched active status so that we'll
	 * ensure a full DDB recompute.
	 */
	if (dev_priv->wm.distrust_bios_wm) {
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       state->acquire_ctx);
		if (ret)
			return ret;

		intel_state->active_pipe_changes = ~0;

		/*
		 * We usually only initialize intel_state->active_crtcs if
		 * we're doing a modeset; make sure this field is always
		 * initialized during the sanitization process that happens
		 * on the first commit too.
		 */
		if (!intel_state->modeset)
			intel_state->active_crtcs = dev_priv->active_crtcs;
	}

	/*
	 * If the modeset changes which CRTC's are active, we need to
	 * recompute the DDB allocation for *all* active pipes, even
	 * those that weren't otherwise being modified in any way by this
	 * atomic commit.  Due to the shrinking of the per-pipe allocations
	 * when new active CRTC's are added, it's possible for a pipe that
	 * we were already using and aren't changing at all here to suddenly
	 * become invalid if its DDB needs exceeds its new allocation.
	 *
	 * Note that if we wind up doing a full DDB recompute, we can't let
	 * any other display updates race with this transaction, so we need
	 * to grab the lock on *all* CRTC's.
	 */
	if (intel_state->active_pipe_changes) {
		realloc_pipes = ~0;
		intel_state->wm_results.dirty_pipes = ~0;
	}

	/*
	 * We're not recomputing for the pipes not included in the commit, so
	 * make sure we start with the current state.
	 */
	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));

	for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
		struct intel_crtc_state *cstate;

		cstate = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(cstate))
			return PTR_ERR(cstate);

		ret = skl_allocate_pipe_ddb(cstate, ddb);
		if (ret)
			return ret;

		ret = skl_ddb_add_affected_planes(cstate);
		if (ret)
			return ret;
	}

	return 0;
}

static void
skl_copy_wm_for_pipe(struct skl_wm_values *dst,
		     struct skl_wm_values *src,
		     enum pipe pipe)
{
	memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
	       sizeof(dst->ddb.y_plane[pipe]));
	memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
	       sizeof(dst->ddb.plane[pipe]));
}

static void
skl_print_wm_changes(const struct drm_atomic_state *state)
{
	const struct drm_device *dev = state->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_atomic_state *intel_state =
		to_intel_atomic_state(state);
	const struct drm_crtc *crtc;
	const struct drm_crtc_state *cstate;
	const struct intel_plane *intel_plane;
	const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
	const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
	int i;

	for_each_crtc_in_state(state, crtc, cstate, i) {
		const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		enum pipe pipe = intel_crtc->pipe;

		for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
			enum plane_id plane_id = intel_plane->id;
			const struct skl_ddb_entry *old, *new;

			old = &old_ddb->plane[pipe][plane_id];
			new = &new_ddb->plane[pipe][plane_id];

			if (skl_ddb_entry_equal(old, new))
				continue;

			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
					 intel_plane->base.base.id,
					 intel_plane->base.name,
					 old->start, old->end,
					 new->start, new->end);
		}
	}
}

static int
skl_compute_wm(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct skl_wm_values *results = &intel_state->wm_results;
	struct skl_pipe_wm *pipe_wm;
	bool changed = false;
	int ret, i;

	/*
	 * If this transaction isn't actually touching any CRTC's, don't
	 * bother with watermark calculation.  Note that if we pass this
	 * test, we're guaranteed to hold at least one CRTC state mutex,
	 * which means we can safely use values like dev_priv->active_crtcs
	 * since any racing commits that want to update them would need to
	 * hold _all_ CRTC state mutexes.
	 */
	for_each_crtc_in_state(state, crtc, cstate, i)
		changed = true;
	if (!changed)
		return 0;

	/* Clear all dirty flags */
	results->dirty_pipes = 0;

	ret = skl_compute_ddb(state);
	if (ret)
		return ret;

	/*
	 * Calculate WM's for all pipes that are part of this transaction.
	 * Note that the DDB allocation above may have added more CRTC's that
	 * weren't otherwise being modified (and set bits in dirty_pipes) if
	 * pipe allocations had to change.
	 *
	 * FIXME: Now that we're doing this in the atomic check phase, we
	 * should allow skl_update_pipe_wm() to return failure in cases where
	 * no suitable watermark values can be found.
	 */
	for_each_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *intel_cstate =
			to_intel_crtc_state(cstate);
		const struct skl_pipe_wm *old_pipe_wm =
			&to_intel_crtc_state(crtc->state)->wm.skl.optimal;

		pipe_wm = &intel_cstate->wm.skl.optimal;
		ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
					 &results->ddb, &changed);
		if (ret)
			return ret;

		if (changed)
			results->dirty_pipes |= drm_crtc_mask(crtc);

		if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
			/* This pipe's WM's did not change */
			continue;

		intel_cstate->update_wm_pre = true;
	}

	skl_print_wm_changes(state);

	return 0;
}

static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
				      struct intel_crtc_state *cstate)
{
	struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
	const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
	enum pipe pipe = crtc->pipe;
	enum plane_id plane_id;

	if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
		return;

	I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);

	for_each_plane_id_on_crtc(crtc, plane_id) {
		if (plane_id != PLANE_CURSOR)
			skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
					   ddb, plane_id);
		else
			skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
					    ddb);
	}
}

static void skl_initial_wm(struct intel_atomic_state *state,
			   struct intel_crtc_state *cstate)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct skl_wm_values *results = &state->wm_results;
	struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
	enum pipe pipe = intel_crtc->pipe;

	if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);

	if (cstate->base.active_changed)
		skl_atomic_update_crtc_wm(state, cstate);

	skl_copy_wm_for_pipe(hw_vals, results, pipe);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void ilk_compute_wm_config(struct drm_device *dev,
				  struct intel_wm_config *config)
{
	struct intel_crtc *crtc;

	/* Compute the currently _active_ config */
	for_each_intel_crtc(dev, crtc) {
		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}

4210 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
4212 struct drm_device *dev = &dev_priv->drm;
4213 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
4214 struct ilk_wm_maximums max;
4215 struct intel_wm_config config = {};
4216 struct ilk_wm_values results = {};
4217 enum intel_ddb_partitioning partitioning;
4219 ilk_compute_wm_config(dev, &config);
4221 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
4222 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
4224 /* 5/6 split only in single pipe config on IVB+ */
4225 if (INTEL_GEN(dev_priv) >= 7 &&
4226 config.num_pipes_active == 1 && config.sprites_enabled) {
4227 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
4228 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
4230 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
4231 } else {
4232 best_lp_wm = &lp_wm_1_2;
4233 }
4235 partitioning = (best_lp_wm == &lp_wm_1_2) ?
4236 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
4238 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
4240 ilk_write_wm_values(dev_priv, &results);
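/*
 * Summary of the flow above: merge per-pipe LP watermarks assuming the
 * default 1/2 DDB partitioning, optionally redo the merge for the 5/6
 * split (IVB+ with a single active pipe using sprites), let
 * ilk_find_best_result() choose between the two merged results, and
 * program the registers together with the matching partitioning mode.
 */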
4243 static void ilk_initial_watermarks(struct intel_atomic_state *state,
4244 struct intel_crtc_state *cstate)
4246 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
4247 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
4249 mutex_lock(&dev_priv->wm.wm_mutex);
4250 intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
4251 ilk_program_watermarks(dev_priv);
4252 mutex_unlock(&dev_priv->wm.wm_mutex);
4255 static void ilk_optimize_watermarks(struct intel_atomic_state *state,
4256 struct intel_crtc_state *cstate)
4258 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
4259 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
4261 mutex_lock(&dev_priv->wm.wm_mutex);
4262 if (cstate->wm.need_postvbl_update) {
4263 intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
4264 ilk_program_watermarks(dev_priv);
4266 mutex_unlock(&dev_priv->wm.wm_mutex);
4269 static inline void skl_wm_level_from_reg_val(uint32_t val,
4270 struct skl_wm_level *level)
4272 level->plane_en = val & PLANE_WM_EN;
4273 level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
4274 level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
4275 PLANE_WM_LINES_MASK;
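/*
 * Illustrative decode, assuming the usual layout where PLANE_WM_EN is
 * the enable bit and the lines field sits above the blocks mask: a raw
 * value of (PLANE_WM_EN | (2 << PLANE_WM_LINES_SHIFT) | 0x28) would
 * yield plane_en = true, plane_res_l = 2 and plane_res_b = 0x28.
 */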
4278 void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
4279 struct skl_pipe_wm *out)
4281 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
4282 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4283 enum pipe pipe = intel_crtc->pipe;
4284 int level, max_level;
4285 enum plane_id plane_id;
4288 max_level = ilk_wm_max_level(dev_priv);
4290 for_each_plane_id_on_crtc(intel_crtc, plane_id) {
4291 struct skl_plane_wm *wm = &out->planes[plane_id];
4293 for (level = 0; level <= max_level; level++) {
4294 if (plane_id != PLANE_CURSOR)
4295 val = I915_READ(PLANE_WM(pipe, plane_id, level));
4296 else
4297 val = I915_READ(CUR_WM(pipe, level));
4299 skl_wm_level_from_reg_val(val, &wm->wm[level]);
4302 if (plane_id != PLANE_CURSOR)
4303 val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
4304 else
4305 val = I915_READ(CUR_WM_TRANS(pipe));
4307 skl_wm_level_from_reg_val(val, &wm->trans_wm);
4310 if (!intel_crtc->active)
4311 return;
4313 out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
4316 void skl_wm_get_hw_state(struct drm_device *dev)
4318 struct drm_i915_private *dev_priv = to_i915(dev);
4319 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
4320 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
4321 struct drm_crtc *crtc;
4322 struct intel_crtc *intel_crtc;
4323 struct intel_crtc_state *cstate;
4325 skl_ddb_get_hw_state(dev_priv, ddb);
4326 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4327 intel_crtc = to_intel_crtc(crtc);
4328 cstate = to_intel_crtc_state(crtc->state);
4330 skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
4332 if (intel_crtc->active)
4333 hw->dirty_pipes |= drm_crtc_mask(crtc);
4336 if (dev_priv->active_crtcs) {
4337 /* Fully recompute DDB on first atomic commit */
4338 dev_priv->wm.distrust_bios_wm = true;
4339 } else {
4340 /* Easy/common case; just sanitize DDB now if everything off */
4341 memset(ddb, 0, sizeof(*ddb));
4345 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
4347 struct drm_device *dev = crtc->dev;
4348 struct drm_i915_private *dev_priv = to_i915(dev);
4349 struct ilk_wm_values *hw = &dev_priv->wm.hw;
4350 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4351 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
4352 struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
4353 enum pipe pipe = intel_crtc->pipe;
4354 static const i915_reg_t wm0_pipe_reg[] = {
4355 [PIPE_A] = WM0_PIPEA_ILK,
4356 [PIPE_B] = WM0_PIPEB_ILK,
4357 [PIPE_C] = WM0_PIPEC_IVB,
4360 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
4361 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
4362 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
4364 memset(active, 0, sizeof(*active));
4366 active->pipe_enabled = intel_crtc->active;
4368 if (active->pipe_enabled) {
4369 u32 tmp = hw->wm_pipe[pipe];
4372 * For active pipes LP0 watermark is marked as
4373 * enabled, and LP1+ watermarks as disabled since
4374 * we can't really reverse compute them in case
4375 * multiple pipes are active.
4377 active->wm[0].enable = true;
4378 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
4379 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
4380 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
4381 active->linetime = hw->wm_linetime[pipe];
4382 } else {
4383 int level, max_level = ilk_wm_max_level(dev_priv);
4386 * For inactive pipes, all watermark levels
4387 * should be marked as enabled but zeroed,
4388 * which is what we'd compute them to.
4390 for (level = 0; level <= max_level; level++)
4391 active->wm[level].enable = true;
4394 intel_crtc->wm.active.ilk = *active;
4397 #define _FW_WM(value, plane) \
4398 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
4399 #define _FW_WM_VLV(value, plane) \
4400 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
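/*
 * Token pasting makes these macros expand per plane name; for example
 * _FW_WM(tmp, CURSORB) becomes
 * ((tmp & DSPFW_CURSORB_MASK) >> DSPFW_CURSORB_SHIFT).
 */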
4402 static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
4403 struct vlv_wm_values *wm)
4408 for_each_pipe(dev_priv, pipe) {
4409 tmp = I915_READ(VLV_DDL(pipe));
4411 wm->ddl[pipe].plane[PLANE_PRIMARY] =
4412 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4413 wm->ddl[pipe].plane[PLANE_CURSOR] =
4414 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4415 wm->ddl[pipe].plane[PLANE_SPRITE0] =
4416 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4417 wm->ddl[pipe].plane[PLANE_SPRITE1] =
4418 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4421 tmp = I915_READ(DSPFW1);
4422 wm->sr.plane = _FW_WM(tmp, SR);
4423 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
4424 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
4425 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
4427 tmp = I915_READ(DSPFW2);
4428 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
4429 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
4430 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
4432 tmp = I915_READ(DSPFW3);
4433 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
4435 if (IS_CHERRYVIEW(dev_priv)) {
4436 tmp = I915_READ(DSPFW7_CHV);
4437 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
4438 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
4440 tmp = I915_READ(DSPFW8_CHV);
4441 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
4442 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
4444 tmp = I915_READ(DSPFW9_CHV);
4445 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
4446 wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
4448 tmp = I915_READ(DSPHOWM);
4449 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
4450 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
4451 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
4452 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
4453 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
4454 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
4455 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
4456 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
4457 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
4458 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
4459 } else {
4460 tmp = I915_READ(DSPFW7);
4461 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
4462 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
4464 tmp = I915_READ(DSPHOWM);
4465 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
4466 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
4467 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
4468 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
4469 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
4470 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
4471 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
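/*
 * In both branches above, DSPHOWM extends the watermark fields read
 * from DSPFW1-9 with their high bits, which is why each
 * _FW_WM(tmp, *_HI) value is OR'd in shifted by 8 (9 for the wider SR
 * plane field).
 */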
4478 void vlv_wm_get_hw_state(struct drm_device *dev)
4480 struct drm_i915_private *dev_priv = to_i915(dev);
4481 struct vlv_wm_values *wm = &dev_priv->wm.vlv;
4482 struct intel_plane *plane;
4486 vlv_read_wm_values(dev_priv, wm);
4488 for_each_intel_plane(dev, plane)
4489 plane->wm.fifo_size = vlv_get_fifo_size(plane);
4491 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
4492 wm->level = VLV_WM_LEVEL_PM2;
4494 if (IS_CHERRYVIEW(dev_priv)) {
4495 mutex_lock(&dev_priv->rps.hw_lock);
4497 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4498 if (val & DSP_MAXFIFO_PM5_ENABLE)
4499 wm->level = VLV_WM_LEVEL_PM5;
4502 * If DDR DVFS is disabled in the BIOS, Punit
4503 * will never ack the request. So if that happens
4504 * assume we don't have to enable/disable DDR DVFS
4505 * dynamically. To test that just set the REQ_ACK
4506 * bit to poke the Punit, but don't change the
4507 * HIGH/LOW bits so that we don't actually change
4508 * the current state.
4510 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
4511 val |= FORCE_DDR_FREQ_REQ_ACK;
4512 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
4514 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
4515 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
4516 DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
4517 "assuming DDR DVFS is disabled\n");
4518 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
4519 } else {
4520 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
4521 if ((val & FORCE_DDR_HIGH_FREQ) == 0)
4522 wm->level = VLV_WM_LEVEL_DDR_DVFS;
4525 mutex_unlock(&dev_priv->rps.hw_lock);
4528 for_each_pipe(dev_priv, pipe)
4529 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
4530 pipe_name(pipe),
4531 wm->pipe[pipe].plane[PLANE_PRIMARY],
4532 wm->pipe[pipe].plane[PLANE_CURSOR],
4533 wm->pipe[pipe].plane[PLANE_SPRITE0],
4534 wm->pipe[pipe].plane[PLANE_SPRITE1]);
4536 DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
4537 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
4540 void ilk_wm_get_hw_state(struct drm_device *dev)
4542 struct drm_i915_private *dev_priv = to_i915(dev);
4543 struct ilk_wm_values *hw = &dev_priv->wm.hw;
4544 struct drm_crtc *crtc;
4546 for_each_crtc(dev, crtc)
4547 ilk_pipe_wm_get_hw_state(crtc);
4549 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
4550 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
4551 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
4553 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
4554 if (INTEL_GEN(dev_priv) >= 7) {
4555 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
4556 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
4559 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
4560 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
4561 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4562 else if (IS_IVYBRIDGE(dev_priv))
4563 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
4564 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4566 hw->enable_fbc_wm =
4567 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
4571 * intel_update_watermarks - update FIFO watermark values based on current modes
4573 * Calculate watermark values for the various WM regs based on current mode
4574 * and plane configuration.
4576 * There are several cases to deal with here:
4577 * - normal (i.e. non-self-refresh)
4578 * - self-refresh (SR) mode
4579 * - lines are large relative to FIFO size (buffer can hold up to 2)
4580 * - lines are small relative to FIFO size (buffer can hold more than 2
4581 * lines), so need to account for TLB latency
4583 * The normal calculation is:
4584 * watermark = dotclock * bytes per pixel * latency
4585 * where latency is platform & configuration dependent (we assume pessimal
4586 * values as the higher latency).
4588 * The SR calculation is:
4589 * watermark = (trunc(latency/line time)+1) * surface width *
4590 * bytes per pixel
4591 * where
4592 * line time = htotal / dotclock
4593 * surface width = hdisplay for normal plane and 64 for cursor
4594 * and latency is assumed to be high, as above.
4596 * The final value programmed to the register should always be rounded up,
4597 * and include an extra 2 entries to account for clock crossings.
4599 * We don't use the sprite, so we can ignore that. And on Crestline we have
4600 * to set the non-SR watermarks to 8.
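 * A hypothetical worked example for the normal case: a 100 MHz dotclock
 * at 4 bytes per pixel with 10us of latency needs roughly
 * 100e6 * 4 * 10e-6 = 4000 bytes of FIFO, which is then rounded up and
 * padded with the two extra entries mentioned above.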
4602 void intel_update_watermarks(struct intel_crtc *crtc)
4604 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4606 if (dev_priv->display.update_wm)
4607 dev_priv->display.update_wm(crtc);
4611 * Lock protecting IPS related data structures
4613 DEFINE_SPINLOCK(mchdev_lock);
4615 /* Global for IPS driver to get at the current i915 device. Protected by
4616 * mchdev_lock. */
4617 static struct drm_i915_private *i915_mch_dev;
4619 bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
4623 assert_spin_locked(&mchdev_lock);
4625 rgvswctl = I915_READ16(MEMSWCTL);
4626 if (rgvswctl & MEMCTL_CMD_STS) {
4627 DRM_DEBUG("gpu busy, RCS change rejected\n");
4628 return false; /* still busy with another command */
4631 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4632 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4633 I915_WRITE16(MEMSWCTL, rgvswctl);
4634 POSTING_READ16(MEMSWCTL);
4636 rgvswctl |= MEMCTL_CMD_STS;
4637 I915_WRITE16(MEMSWCTL, rgvswctl);
4639 return true;
4642 static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
4645 u8 fmax, fmin, fstart, vstart;
4647 spin_lock_irq(&mchdev_lock);
4649 rgvmodectl = I915_READ(MEMMODECTL);
4651 /* Enable temp reporting */
4652 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
4653 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
4655 /* 100ms RC evaluation intervals */
4656 I915_WRITE(RCUPEI, 100000);
4657 I915_WRITE(RCDNEI, 100000);
4659 /* Set max/min thresholds to 90ms and 80ms respectively */
4660 I915_WRITE(RCBMAXAVG, 90000);
4661 I915_WRITE(RCBMINAVG, 80000);
4663 I915_WRITE(MEMIHYST, 1);
4665 /* Set up min, max, and cur for interrupt handling */
4666 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
4667 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
4668 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
4669 MEMMODE_FSTART_SHIFT;
4671 vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
4672 PXVFREQ_PX_SHIFT;
4674 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
4675 dev_priv->ips.fstart = fstart;
4677 dev_priv->ips.max_delay = fstart;
4678 dev_priv->ips.min_delay = fmin;
4679 dev_priv->ips.cur_delay = fstart;
4681 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
4682 fmax, fmin, fstart);
4684 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
4687 * Interrupts will be enabled in ironlake_irq_postinstall
4690 I915_WRITE(VIDSTART, vstart);
4691 POSTING_READ(VIDSTART);
4693 rgvmodectl |= MEMMODE_SWMODE_EN;
4694 I915_WRITE(MEMMODECTL, rgvmodectl);
4696 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
4697 DRM_ERROR("stuck trying to change perf mode\n");
4698 mdelay(1);
4700 ironlake_set_drps(dev_priv, fstart);
4702 dev_priv->ips.last_count1 = I915_READ(DMIEC) +
4703 I915_READ(DDREC) + I915_READ(CSIEC);
4704 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
4705 dev_priv->ips.last_count2 = I915_READ(GFXEC);
4706 dev_priv->ips.last_time2 = ktime_get_raw_ns();
4708 spin_unlock_irq(&mchdev_lock);
4711 static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
4715 spin_lock_irq(&mchdev_lock);
4717 rgvswctl = I915_READ16(MEMSWCTL);
4719 /* Ack interrupts, disable EFC interrupt */
4720 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
4721 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
4722 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
4723 I915_WRITE(DEIIR, DE_PCU_EVENT);
4724 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
4726 /* Go back to the starting frequency */
4727 ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
4729 rgvswctl |= MEMCTL_CMD_STS;
4730 I915_WRITE(MEMSWCTL, rgvswctl);
4731 mdelay(1);
4733 spin_unlock_irq(&mchdev_lock);
4736 /* There's a funny hw issue where the hw returns all 0 when reading from
4737 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
4738 * ourselves, instead of doing a rmw cycle (which might result in us clearing
4739 * all limits and the gpu stuck at whatever frequency it is at atm).
4741 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
4745 /* Only set the down limit when we've reached the lowest level to avoid
4746 * getting more interrupts, otherwise leave this clear. This prevents a
4747 * race in the hw when coming out of rc6: There's a tiny window where
4748 * the hw runs at the minimal clock before selecting the desired
4749 * frequency, if the down threshold expires in that window we will not
4750 * receive a down interrupt. */
4751 if (IS_GEN9(dev_priv)) {
4752 limits = (dev_priv->rps.max_freq_softlimit) << 23;
4753 if (val <= dev_priv->rps.min_freq_softlimit)
4754 limits |= (dev_priv->rps.min_freq_softlimit) << 14;
4755 } else {
4756 limits = dev_priv->rps.max_freq_softlimit << 24;
4757 if (val <= dev_priv->rps.min_freq_softlimit)
4758 limits |= dev_priv->rps.min_freq_softlimit << 16;
4759 }
4761 return limits;
4764 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
4767 u32 threshold_up = 0, threshold_down = 0; /* in % */
4768 u32 ei_up = 0, ei_down = 0;
4770 new_power = dev_priv->rps.power;
4771 switch (dev_priv->rps.power) {
4772 case LOW_POWER:
4773 if (val > dev_priv->rps.efficient_freq + 1 &&
4774 val > dev_priv->rps.cur_freq)
4775 new_power = BETWEEN;
4776 break;
4778 case BETWEEN:
4779 if (val <= dev_priv->rps.efficient_freq &&
4780 val < dev_priv->rps.cur_freq)
4781 new_power = LOW_POWER;
4782 else if (val >= dev_priv->rps.rp0_freq &&
4783 val > dev_priv->rps.cur_freq)
4784 new_power = HIGH_POWER;
4785 break;
4787 case HIGH_POWER:
4788 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
4789 val < dev_priv->rps.cur_freq)
4790 new_power = BETWEEN;
4791 break;
4792 }
4793 /* Max/min bins are special */
4794 if (val <= dev_priv->rps.min_freq_softlimit)
4795 new_power = LOW_POWER;
4796 if (val >= dev_priv->rps.max_freq_softlimit)
4797 new_power = HIGH_POWER;
4798 if (new_power == dev_priv->rps.power)
4799 return;
4801 /* Note the units here are not exactly 1us, but 1280ns. */
4802 switch (new_power) {
4803 case LOW_POWER:
4804 /* Upclock if more than 95% busy over 16ms */
4805 ei_up = 16000;
4806 threshold_up = 95;
4808 /* Downclock if less than 85% busy over 32ms */
4809 ei_down = 32000;
4810 threshold_down = 85;
4811 break;
4813 case BETWEEN:
4814 /* Upclock if more than 90% busy over 13ms */
4815 ei_up = 13000;
4816 threshold_up = 90;
4818 /* Downclock if less than 75% busy over 32ms */
4819 ei_down = 32000;
4820 threshold_down = 75;
4821 break;
4823 case HIGH_POWER:
4824 /* Upclock if more than 85% busy over 10ms */
4825 ei_up = 10000;
4826 threshold_up = 85;
4828 /* Downclock if less than 60% busy over 32ms */
4829 ei_down = 32000;
4830 threshold_down = 60;
4831 break;
4832 }
4834 I915_WRITE(GEN6_RP_UP_EI,
4835 GT_INTERVAL_FROM_US(dev_priv, ei_up));
4836 I915_WRITE(GEN6_RP_UP_THRESHOLD,
4837 GT_INTERVAL_FROM_US(dev_priv,
4838 ei_up * threshold_up / 100));
4840 I915_WRITE(GEN6_RP_DOWN_EI,
4841 GT_INTERVAL_FROM_US(dev_priv, ei_down));
4842 I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
4843 GT_INTERVAL_FROM_US(dev_priv,
4844 ei_down * threshold_down / 100));
4846 I915_WRITE(GEN6_RP_CONTROL,
4847 GEN6_RP_MEDIA_TURBO |
4848 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4849 GEN6_RP_MEDIA_IS_GFX |
4850 GEN6_RP_ENABLE |
4851 GEN6_RP_UP_BUSY_AVG |
4852 GEN6_RP_DOWN_IDLE_AVG);
4854 dev_priv->rps.power = new_power;
4855 dev_priv->rps.up_threshold = threshold_up;
4856 dev_priv->rps.down_threshold = threshold_down;
4857 dev_priv->rps.last_adj = 0;
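/*
 * Worked example from the values above: in HIGH_POWER mode the up
 * evaluation interval is 10000us with an 85% busy threshold, so
 * GEN6_RP_UP_THRESHOLD gets GT_INTERVAL_FROM_US(dev_priv,
 * 10000 * 85 / 100), i.e. 8500us expressed in the ~1.28us GT interval
 * units noted above.
 */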
4860 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
4864 if (val > dev_priv->rps.min_freq_softlimit)
4865 mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
4866 if (val < dev_priv->rps.max_freq_softlimit)
4867 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
4869 mask &= dev_priv->pm_rps_events;
4871 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
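/*
 * The mask built above lists the events we want to receive, while
 * GEN6_PMINTRMSK is a disable mask, hence the inversion on return.
 * Down events stay unmasked only while we can still go lower and up
 * events only while we can still go higher, so no interrupts fire once
 * we sit at a soft limit.
 */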
4874 /* gen6_set_rps is called to update the frequency request, but should also be
4875 * called when the range (min_delay and max_delay) is modified so that we can
4876 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
4877 static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
4879 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
4880 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
4881 return;
4883 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4884 WARN_ON(val > dev_priv->rps.max_freq);
4885 WARN_ON(val < dev_priv->rps.min_freq);
4887 /* min/max delay may still have been modified so be sure to
4888 * write the limits value.
4890 if (val != dev_priv->rps.cur_freq) {
4891 gen6_set_rps_thresholds(dev_priv, val);
4893 if (IS_GEN9(dev_priv))
4894 I915_WRITE(GEN6_RPNSWREQ,
4895 GEN9_FREQUENCY(val));
4896 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
4897 I915_WRITE(GEN6_RPNSWREQ,
4898 HSW_FREQUENCY(val));
4899 else
4900 I915_WRITE(GEN6_RPNSWREQ,
4901 GEN6_FREQUENCY(val) |
4902 GEN6_OFFSET(0) |
4903 GEN6_AGGRESSIVE_TURBO);
4906 /* Make sure we continue to get interrupts
4907 * until we hit the minimum or maximum frequencies.
4909 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
4910 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4912 POSTING_READ(GEN6_RPNSWREQ);
4914 dev_priv->rps.cur_freq = val;
4915 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
4918 static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
4920 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4921 WARN_ON(val > dev_priv->rps.max_freq);
4922 WARN_ON(val < dev_priv->rps.min_freq);
4924 if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
4925 "Odd GPU freq value\n"))
4926 return;
4928 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4930 if (val != dev_priv->rps.cur_freq) {
4931 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
4932 if (!IS_CHERRYVIEW(dev_priv))
4933 gen6_set_rps_thresholds(dev_priv, val);
4936 dev_priv->rps.cur_freq = val;
4937 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
4940 /* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
4942 * * If Gfx is Idle, then
4943 * 1. Forcewake Media well.
4944 * 2. Request idle freq.
4945 * 3. Release Forcewake of Media well.
4947 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
4949 u32 val = dev_priv->rps.idle_freq;
4951 if (dev_priv->rps.cur_freq <= val)
4952 return;
4954 /* Wake up the media well, as that takes a lot less
4955 * power than the Render well. */
4956 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
4957 valleyview_set_rps(dev_priv, val);
4958 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
4961 void gen6_rps_busy(struct drm_i915_private *dev_priv)
4963 mutex_lock(&dev_priv->rps.hw_lock);
4964 if (dev_priv->rps.enabled) {
4965 if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
4966 gen6_rps_reset_ei(dev_priv);
4967 I915_WRITE(GEN6_PMINTRMSK,
4968 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
4970 gen6_enable_rps_interrupts(dev_priv);
4972 /* Ensure we start at the user's desired frequency */
4973 intel_set_rps(dev_priv,
4974 clamp(dev_priv->rps.cur_freq,
4975 dev_priv->rps.min_freq_softlimit,
4976 dev_priv->rps.max_freq_softlimit));
4978 mutex_unlock(&dev_priv->rps.hw_lock);
4981 void gen6_rps_idle(struct drm_i915_private *dev_priv)
4983 /* Flush our bottom-half so that it does not race with us
4984 * setting the idle frequency and so that it is bounded by
4985 * our rpm wakeref. And then disable the interrupts to stop any
4986 * further RPS reclocking whilst we are asleep.
4988 gen6_disable_rps_interrupts(dev_priv);
4990 mutex_lock(&dev_priv->rps.hw_lock);
4991 if (dev_priv->rps.enabled) {
4992 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4993 vlv_set_rps_idle(dev_priv);
4994 else
4995 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
4996 dev_priv->rps.last_adj = 0;
4997 I915_WRITE(GEN6_PMINTRMSK,
4998 gen6_sanitize_rps_pm_mask(dev_priv, ~0));
5000 mutex_unlock(&dev_priv->rps.hw_lock);
5002 spin_lock(&dev_priv->rps.client_lock);
5003 while (!list_empty(&dev_priv->rps.clients))
5004 list_del_init(dev_priv->rps.clients.next);
5005 spin_unlock(&dev_priv->rps.client_lock);
5008 void gen6_rps_boost(struct drm_i915_private *dev_priv,
5009 struct intel_rps_client *rps,
5010 unsigned long submitted)
5012 /* This is intentionally racy! We peek at the state here, then
5013 * validate inside the RPS worker.
5015 if (!(dev_priv->gt.awake &&
5016 dev_priv->rps.enabled &&
5017 dev_priv->rps.cur_freq < dev_priv->rps.boost_freq))
5018 return;
5020 /* Force a RPS boost (and don't count it against the client) if
5021 * the GPU is severely congested.
5023 if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
5024 rps = NULL;
5026 spin_lock(&dev_priv->rps.client_lock);
5027 if (rps == NULL || list_empty(&rps->link)) {
5028 spin_lock_irq(&dev_priv->irq_lock);
5029 if (dev_priv->rps.interrupts_enabled) {
5030 dev_priv->rps.client_boost = true;
5031 schedule_work(&dev_priv->rps.work);
5033 spin_unlock_irq(&dev_priv->irq_lock);
5035 if (rps != NULL) {
5036 list_add(&rps->link, &dev_priv->rps.clients);
5037 rps->boosts++;
5038 } else
5039 dev_priv->rps.boosts++;
5041 spin_unlock(&dev_priv->rps.client_lock);
5044 void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
5046 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5047 valleyview_set_rps(dev_priv, val);
5048 else
5049 gen6_set_rps(dev_priv, val);
5052 static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
5054 I915_WRITE(GEN6_RC_CONTROL, 0);
5055 I915_WRITE(GEN9_PG_ENABLE, 0);
5058 static void gen9_disable_rps(struct drm_i915_private *dev_priv)
5060 I915_WRITE(GEN6_RP_CONTROL, 0);
5063 static void gen6_disable_rps(struct drm_i915_private *dev_priv)
5065 I915_WRITE(GEN6_RC_CONTROL, 0);
5066 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
5067 I915_WRITE(GEN6_RP_CONTROL, 0);
5070 static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
5072 I915_WRITE(GEN6_RC_CONTROL, 0);
5075 static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
5077 /* we're doing forcewake before disabling RC6;
5078 * this is what the BIOS expects when going into suspend */
5079 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5081 I915_WRITE(GEN6_RC_CONTROL, 0);
5083 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5086 static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
5088 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5089 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
5090 mode = GEN6_RC_CTL_RC6_ENABLE;
5091 else
5092 mode = 0;
5093 }
5094 if (HAS_RC6p(dev_priv))
5095 DRM_DEBUG_DRIVER("Enabling RC6 states: "
5096 "RC6 %s RC6p %s RC6pp %s\n",
5097 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
5098 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
5099 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
5100 else
5102 DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
5103 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
5106 static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
5108 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5109 bool enable_rc6 = true;
5110 unsigned long rc6_ctx_base;
5114 rc_ctl = I915_READ(GEN6_RC_CONTROL);
5115 rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
5116 RC_SW_TARGET_STATE_SHIFT;
5117 DRM_DEBUG_DRIVER("BIOS enabled RC states: "
5118 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
5119 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
5120 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
5121 rc_sw_target);
5123 if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
5124 DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
5125 enable_rc6 = false;
5126 }
5129 * The exact context size is not known for BXT, so assume a page size
5130 * for this check.
5132 rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
5133 if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
5134 (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
5135 ggtt->stolen_reserved_size))) {
5136 DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
5137 enable_rc6 = false;
5138 }
5140 if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
5141 ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
5142 ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
5143 ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
5144 DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
5145 enable_rc6 = false;
5146 }
5148 if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
5149 !I915_READ(GEN8_PUSHBUS_ENABLE) ||
5150 !I915_READ(GEN8_PUSHBUS_SHIFT)) {
5151 DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
5152 enable_rc6 = false;
5153 }
5155 if (!I915_READ(GEN6_GFXPAUSE)) {
5156 DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
5157 enable_rc6 = false;
5158 }
5160 if (!I915_READ(GEN8_MISC_CTRL0)) {
5161 DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
5162 enable_rc6 = false;
5163 }
5165 return enable_rc6;
5166 }
5168 int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
5170 /* No RC6 before Ironlake and code is gone for ilk. */
5171 if (INTEL_INFO(dev_priv)->gen < 6)
5172 return 0;
5174 if (!enable_rc6)
5175 return 0;
5177 if (IS_GEN9_LP(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
5178 DRM_INFO("RC6 disabled by BIOS\n");
5179 return 0;
5180 }
5182 /* Respect the kernel parameter if it is set */
5183 if (enable_rc6 >= 0) {
5184 int mask;
5186 if (HAS_RC6p(dev_priv))
5187 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
5188 INTEL_RC6pp_ENABLE;
5189 else
5190 mask = INTEL_RC6_ENABLE;
5192 if ((enable_rc6 & mask) != enable_rc6)
5193 DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
5194 "(requested %d, valid %d)\n",
5195 enable_rc6 & mask, enable_rc6, mask);
5197 return enable_rc6 & mask;
5200 if (IS_IVYBRIDGE(dev_priv))
5201 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
5203 return INTEL_RC6_ENABLE;
5206 static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
5208 /* All of these values are in units of 50MHz */
5210 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
5211 if (IS_GEN9_LP(dev_priv)) {
5212 u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
5213 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
5214 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
5215 dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
5216 } else {
5217 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
5218 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
5219 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
5220 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
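/*
 * Illustrative decode of a hypothetical fuse value: rp_state_cap =
 * 0x040b16 on a non-BXT part would give rp0 = 0x16 (22 * 50 = 1100 MHz),
 * rp1 = 0x0b (550 MHz) and min_freq = 0x04 (200 MHz).
 */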
5222 /* hw_max = RP0 until we check for overclocking */
5223 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
5225 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
5226 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
5227 IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5228 u32 ddcc_status = 0;
5230 if (sandybridge_pcode_read(dev_priv,
5231 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
5232 &ddcc_status) == 0)
5233 dev_priv->rps.efficient_freq =
5234 clamp_t(u8,
5235 ((ddcc_status >> 8) & 0xff),
5236 dev_priv->rps.min_freq,
5237 dev_priv->rps.max_freq);
5240 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5241 /* Store the frequency values in 16.66 MHZ units, which is
5242 * the natural hardware unit for SKL
5244 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
5245 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
5246 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
5247 dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
5248 dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
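/*
 * E.g., assuming GEN9_FREQ_SCALER is 3: a fuse value of 22 (1100 MHz
 * in 50 MHz units) becomes 66 in SKL's native 16.66 MHz units, which
 * is still 1100 MHz; only the representation changes.
 */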
5252 static void reset_rps(struct drm_i915_private *dev_priv,
5253 void (*set)(struct drm_i915_private *, u8))
5255 u8 freq = dev_priv->rps.cur_freq;
5258 dev_priv->rps.power = -1;
5259 dev_priv->rps.cur_freq = -1;
5261 set(dev_priv, freq);
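/*
 * Poisoning rps.power and rps.cur_freq with -1 above guarantees that
 * the set() callback sees "val != cur_freq" and therefore reprograms
 * the thresholds and request registers from scratch instead of
 * short-circuiting on an apparently unchanged frequency.
 */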
5264 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
5265 static void gen9_enable_rps(struct drm_i915_private *dev_priv)
5267 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5269 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
5270 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
5272 * BIOS could leave the Hw Turbo enabled, so need to explicitly
5273 * clear out the Control register just to avoid inconsistency
5274 * with debugfs interface, which will show Turbo as enabled
5275 * only and that is not expected by the User after adding the
5276 * WaGsvDisableTurbo. Apart from this there is no problem even
5277 * if the Turbo is left enabled in the Control register, as the
5278 * Up/Down interrupts would remain masked.
5280 gen9_disable_rps(dev_priv);
5281 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5282 return;
5283 }
5285 /* Program defaults and thresholds for RPS*/
5286 I915_WRITE(GEN6_RC_VIDEO_FREQ,
5287 GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
5289 /* 1 second timeout*/
5290 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
5291 GT_INTERVAL_FROM_US(dev_priv, 1000000));
5293 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
5295 /* Leaning on the below call to gen6_set_rps to program/setup the
5296 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
5297 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
5298 reset_rps(dev_priv, gen6_set_rps);
5300 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5303 static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
5305 struct intel_engine_cs *engine;
5306 enum intel_engine_id id;
5307 uint32_t rc6_mask = 0;
5309 /* 1a: Software RC state - RC0 */
5310 I915_WRITE(GEN6_RC_STATE, 0);
5312 /* 1b: Get forcewake during program sequence. Although the driver
5313 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
5314 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5316 /* 2a: Disable RC states. */
5317 I915_WRITE(GEN6_RC_CONTROL, 0);
5319 /* 2b: Program RC6 thresholds.*/
5321 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
5322 if (IS_SKYLAKE(dev_priv))
5323 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
5324 else
5325 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
5326 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5327 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5328 for_each_engine(engine, dev_priv, id)
5329 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5331 if (HAS_GUC(dev_priv))
5332 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
5334 I915_WRITE(GEN6_RC_SLEEP, 0);
5336 /* 2c: Program Coarse Power Gating Policies. */
5337 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
5338 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
5340 /* 3a: Enable RC6 */
5341 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
5342 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
5343 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
5344 /* WaRsUseTimeoutMode:bxt */
5345 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
5346 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
5347 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5348 GEN7_RC_CTL_TO_MODE |
5349 rc6_mask);
5350 } else {
5351 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
5352 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5353 GEN6_RC_CTL_EI_MODE(1) |
5354 rc6_mask);
5355 }
5358 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
5359 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
5361 if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
5362 I915_WRITE(GEN9_PG_ENABLE, 0);
5363 else
5364 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
5365 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
5367 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5370 static void gen8_enable_rps(struct drm_i915_private *dev_priv)
5372 struct intel_engine_cs *engine;
5373 enum intel_engine_id id;
5374 uint32_t rc6_mask = 0;
5376 /* 1a: Software RC state - RC0 */
5377 I915_WRITE(GEN6_RC_STATE, 0);
5379 /* 1c & 1d: Get forcewake during program sequence. Although the driver
5380 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
5381 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5383 /* 2a: Disable RC states. */
5384 I915_WRITE(GEN6_RC_CONTROL, 0);
5386 /* 2b: Program RC6 thresholds.*/
5387 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
5388 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5389 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5390 for_each_engine(engine, dev_priv, id)
5391 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5392 I915_WRITE(GEN6_RC_SLEEP, 0);
5393 if (IS_BROADWELL(dev_priv))
5394 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
5395 else
5396 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
5399 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
5400 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
5401 intel_print_rc6_info(dev_priv, rc6_mask);
5402 if (IS_BROADWELL(dev_priv))
5403 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5404 GEN7_RC_CTL_TO_MODE |
5405 rc6_mask);
5406 else
5407 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5408 GEN6_RC_CTL_EI_MODE(1) |
5409 rc6_mask);
5411 /* 4 Program defaults and thresholds for RPS*/
5412 I915_WRITE(GEN6_RPNSWREQ,
5413 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
5414 I915_WRITE(GEN6_RC_VIDEO_FREQ,
5415 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
5416 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
5417 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
5419 /* Docs recommend 900MHz, and 300 MHz respectively */
5420 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
5421 dev_priv->rps.max_freq_softlimit << 24 |
5422 dev_priv->rps.min_freq_softlimit << 16);
5424 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
5425 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
5426 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
5427 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
5429 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5432 I915_WRITE(GEN6_RP_CONTROL,
5433 GEN6_RP_MEDIA_TURBO |
5434 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5435 GEN6_RP_MEDIA_IS_GFX |
5436 GEN6_RP_ENABLE |
5437 GEN6_RP_UP_BUSY_AVG |
5438 GEN6_RP_DOWN_IDLE_AVG);
5440 /* 6: Ring frequency + overclocking (our driver does this later) */
5442 reset_rps(dev_priv, gen6_set_rps);
5444 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5447 static void gen6_enable_rps(struct drm_i915_private *dev_priv)
5449 struct intel_engine_cs *engine;
5450 enum intel_engine_id id;
5451 u32 rc6vids, rc6_mask = 0;
5456 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5458 /* Here begins a magic sequence of register writes to enable
5459 * auto-downclocking.
5461 * Perhaps there might be some value in exposing these to
5464 I915_WRITE(GEN6_RC_STATE, 0);
5466 /* Clear the DBG now so we don't confuse earlier errors */
5467 gtfifodbg = I915_READ(GTFIFODBG);
5468 if (gtfifodbg) {
5469 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
5470 I915_WRITE(GTFIFODBG, gtfifodbg);
5471 }
5473 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5475 /* disable the counters and set deterministic thresholds */
5476 I915_WRITE(GEN6_RC_CONTROL, 0);
5478 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
5479 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
5480 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
5481 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
5482 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
5484 for_each_engine(engine, dev_priv, id)
5485 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5487 I915_WRITE(GEN6_RC_SLEEP, 0);
5488 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
5489 if (IS_IVYBRIDGE(dev_priv))
5490 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
5491 else
5492 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
5493 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
5494 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
5496 /* Check if we are enabling RC6 */
5497 rc6_mode = intel_enable_rc6();
5498 if (rc6_mode & INTEL_RC6_ENABLE)
5499 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
5501 /* We don't use those on Haswell */
5502 if (!IS_HASWELL(dev_priv)) {
5503 if (rc6_mode & INTEL_RC6p_ENABLE)
5504 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
5506 if (rc6_mode & INTEL_RC6pp_ENABLE)
5507 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
5510 intel_print_rc6_info(dev_priv, rc6_mask);
5512 I915_WRITE(GEN6_RC_CONTROL,
5513 rc6_mask |
5514 GEN6_RC_CTL_EI_MODE(1) |
5515 GEN6_RC_CTL_HW_ENABLE);
5517 /* Power down if completely idle for over 50ms */
5518 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
5519 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5521 reset_rps(dev_priv, gen6_set_rps);
5523 rc6vids = 0;
5524 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
5525 if (IS_GEN6(dev_priv) && ret) {
5526 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
5527 } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
5528 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
5529 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
5530 rc6vids &= 0xffff00;
5531 rc6vids |= GEN6_ENCODE_RC6_VID(450);
5532 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
5533 if (ret)
5534 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
5537 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5540 static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
5542 int min_freq = 15;
5543 unsigned int gpu_freq;
5544 unsigned int max_ia_freq, min_ring_freq;
5545 unsigned int max_gpu_freq, min_gpu_freq;
5546 int scaling_factor = 180;
5547 struct cpufreq_policy *policy;
5549 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5551 policy = cpufreq_cpu_get(0);
5552 if (policy) {
5553 max_ia_freq = policy->cpuinfo.max_freq;
5554 cpufreq_cpu_put(policy);
5555 } else {
5557 * Default to measured freq if none found, PCU will ensure we
5558 * don't overclock the cpu
5560 max_ia_freq = tsc_khz;
5561 }
5563 /* Convert from kHz to MHz */
5564 max_ia_freq /= 1000;
5566 min_ring_freq = I915_READ(DCLK) & 0xf;
5567 /* convert DDR frequency from units of 266.6MHz to bandwidth */
5568 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
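/*
 * Illustrative arithmetic: a raw DCLK field of 3 gives
 * mult_frac(3, 8, 3) = 8 in the ring-ratio units used by the
 * comparison below.
 */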
5570 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5571 /* Convert GT frequency to 50 HZ units */
5572 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
5573 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
5574 } else {
5575 min_gpu_freq = dev_priv->rps.min_freq;
5576 max_gpu_freq = dev_priv->rps.max_freq;
5580 * For each potential GPU frequency, load a ring frequency we'd like
5581 * to use for memory access. We do this by specifying the IA frequency
5582 * the PCU should use as a reference to determine the ring frequency.
5584 for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
5585 int diff = max_gpu_freq - gpu_freq;
5586 unsigned int ia_freq = 0, ring_freq = 0;
5588 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5590 * ring_freq = 2 * GT. ring_freq is in 100MHz units
5591 * No floor required for ring frequency on SKL.
5593 ring_freq = gpu_freq;
5594 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
5595 /* max(2 * GT, DDR). NB: GT is 50MHz units */
5596 ring_freq = max(min_ring_freq, gpu_freq);
5597 } else if (IS_HASWELL(dev_priv)) {
5598 ring_freq = mult_frac(gpu_freq, 5, 4);
5599 ring_freq = max(min_ring_freq, ring_freq);
5600 /* leave ia_freq as the default, chosen by cpufreq */
5601 } else {
5602 /* On older processors, there is no separate ring
5603 * clock domain, so in order to boost the bandwidth
5604 * of the ring, we need to upclock the CPU (ia_freq).
5606 * For GPU frequencies less than 750MHz,
5607 * just use the lowest ring freq.
5609 if (gpu_freq < min_freq)
5610 ia_freq = 800;
5611 else
5612 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
5613 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
5616 sandybridge_pcode_write(dev_priv,
5617 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
5618 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
5619 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
5620 gpu_freq);
5624 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
5628 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5630 switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
5631 case 8:
5632 /* (2 * 4) config */
5633 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
5634 break;
5635 case 12:
5636 /* (2 * 6) config */
5637 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
5638 break;
5639 case 16:
5640 /* (2 * 8) config */
5641 default:
5642 /* Setting (2 * 8) Min RP0 for any other combination */
5643 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
5644 break;
5645 }
5647 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
5649 return rp0;
5652 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5656 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
5657 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
5659 return rpe;
5662 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
5666 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5667 rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
5669 return rp1;
5672 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
5676 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
5678 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
5680 return rp1;
5683 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
5687 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
5689 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
5691 rp0 = min_t(u32, rp0, 0xea);
5693 return rp0;
5696 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5700 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
5701 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
5702 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
5703 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
5705 return rpe;
5708 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
5712 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
5714 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
5715 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
5716 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
5717 * a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
5718 * to make sure it matches what Punit accepts.
5720 return max_t(u32, val, 0xc0);
5723 /* Check that the pctx buffer wasn't moved under us. */
5724 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
5726 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5728 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
5729 dev_priv->vlv_pctx->stolen->start);
5733 /* Check that the pcbr address is not empty. */
5734 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
5736 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5738 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
5741 static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
5743 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5744 unsigned long pctx_paddr, paddr;
5746 int pctx_size = 32*1024;
5748 pcbr = I915_READ(VLV_PCBR);
5749 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
5750 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5751 paddr = (dev_priv->mm.stolen_base +
5752 (ggtt->stolen_size - pctx_size));
5754 pctx_paddr = (paddr & (~4095));
5755 I915_WRITE(VLV_PCBR, pctx_paddr);
5758 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5761 static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
5763 struct drm_i915_gem_object *pctx;
5764 unsigned long pctx_paddr;
5766 int pctx_size = 24*1024;
5768 pcbr = I915_READ(VLV_PCBR);
5769 if (pcbr) {
5770 /* BIOS set it up already, grab the pre-alloc'd space */
5771 int pcbr_offset;
5773 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
5774 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
5775 pcbr_offset,
5776 I915_GTT_OFFSET_NONE,
5777 pctx_size);
5778 goto out;
5779 }
5781 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5784 * From the Gunit register HAS:
5785 * The Gfx driver is expected to program this register and ensure
5786 * proper allocation within Gfx stolen memory. For example, this
5787 * register should be programmed such than the PCBR range does not
5788 * overlap with other ranges, such as the frame buffer, protected
5789 * memory, or any other relevant ranges.
5791 pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
5792 if (!pctx) {
5793 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
5794 goto out;
5795 }
5797 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
5798 I915_WRITE(VLV_PCBR, pctx_paddr);
5800 out:
5801 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5802 dev_priv->vlv_pctx = pctx;
5805 static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
5807 if (WARN_ON(!dev_priv->vlv_pctx))
5808 return;
5810 i915_gem_object_put(dev_priv->vlv_pctx);
5811 dev_priv->vlv_pctx = NULL;
5814 static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
5816 dev_priv->rps.gpll_ref_freq =
5817 vlv_get_cck_clock(dev_priv, "GPLL ref",
5818 CCK_GPLL_CLOCK_CONTROL,
5819 dev_priv->czclk_freq);
5821 DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
5822 dev_priv->rps.gpll_ref_freq);
5825 static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
5829 valleyview_setup_pctx(dev_priv);
5831 vlv_init_gpll_ref_freq(dev_priv);
5833 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5834 switch ((val >> 6) & 3) {
5835 case 0:
5836 case 1:
5837 dev_priv->mem_freq = 800;
5838 break;
5839 case 2:
5840 dev_priv->mem_freq = 1066;
5841 break;
5842 case 3:
5843 dev_priv->mem_freq = 1333;
5844 break;
5845 }
5846 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5848 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
5849 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5850 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
5851 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
5852 dev_priv->rps.max_freq);
5854 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
5855 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
5856 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5857 dev_priv->rps.efficient_freq);
5859 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
5860 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
5861 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
5862 dev_priv->rps.rp1_freq);
5864 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
5865 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
5866 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
5867 dev_priv->rps.min_freq);
5870 static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
5874 cherryview_setup_pctx(dev_priv);
5876 vlv_init_gpll_ref_freq(dev_priv);
5878 mutex_lock(&dev_priv->sb_lock);
5879 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
5880 mutex_unlock(&dev_priv->sb_lock);
5882 switch ((val >> 2) & 0x7) {
5883 case 3:
5884 dev_priv->mem_freq = 2000;
5885 break;
5886 default:
5887 dev_priv->mem_freq = 1600;
5888 break;
5889 }
5890 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5892 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
5893 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5894 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
5895 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
5896 dev_priv->rps.max_freq);
5898 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
5899 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
5900 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5901 dev_priv->rps.efficient_freq);
5903 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
5904 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
5905 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
5906 dev_priv->rps.rp1_freq);
5908 /* PUnit validated range is only [RPe, RP0] */
5909 dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
5910 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
5911 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
5912 dev_priv->rps.min_freq);
5914 WARN_ONCE((dev_priv->rps.max_freq |
5915 dev_priv->rps.efficient_freq |
5916 dev_priv->rps.rp1_freq |
5917 dev_priv->rps.min_freq) & 1,
5918 "Odd GPU freq values\n");
5921 static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
5923 valleyview_cleanup_pctx(dev_priv);
5926 static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
5928 struct intel_engine_cs *engine;
5929 enum intel_engine_id id;
5930 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
5932 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5934 gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
5935 GT_FIFO_FREE_ENTRIES_CHV);
5936 if (gtfifodbg) {
5937 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5938 gtfifodbg);
5939 I915_WRITE(GTFIFODBG, gtfifodbg);
5940 }
5942 cherryview_check_pctx(dev_priv);
5944 /* 1a & 1b: Get forcewake during program sequence. Although the driver
5945 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
5946 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5948 /* Disable RC states. */
5949 I915_WRITE(GEN6_RC_CONTROL, 0);
5951 /* 2a: Program RC6 thresholds.*/
5952 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
5953 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5954 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5956 for_each_engine(engine, dev_priv, id)
5957 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5958 I915_WRITE(GEN6_RC_SLEEP, 0);
5960 /* TO threshold set to 500 us ( 0x186 * 1.28 us) */
5961 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
5963 /* allows RC6 residency counter to work */
5964 I915_WRITE(VLV_COUNTER_CONTROL,
5965 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
5966 VLV_MEDIA_RC6_COUNT_EN |
5967 VLV_RENDER_RC6_COUNT_EN));
5969 /* For now we assume BIOS is allocating and populating the PCBR */
5970 pcbr = I915_READ(VLV_PCBR);
5973 if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
5974 (pcbr >> VLV_PCBR_ADDR_SHIFT))
5975 rc6_mode = GEN7_RC_CTL_TO_MODE;
5977 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5979 /* 4 Program defaults and thresholds for RPS*/
5980 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
5981 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5982 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5983 I915_WRITE(GEN6_RP_UP_EI, 66000);
5984 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5986 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5989 I915_WRITE(GEN6_RP_CONTROL,
5990 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5991 GEN6_RP_MEDIA_IS_GFX |
5993 GEN6_RP_UP_BUSY_AVG |
5994 GEN6_RP_DOWN_IDLE_AVG);
5996 /* Setting Fixed Bias */
5997 val = VLV_OVERRIDE_EN |
5998 VLV_SOC_TDP_EN |
5999 CHV_BIAS_CPU_50_SOC_50;
6000 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
6002 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
6004 /* RPS code assumes GPLL is used */
6005 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
6007 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
6008 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
6010 reset_rps(dev_priv, valleyview_set_rps);
6012 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
6015 static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
6017 struct intel_engine_cs *engine;
6018 enum intel_engine_id id;
6019 u32 gtfifodbg, val, rc6_mode = 0;
6021 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6023 valleyview_check_pctx(dev_priv);
6025 gtfifodbg = I915_READ(GTFIFODBG);
6026 if (gtfifodbg) {
6027 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
6028 gtfifodbg);
6029 I915_WRITE(GTFIFODBG, gtfifodbg);
6030 }
6032 /* If VLV, Forcewake all wells, else re-direct to regular path */
6033 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6035 /* Disable RC states. */
6036 I915_WRITE(GEN6_RC_CONTROL, 0);
6038 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
6039 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
6040 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
6041 I915_WRITE(GEN6_RP_UP_EI, 66000);
6042 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
6044 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6046 I915_WRITE(GEN6_RP_CONTROL,
6047 GEN6_RP_MEDIA_TURBO |
6048 GEN6_RP_MEDIA_HW_NORMAL_MODE |
6049 GEN6_RP_MEDIA_IS_GFX |
6051 GEN6_RP_UP_BUSY_AVG |
6052 GEN6_RP_DOWN_IDLE_CONT);
6054 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
6055 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
6056 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
6058 for_each_engine(engine, dev_priv, id)
6059 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
6061 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
6063 /* allows RC6 residency counter to work */
6064 I915_WRITE(VLV_COUNTER_CONTROL,
6065 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
6066 VLV_RENDER_RC0_COUNT_EN |
6067 VLV_MEDIA_RC6_COUNT_EN |
6068 VLV_RENDER_RC6_COUNT_EN));
6070 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
6071 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
6073 intel_print_rc6_info(dev_priv, rc6_mode);
6075 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
6077 /* Setting Fixed Bias */
6078 val = VLV_OVERRIDE_EN |
6079 VLV_SOC_TDP_EN |
6080 VLV_BIAS_CPU_125_SOC_875;
6081 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
6083 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
6085 /* RPS code assumes GPLL is used */
6086 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
6088 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
6089 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
6091 reset_rps(dev_priv, valleyview_set_rps);
6093 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1 << post) * pre));

	return freq;
}
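/*
 * Example decode (illustrative values only): vidfreq 0x00041001 has
 * div = 4, post = 1, pre = 1, giving (4 * 133333) / ((1 << 1) * 1) =
 * 266666, i.e. roughly 266 MHz derived from the 133.33 MHz reference.
 */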
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
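/*
 * Editorial note: each row supplies the slope (m) and intercept (c) for
 * the chipset power fit in __i915_chipset_val() below; rows are keyed by
 * the memory configuration (ips.c_m) and the memory frequency (ips.r_t,
 * matching the 1333/1066/800 column) detected at init time.
 */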
static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	assert_spin_locked(&mchdev_lock);

	diff1 = now - dev_priv->ips.last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->ips.last_count1) {
		diff = ~0UL - dev_priv->ips.last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->ips.last_count1 = total_count;
	dev_priv->ips.last_time1 = now;

	dev_priv->ips.chipset_power = ret;

	return ret;
}
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	unsigned long val;

	if (INTEL_INFO(dev_priv)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_chipset_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}
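/*
 * Editorial note: i915_mch_val() below evaluates what appears to be a
 * linear thermal fit: the slope m and intercept b are unpacked from the
 * TSFS register, x is the current TR1 sensor reading, and the result
 * feeds the empirical correction tables in __i915_gfx_val().
 */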
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}
static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}
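/*
 * Editorial note: the decoded value is presumably in mV; PXVID 7 maps to
 * exactly 1125, which is the floor that pvid_to_extvid() below subtracts
 * on mobile parts (reporting headroom above 1.125 V) while desktop parts
 * keep the absolute value.
 */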
static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	const int vd = _pxvid_to_vd(pxvid);
	const int vm = vd - 1125;

	if (INTEL_INFO(dev_priv)->is_mobile)
		return vm > 0 ? vm : 0;

	return vd;
}
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	u64 now, diff, diffms;
	u32 count;

	assert_spin_locked(&mchdev_lock);

	now = ktime_get_raw_ns();
	diffms = now - dev_priv->ips.last_time2;
	do_div(diffms, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->ips.gfx_power = diff;
}
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen != 5)
		return;

	spin_lock_irq(&mchdev_lock);

	__i915_update_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);
}
static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	assert_spin_locked(&mchdev_lock);

	pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long val;

	if (INTEL_INFO(dev_priv)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = __i915_chipset_val(dev_priv);
	graphics_val = __i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
		dev_priv->ips.max_delay--;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
		dev_priv->ips.max_delay++;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
 * i915_gpu_busy - indicate GPU busyness to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	bool ret = false;

	spin_lock_irq(&mchdev_lock);
	if (i915_mch_dev)
		ret = i915_mch_dev->gt.awake;
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->ips.max_delay = dev_priv->ips.fstart;

	if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
		ret = false;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}
void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = dev_priv;
	spin_unlock_irq(&mchdev_lock);

	ips_ping_for_i915_load();
}
void intel_gpu_ips_teardown(void)
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}
static void intel_init_emon(struct drm_i915_private *dev_priv)
{
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW(i), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ(i));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW(i), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL(i), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!i915.enable_rc6) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		intel_runtime_pm_get(dev_priv);
	}

	mutex_lock(&dev_priv->drm.struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);

	/* Initialize RPS limits (for userspace) */
	if (IS_CHERRYVIEW(dev_priv))
		cherryview_init_gt_powersave(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_init_gt_powersave(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_init_rps_frequencies(dev_priv);

	/* Derive initial user preferences/limits from the hardware limits */
	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
	dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;

	dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
	dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		dev_priv->rps.min_freq_softlimit =
			max_t(int,
			      dev_priv->rps.efficient_freq,
			      intel_freq_opcode(dev_priv, 450));

	/* After setting max-softlimit, find the overclock max freq */
	if (IS_GEN6(dev_priv) ||
	    IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
		u32 params = 0;

		sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
		if (params & BIT(31)) { /* OC supported */
			DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
					 (dev_priv->rps.max_freq & 0xff) * 50,
					 (params & 0xff) * 50);
			dev_priv->rps.max_freq = params & 0xff;
		}
	}

	/* Finally allow us to boost to max by default */
	dev_priv->rps.boost_freq = dev_priv->rps.max_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_autoenable_gt_powersave(dev_priv);
}
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (IS_VALLEYVIEW(dev_priv))
		valleyview_cleanup_gt_powersave(dev_priv);

	if (!i915.enable_rc6)
		intel_runtime_pm_put(dev_priv);
}
/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev_priv: i915 device
 *
 * We don't want to disable RC6 or other features here, we just want
 * to make sure any work we've queued has finished and won't bother
 * us while we're suspended.
 */
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 6)
		return;

	if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
		intel_runtime_pm_put(dev_priv);

	/* gen6_rps_idle() will be called later to disable interrupts */
}
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
{
	dev_priv->rps.enabled = true; /* force disabling */
	intel_disable_gt_powersave(dev_priv);

	gen6_reset_rps_interrupts(dev_priv);
}
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (!READ_ONCE(dev_priv->rps.enabled))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_disable_rc6(dev_priv);
		gen9_disable_rps(dev_priv);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_disable_rps(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		valleyview_disable_rps(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		gen6_disable_rps(dev_priv);
	} else if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_disable_drps(dev_priv);
	}

	dev_priv->rps.enabled = false;
	mutex_unlock(&dev_priv->rps.hw_lock);
}
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
{
	/* We shouldn't be disabling as we submit, so this should be less
	 * racy than it appears!
	 */
	if (READ_ONCE(dev_priv->rps.enabled))
		return;

	/* Powersaving is controlled by the host when inside a VM */
	if (intel_vgpu_active(dev_priv))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_enable_rps(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		valleyview_enable_rps(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		gen9_enable_rc6(dev_priv);
		gen9_enable_rps(dev_priv);
		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
			gen6_update_ring_freq(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		gen8_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		gen6_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	} else if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_enable_drps(dev_priv);
		intel_init_emon(dev_priv);
	}

	WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);

	WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);

	dev_priv->rps.enabled = true;
	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void __intel_autoenable_gt_powersave(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
	struct intel_engine_cs *rcs;
	struct drm_i915_gem_request *req;

	if (READ_ONCE(dev_priv->rps.enabled))
		goto out;

	rcs = dev_priv->engine[RCS];
	if (rcs->last_context)
		goto out;

	if (!rcs->init_context)
		goto out;

	mutex_lock(&dev_priv->drm.struct_mutex);

	req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
	if (IS_ERR(req))
		goto unlock;

	if (!i915.enable_execlists && i915_switch_context(req) == 0)
		rcs->init_context(req);

	/* Mark the device busy, calling intel_enable_gt_powersave() */
	i915_add_request_no_flush(req);

unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
out:
	intel_runtime_pm_put(dev_priv);
}
void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (READ_ONCE(dev_priv->rps.enabled))
		return;

	if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_enable_drps(dev_priv);
		intel_init_emon(dev_priv);
	} else if (INTEL_INFO(dev_priv)->gen >= 6) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
		if (queue_delayed_work(dev_priv->wq,
				       &dev_priv->rps.autoenable_work,
				       round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);
	}
}
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);

		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
		POSTING_READ(DSPSURF(pipe));
	}
}
static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}
static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	ilk_init_lp_watermarks(dev_priv);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);

	ibx_init_clock_gating(dev_priv);
}
static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}
static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}
static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	ilk_init_lp_watermarks(dev_priv);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev_priv);

	cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT_LP(dev_priv)) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
				   int general_prio_credits,
				   int high_prio_credits)
{
	u32 misccpctl;

	/* WaTempDisableDOPClkGating:bdw */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

	I915_WRITE(GEN8_L3SQCREG1,
		   L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
		   L3_HIGH_PRIO_CREDITS(high_prio_credits));

	/*
	 * Wait at least 100 clocks before re-enabling clock gating.
	 * See the definition of L3SQCREG1 in BSpec.
	 */
	POSTING_READ(GEN8_L3SQCREG1);
	udelay(1);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
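/*
 * Editorial note: the callers below pick the credit split per platform,
 * 30/2 for Broadwell and 38/2 for Cherryview, both taken from the
 * WaProgramL3SqcReg1Default workaround.
 */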
static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableGamClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);

	/* WaFbcNukeOnHostModify:kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void skylake_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WAC6entrylatency:skl */
	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
		   FBC_LLC_FULLY_OPEN);

	/* WaFbcNukeOnHostModify:skl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	ilk_init_lp_watermarks(dev_priv);

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaProgramL3SqcReg1Default:bdw */
	gen8_set_l3sqc_credits(dev_priv, 30, 2);

	/*
	 * WaGttCachingOffByDefault:bdw
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);

	/* WaKVMNotificationOnConfigChange:bdw */
	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);

	lpt_init_clock_gating(dev_priv);
}
static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
{
	ilk_init_lp_watermarks(dev_priv);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev_priv);
}
static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev_priv);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev_priv);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev_priv))
		cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}
static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void cherryview_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:chv
	 * See gfxspecs/Related Documents/Performance Guide/
	 * LSQC Setting Recommendations.
	 */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);

	/*
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}
static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev_priv))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);
}
static void crestline_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void broadwater_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev_priv))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
	dev_priv->display.init_clock_gating(dev_priv);
}
void intel_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT(dev_priv))
		lpt_suspend_hw(dev_priv);
}
static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}
/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_SKYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = skylake_init_clock_gating;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
	else if (IS_GEN9_LP(dev_priv))
		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.init_clock_gating = haswell_init_clock_gating;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
	else if (IS_GEN6(dev_priv))
		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
	else if (IS_GEN5(dev_priv))
		dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
	else if (IS_G4X(dev_priv))
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	else if (IS_CRESTLINE(dev_priv))
		dev_priv->display.init_clock_gating = crestline_init_clock_gating;
	else if (IS_BROADWATER(dev_priv))
		dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	else if (IS_GEN3(dev_priv))
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	else if (IS_GEN2(dev_priv))
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->display.init_clock_gating = nop_init_clock_gating;
	}
}
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
	intel_fbc_init(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev_priv))
		i915_pineview_get_mem_freq(dev_priv);
	else if (IS_GEN5(dev_priv))
		i915_ironlake_get_mem_freq(dev_priv);

	/* For FIFO watermark updates */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_setup_wm_latency(dev_priv);
		dev_priv->display.initial_watermarks = skl_initial_wm;
		dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
		dev_priv->display.compute_global_watermarks = skl_compute_wm;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_setup_wm_latency(dev_priv);

		if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
			dev_priv->display.compute_intermediate_wm =
				ilk_compute_intermediate_wm;
			dev_priv->display.initial_watermarks =
				ilk_initial_watermarks;
			dev_priv->display.optimize_watermarks =
				ilk_optimize_watermarks;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_setup_wm_latency(dev_priv);
		dev_priv->display.update_wm = vlv_update_wm;
	} else if (IS_PINEVIEW(dev_priv)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.update_wm = g4x_update_wm;
	} else if (IS_GEN4(dev_priv)) {
		dev_priv->display.update_wm = i965_update_wm;
	} else if (IS_GEN3(dev_priv)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
	} else if (IS_GEN2(dev_priv)) {
		if (INTEL_INFO(dev_priv)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}
static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	uint32_t flags =
		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_UNIMPLEMENTED_CMD:
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	case GEN6_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}
static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	uint32_t flags =
		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN7_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case GEN7_PCODE_ILLEGAL_DATA:
		return -EINVAL;
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}
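/*
 * Editorial note: both pcode helpers below follow the same mailbox
 * handshake: load GEN6_PCODE_DATA (and DATA1), write the command with
 * GEN6_PCODE_READY set into the mailbox register, poll until the hardware
 * clears READY, then collect the result and decode the error flags with
 * the helpers above.
 */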
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
				       500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ_FW(GEN6_PCODE_DATA);
	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
				 status);
		return status;
	}

	return 0;
}
int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
			    u32 mbox, u32 val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, val);
	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
				       500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
				 status);
		return status;
	}

	return 0;
}
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
}
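/*
 * Worked example (illustrative numbers): with rps.gpll_ref_freq = 19200,
 * opcode 0xc7 gives N = 0x10 = 16, so byt_gpu_freq() returns
 * DIV_ROUND_CLOSEST(19200 * 16, 1000) = 307 (MHz), and byt_freq_opcode(307)
 * rounds back to 16 + 0xb7 = 0xc7.
 */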
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
}
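/*
 * Editorial note: CHV opcodes count in CU2x (double) units, hence the
 * extra divide-by-two in chv_gpu_freq() and the rounding to an even
 * opcode in chv_freq_opcode().
 */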
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_freq_opcode(dev_priv, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}
struct request_boost {
	struct work_struct work;
	struct drm_i915_gem_request *req;
};
static void __intel_rps_boost_work(struct work_struct *work)
{
	struct request_boost *boost = container_of(work, struct request_boost, work);
	struct drm_i915_gem_request *req = boost->req;

	if (!i915_gem_request_completed(req))
		gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);

	i915_gem_request_put(req);
	kfree(boost);
}
void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
{
	struct request_boost *boost;

	if (req == NULL || INTEL_GEN(req->i915) < 6)
		return;

	if (i915_gem_request_completed(req))
		return;

	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
	if (boost == NULL)
		return;

	boost->req = i915_gem_request_get(req);

	INIT_WORK(&boost->work, __intel_rps_boost_work);
	queue_work(req->i915->wq, &boost->work);
}
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->rps.hw_lock);
	spin_lock_init(&dev_priv->rps.client_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
			  __intel_autoenable_gt_powersave);
	INIT_LIST_HEAD(&dev_priv->rps.clients);

	dev_priv->pm.suspended = false;
	atomic_set(&dev_priv->pm.wakeref_count, 0);
}