/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>

/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ from
 * each other in the latency required to enter and leave RC6, and in the
 * voltage consumed by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, where RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require a higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE	(1<<0)
#define INTEL_RC6p_ENABLE	(1<<1)
#define INTEL_RC6pp_ENABLE	(1<<2)
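
/*
 * Illustrative example (not taken from this file): a platform that supports
 * RC6 and deep RC6 but not RC6pp would use a mask of
 * INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE.
 */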

static void gen9_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	I915_WRITE(GEN8_CONFIG0,
		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);

	/* WaEnableChickenDCPR:skl,bxt,kbl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
	/* WaFbcWakeMemOn:skl,bxt,kbl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);
}

static void bxt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_clock_gating(dev);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   PWM1_GATING_DIS | PWM2_GATING_DIS);
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
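
/*
 * For example, FW_WM(wm, SR) expands to
 * ((wm) << DSPFW_SR_SHIFT) & DSPFW_SR_MASK, i.e. the watermark value
 * shifted into the SR field of a DSPFW register and clamped to that
 * field's mask.
 */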

void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
		dev_priv->wm.vlv.cxsr = enable;
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power, and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
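
/*
 * Worked example with illustrative numbers: at a 300 MHz pixel clock and
 * 4 bytes per pixel the display drains 1200 bytes/us from the FIFO, so
 * covering 5us of latency takes roughly 6000 bytes, i.e. about 94
 * 64-byte cachelines of FIFO headroom.
 */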

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
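
/*
 * For example, VLV_FIFO_START(dsparb, dsparb2, 8, 4) builds a 9-bit FIFO
 * start value from bits 15:8 of DSPARB (the low 8 bits) and bit 4 of
 * DSPARB2 (used as the 9th bit).
 */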

static int vlv_get_fifo_size(struct drm_device *dev,
			     enum pipe pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite0_start, sprite1_start, size;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return 0;
	}

	switch (plane) {
	case 0:
		size = sprite0_start;
		break;
	case 1:
		size = sprite1_start - sprite0_start;
		break;
	case 2:
		size = 512 - 1 - sprite1_start;
		break;
	default:
		return 0;
	}

	DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
		      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
		      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
		      size);

	return size;
}

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills past the
 * watermark point. If the FIFO drains completely, a FIFO underrun will
 * occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size, int cpp,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * Clocks go from a few thousand to several hundred thousand.
	 * Latency is usually a few thousand.
	 */
	entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8, which is the burst size, since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}

static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, cpp;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->cursor->state->crtc_w * cpp;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, cpp, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * cpp;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * cpp / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * cpp * crtc->cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct intel_crtc *crtc,
				const struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(VLV_DDL(pipe),
		   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
		   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
		   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
		   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	}

	/* zero (unused) WM1 watermarks */
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);
	I915_WRITE(DSPHOWM1, 0);

	POSTING_READ(DSPFW1);
}

enum vlv_wm_level {
	VLV_WM_LEVEL_PM2,
	VLV_WM_LEVEL_PM5,
	VLV_WM_LEVEL_DDR_DVFS,
};

/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
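
/*
 * Worked example with illustrative numbers: pixel_rate = 148500 (kHz),
 * pipe_htotal = 2200 and latency = 300 (30us in 0.1us units): the display
 * scans out 300 * 148500 / (2200 * 10000) = 2 full lines during the
 * latency window, so method2 charges (2 + 1) lines of
 * horiz_pixels * cpp bytes, rounded up to 64-byte blocks.
 */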

static void vlv_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}

static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
				     struct intel_crtc *crtc,
				     const struct intel_plane_state *state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!state->visible)
		return 0;

	cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
	clock = crtc->config->base.adjusted_mode.crtc_clock;
	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
	width = crtc->config->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}

static void vlv_compute_fifo(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	unsigned int total_rate = 0;
	const int fifo_size = 512 - 1;
	int fifo_extra, fifo_left = fifo_size;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (state->visible) {
			wm_state->num_active_planes++;
			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);
		unsigned int rate;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			plane->wm.fifo_size = 63;
			continue;
		}

		if (!state->visible) {
			plane->wm.fifo_size = 0;
			continue;
		}

		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		plane->wm.fifo_size = fifo_size * rate / total_rate;
		fifo_left -= plane->wm.fifo_size;
	}

	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* give it all to the first plane if none are active */
		if (plane->wm.fifo_size == 0 &&
		    wm_state->num_active_planes)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		plane->wm.fifo_size += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(fifo_left != 0);
}

static void vlv_invert_wms(struct intel_crtc *crtc)
{
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	int level;

	for (level = 0; level < wm_state->num_levels; level++) {
		struct drm_device *dev = crtc->base.dev;
		const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
		struct intel_plane *plane;

		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;

		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = plane->wm.fifo_size -
					wm_state->wm[level].cursor;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = plane->wm.fifo_size -
					wm_state->wm[level].primary;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
					wm_state->wm[level].sprite[sprite];
				break;
			}
		}
	}
}

static void vlv_compute_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
	int level;

	memset(wm_state, 0, sizeof(*wm_state));

	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
	wm_state->num_levels = to_i915(dev)->wm.max_level + 1;

	wm_state->num_active_planes = 0;

	vlv_compute_fifo(crtc);

	if (wm_state->num_active_planes != 1)
		wm_state->cxsr = false;

	if (wm_state->cxsr) {
		for (level = 0; level < wm_state->num_levels; level++) {
			wm_state->sr[level].plane = sr_fifo_size;
			wm_state->sr[level].cursor = 63;
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (!state->visible)
			continue;

		/* normal watermarks */
		for (level = 0; level < wm_state->num_levels; level++) {
			int wm = vlv_compute_wm_level(plane, crtc, state, level);
			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;

			/* hack */
			if (WARN_ON(level == 0 && wm > max_wm))
				wm = max_wm;

			if (wm > plane->wm.fifo_size)
				break;

			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = wm;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = wm;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = wm;
				break;
			}
		}

		wm_state->num_levels = level;

		if (!wm_state->cxsr)
			continue;

		/* maxfifo watermarks */
		switch (plane->base.type) {
			int sprite, level;
		case DRM_PLANE_TYPE_CURSOR:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].cursor =
					wm_state->wm[level].cursor;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].primary);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].sprite[sprite]);
			break;
		}
	}

	/* clear any (partially) filled invalid levels */
	for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
	}

	vlv_invert_wms(crtc);
}

#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)

static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane;
	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			WARN_ON(plane->wm.fifo_size != 63);
			continue;
		}

		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			sprite0_start = plane->wm.fifo_size;
		else if (plane->plane == 0)
			sprite1_start = sprite0_start + plane->wm.fifo_size;
		else
			fifo_size = sprite1_start + plane->wm.fifo_size;
	}

	WARN_ON(fifo_size != 512 - 1);

	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
		      pipe_name(crtc->pipe), sprite0_start,
		      sprite1_start, fifo_size);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ(DSPARB3);
		dsparb2 = I915_READ(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB3, dsparb3);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	default:
		break;
	}
}

static void vlv_merge_wm(struct drm_device *dev,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = to_i915(dev)->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(dev, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm_state;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(dev, crtc) {
		struct vlv_wm_state *wm_state = &crtc->wm_state;
		enum pipe pipe = crtc->pipe;

		if (!crtc->active)
			continue;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
	}
}

static void vlv_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct vlv_wm_values wm = {};

	vlv_compute_wm(intel_crtc);
	vlv_merge_wm(dev, &wm);

	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
		/* FIXME should be part of crtc atomic commit */
		vlv_pipe_set_fifo_size(intel_crtc);
		return;
	}

	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, false);

	if (wm.level < VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, false);

	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, false);

	/* FIXME should be part of crtc atomic commit */
	vlv_pipe_set_fifo_size(intel_crtc);

	vlv_write_wm_values(intel_crtc, &wm);

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);

	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, true);

	dev_priv->wm.vlv = wm;
}

#define single_plane_enabled(mask) is_power_of_2(mask)
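
/*
 * For example, single_plane_enabled(BIT(PIPE_A)) is true, while a mask
 * with both PIPE_A and PIPE_B set is not a power of two and fails the
 * check.
 */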

static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * crtc->cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}

uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	uint32_t pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (pipe_config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * cpp * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t cpp,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;
	if (WARN_ON(!pipe_htotal))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}
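
/*
 * Worked example with illustrative numbers: pixel_rate = 148500, cpp = 4
 * and latency = 50 (5us in 0.1us units) give method1 a charge of
 * DIV_ROUND_UP_ULL(148500 * 4 * 50, 64 * 10000) + 2 = 47 + 2 = 49 FIFO
 * blocks regardless of line length, while method2 rounds the same latency
 * window up to whole lines; the callers take the minimum of the two.
 */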

static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t cpp)
{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible. But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
	if (WARN_ON(!cpp))
		return 0;
	if (WARN_ON(!horiz_pixels))
		return 0;

	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}
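
/*
 * Worked example with illustrative numbers: pri_val = 49 on a 1920 pixel
 * wide plane at 4 bytes per pixel gives
 * DIV_ROUND_UP(49 * 64, 1920 * 4) + 2 = 1 + 2 = 3 FBC watermark lines.
 */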

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value,
				   bool is_lp)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->dst),
				 cpp, mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->dst),
				 cpp, mem_value);
	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	/*
	 * We treat the cursor plane as always-on for the purposes of watermark
	 * calculation. Until we have two-stage watermark programming merged,
	 * this is necessary to avoid flickering.
	 */
	int cpp = 4;
	int width = pstate->visible ? pstate->base.crtc_w : 64;

	if (!cstate->base.active)
		return 0;

	return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
			      cstate->base.adjusted_mode.crtc_htotal,
			      width, cpp, mem_value);
}

/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), cpp);
}

static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}

static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}

/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}

/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}

static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}

static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev, level, false);
	max->spr = ilk_plane_wm_reg_max(dev, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev, level);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}

static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}

static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *intel_crtc,
				 int level,
				 struct intel_crtc_state *cstate,
				 struct intel_plane_state *pristate,
				 struct intel_plane_state *sprstate,
				 struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	if (pristate) {
		result->pri_val = ilk_compute_pri_wm(cstate, pristate,
						     pri_latency, level);
		result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
	}

	if (sprstate)
		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);

	if (curstate)
		result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);

	result->enable = true;
}

static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev,
			struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *adjusted_mode =
		&cstate->base.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!cstate->base.active)
		return 0;
	if (WARN_ON(adjusted_mode->crtc_clock == 0))
		return 0;
	if (WARN_ON(dev_priv->cdclk_freq == 0))
		return 0;

	/* The watermarks are computed based on how long it takes to fill a
	 * single row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
				     adjusted_mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
					 dev_priv->cdclk_freq);

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}
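
/*
 * Worked example with illustrative numbers: crtc_htotal = 2200 and
 * crtc_clock = 148500 kHz give a line time of roughly 14.8us, so
 * linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119 in units
 * of 1/8 us.
 */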
2066 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
2068 struct drm_i915_private *dev_priv = dev->dev_private;
2073 int level, max_level = ilk_wm_max_level(dev);
2075 /* read the first set of memory latencies[0:3] */
2076 val = 0; /* data0 to be programmed to 0 for first set */
2077 mutex_lock(&dev_priv->rps.hw_lock);
2078 ret = sandybridge_pcode_read(dev_priv,
2079 GEN9_PCODE_READ_MEM_LATENCY,
2081 mutex_unlock(&dev_priv->rps.hw_lock);
2084 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2088 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2089 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2090 GEN9_MEM_LATENCY_LEVEL_MASK;
2091 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2092 GEN9_MEM_LATENCY_LEVEL_MASK;
2093 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2094 GEN9_MEM_LATENCY_LEVEL_MASK;
2096 /* read the second set of memory latencies[4:7] */
2097 val = 1; /* data0 to be programmed to 1 for second set */
2098 mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);

		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}
2108 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2109 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2110 GEN9_MEM_LATENCY_LEVEL_MASK;
2111 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2112 GEN9_MEM_LATENCY_LEVEL_MASK;
2113 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2114 GEN9_MEM_LATENCY_LEVEL_MASK;
		/*
		 * WaWmMemoryReadLatency:skl
		 *
		 * punit doesn't take into account the read latency so we need
		 * to add 2us to the various latency levels we retrieve from
		 * the punit.
		 *   - W0 is a bit special in that it's the only level that
		 *   can't be disabled if we want to have display working, so
		 *   we always add 2us there.
		 *   - For levels >=1, punit returns 0us latency when they are
		 *   disabled, so we respect that and don't add 2us then.
		 *
		 * Additionally, if a level n (n > 1) has a 0us latency, all
		 * levels m (m >= n) need to be disabled. We make sure to
		 * sanitize the values out of the punit to satisfy this
		 * requirement.
		 */
		wm[0] += 2;
		for (level = 1; level <= max_level; level++) {
			if (wm[level] == 0) {
				for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;
				break;
			}
			wm[level] += 2;
		}
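		/*
		 * Worked example (latencies assumed, for illustration only):
		 * raw punit values {16, 12, 8, 0, 4} become {18, 14, 10, 0, 0}:
		 * +2us is applied to each enabled level, and every level from
		 * the first zero onwards stays disabled.
		 */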
2143 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2144 uint64_t sskpd = I915_READ64(MCH_SSKPD);
		wm[0] = (sskpd >> 56) & 0xFF;

		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
2149 wm[1] = (sskpd >> 4) & 0xFF;
2150 wm[2] = (sskpd >> 12) & 0xFF;
2151 wm[3] = (sskpd >> 20) & 0x1FF;
2152 wm[4] = (sskpd >> 32) & 0x1FF;
2153 } else if (INTEL_INFO(dev)->gen >= 6) {
2154 uint32_t sskpd = I915_READ(MCH_SSKPD);
2156 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2157 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2158 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2159 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2160 } else if (INTEL_INFO(dev)->gen >= 5) {
2161 uint32_t mltr = I915_READ(MLTR_ILK);
		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;

		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2166 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;
}
static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;

	/* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev))
		wm[3] *= 2;
}
int ilk_wm_max_level(const struct drm_device *dev)
{
	/* how many WM levels are we expecting */
	if (INTEL_INFO(dev)->gen >= 9)
		return 7;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 4;
	else if (INTEL_INFO(dev)->gen >= 6)
		return 3;
	else
		return 2;
}
static void intel_print_wm_latency(struct drm_device *dev,
				   const char *name,
				   const uint16_t wm[8])
{
	int level, max_level = ilk_wm_max_level(dev);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			DRM_ERROR("%s WM%d latency not provided\n",
				  name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (INTEL_INFO(dev)->gen >= 9)
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
			      name, level, wm[level],
			      latency / 10, latency % 10);
	}
}
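/*
 * For illustration (values assumed): pre-gen9, a WM1 value of 4 is stored
 * in 0.5us units and prints as 4 * 5 = 20 tenths, i.e. "2.0 usec"; on gen9
 * the same raw value is already in us and prints as "4.0 usec".
 */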
2231 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2232 uint16_t wm[5], uint16_t min)
{
	int level, max_level = ilk_wm_max_level(dev_priv->dev);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
2240 for (level = 1; level <= max_level; level++)
		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}
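/*
 * For illustration (assuming the 0.1us WM0 and 0.5us WM1+ units used by the
 * printing code above): with min = 12, WM0 is raised to at least 1.2us and
 * WM1+ to at least DIV_ROUND_UP(12, 5) = 3, i.e. 1.5us.
 */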
static void snb_wm_latency_quirk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool changed;

	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
2255 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2256 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2257 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
	if (!changed)
		return;

	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2263 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2264 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2265 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2268 static void ilk_setup_wm_latency(struct drm_device *dev)
2270 struct drm_i915_private *dev_priv = dev->dev_private;
2272 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2274 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2275 sizeof(dev_priv->wm.pri_latency));
2276 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2277 sizeof(dev_priv->wm.pri_latency));
2279 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2280 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2282 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2283 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2284 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
	if (IS_GEN6(dev))
		snb_wm_latency_quirk(dev);
}
2290 static void skl_setup_wm_latency(struct drm_device *dev)
2292 struct drm_i915_private *dev_priv = dev->dev_private;
2294 intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
2295 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
2298 static bool ilk_validate_pipe_wm(struct drm_device *dev,
2299 struct intel_pipe_wm *pipe_wm)
2301 /* LP0 watermark maximums depend on this pipe alone */
2302 const struct intel_wm_config config = {
2303 .num_pipes_active = 1,
2304 .sprites_enabled = pipe_wm->sprites_enabled,
		.sprites_scaled = pipe_wm->sprites_scaled,
	};
	struct ilk_wm_maximums max;
2309 /* LP0 watermarks always use 1/2 DDB partitioning */
2310 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2312 /* At least LP0 must be valid */
2313 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
		DRM_DEBUG_KMS("LP0 watermark invalid\n");
		return false;
	}

	return true;
}
2321 /* Compute new watermarks for the pipe */
2322 static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
2324 struct drm_atomic_state *state = cstate->base.state;
2325 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
2326 struct intel_pipe_wm *pipe_wm;
2327 struct drm_device *dev = state->dev;
2328 const struct drm_i915_private *dev_priv = dev->dev_private;
2329 struct intel_plane *intel_plane;
2330 struct intel_plane_state *pristate = NULL;
2331 struct intel_plane_state *sprstate = NULL;
2332 struct intel_plane_state *curstate = NULL;
2333 int level, max_level = ilk_wm_max_level(dev), usable_level;
2334 struct ilk_wm_maximums max;
2336 pipe_wm = &cstate->wm.optimal.ilk;
2338 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2339 struct intel_plane_state *ps;
		ps = intel_atomic_get_existing_plane_state(state,
							   intel_plane);
		if (!ps)
			continue;

		if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			pristate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
			sprstate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
			curstate = ps;
	}
	pipe_wm->pipe_enabled = cstate->base.active;
	if (sprstate) {
		pipe_wm->sprites_enabled = sprstate->visible;
		pipe_wm->sprites_scaled = sprstate->visible &&
			(drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
			 drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
	}
2362 usable_level = max_level;
	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled)
		usable_level = 2;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (pipe_wm->sprites_scaled)
		usable_level = 0;
2372 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
2373 pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
2375 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
2376 pipe_wm->wm[0] = pipe_wm->raw_wm[0];
2378 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2379 pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);
	if (!ilk_validate_pipe_wm(dev, pipe_wm))
		return -EINVAL;
2384 ilk_compute_wm_reg_maximums(dev, 1, &max);
2386 for (level = 1; level <= max_level; level++) {
2387 struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
2389 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
2390 pristate, sprstate, curstate, wm);
		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (level > usable_level)
			continue;

		if (ilk_validate_wm_level(level, &max, wm))
			pipe_wm->wm[level] = *wm;
		else
			usable_level = level;
	}

	return 0;
}
/*
 * Build a set of 'intermediate' watermark values that satisfy both the old
 * state and the new state. These can be programmed to the hardware
 * immediately.
 */
2414 static int ilk_compute_intermediate_wm(struct drm_device *dev,
2415 struct intel_crtc *intel_crtc,
2416 struct intel_crtc_state *newstate)
2418 struct intel_pipe_wm *a = &newstate->wm.intermediate;
2419 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
2420 int level, max_level = ilk_wm_max_level(dev);
2423 * Start with the final, target watermarks, then combine with the
2424 * currently active watermarks to get values that are safe both before
2425 * and after the vblank.
2427 *a = newstate->wm.optimal.ilk;
2428 a->pipe_enabled |= b->pipe_enabled;
2429 a->sprites_enabled |= b->sprites_enabled;
2430 a->sprites_scaled |= b->sprites_scaled;
2432 for (level = 0; level <= max_level; level++) {
2433 struct intel_wm_level *a_wm = &a->wm[level];
2434 const struct intel_wm_level *b_wm = &b->wm[level];
2436 a_wm->enable &= b_wm->enable;
2437 a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
2438 a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
2439 a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
2440 a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
2444 * We need to make sure that these merged watermark values are
2445 * actually a valid configuration themselves. If they're not,
2446 * there's no safe way to transition from the old state to
2447 * the new state, so we need to fail the atomic transaction.
	if (!ilk_validate_pipe_wm(dev, a))
		return -EINVAL;
2453 * If our intermediate WM are identical to the final WM, then we can
2454 * omit the post-vblank programming; only update if it's different.
	if (memcmp(a, &newstate->wm.optimal.ilk, sizeof(*a)) == 0)
		newstate->wm.need_postvbl_update = false;

	return 0;
}
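/*
 * Illustration (values assumed): if the currently active state has
 * pri_val = 40 and the new optimal state has pri_val = 24, the intermediate
 * watermark keeps the maximum, 40, so the pipe stays safe until the vblank,
 * after which the optimal 24 can be programmed.
 */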
2463 * Merge the watermarks from all active pipes for a specific level.
static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
			       struct intel_wm_level *ret_wm)
{
2469 const struct intel_crtc *intel_crtc;
2471 ret_wm->enable = true;
2473 for_each_intel_crtc(dev, intel_crtc) {
2474 const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
2475 const struct intel_wm_level *wm = &active->wm[level];
		if (!active->pipe_enabled)
			continue;
		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;
2488 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2489 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2490 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2491 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
/*
 * Merge all low power watermarks for all active pipes.
 */
2498 static void ilk_wm_merge(struct drm_device *dev,
2499 const struct intel_wm_config *config,
2500 const struct ilk_wm_maximums *max,
2501 struct intel_pipe_wm *merged)
2503 struct drm_i915_private *dev_priv = dev->dev_private;
2504 int level, max_level = ilk_wm_max_level(dev);
2505 int last_enabled_level = max_level;
2507 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2508 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2509 config->num_pipes_active > 1)
2510 last_enabled_level = 0;
2512 /* ILK: FBC WM must be disabled always */
2513 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2515 /* merge each WM1+ level */
2516 for (level = 1; level <= max_level; level++) {
2517 struct intel_wm_level *wm = &merged->wm[level];
2519 ilk_merge_wm_level(dev, level, wm);
		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
2524 /* make sure all following levels get disabled */
2525 last_enabled_level = level - 1;
		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}
2538 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
2544 if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
2545 intel_fbc_is_active(dev_priv)) {
2546 for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}
static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}
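/*
 * For example: with five levels and wm[4] enabled, the mapping is
 * LP1 -> 1, LP2 -> 3, LP3 -> 4 (level 2 is skipped); otherwise LP1, LP2
 * and LP3 simply map to levels 1, 2 and 3.
 */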
2560 /* The value we need to program into the WM_LPx latency field */
2561 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 2 * level;
	else
		return dev_priv->wm.pri_latency[level];
}
2571 static void ilk_compute_wm_results(struct drm_device *dev,
2572 const struct intel_pipe_wm *merged,
2573 enum intel_ddb_partitioning partitioning,
2574 struct ilk_wm_values *results)
{
	struct intel_crtc *intel_crtc;
	int level, wm_lp;
2579 results->enable_fbc_wm = merged->fbc_wm_enabled;
2580 results->partitioning = partitioning;
2582 /* LP1+ register values */
2583 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2584 const struct intel_wm_level *r;
2586 level = ilk_wm_lp_to_level(wm_lp, merged);
2588 r = &merged->wm[level];
		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
2594 results->wm_lp[wm_lp - 1] =
2595 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2602 if (INTEL_INFO(dev)->gen >= 8)
2603 results->wm_lp[wm_lp - 1] |=
2604 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
2607 r->fbc_val << WM1_LP_FBC_SHIFT;
		/*
		 * Always set WM1S_LP_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
2613 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2614 WARN_ON(wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
	}
2620 /* LP0 register values */
2621 for_each_intel_crtc(dev, intel_crtc) {
2622 enum pipe pipe = intel_crtc->pipe;
2623 const struct intel_wm_level *r =
2624 &intel_crtc->wm.active.ilk.wm[0];
		if (WARN_ON(!r->enable))
			continue;
2629 results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
2631 results->wm_pipe[pipe] =
2632 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
	}
}
2638 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2639 * case both are at the same level. Prefer r1 in case they're the same. */
2640 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2641 struct intel_pipe_wm *r1,
2642 struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(dev);
	int level1 = 0, level2 = 0;
2647 for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}
2654 if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}
2666 /* dirty bits used to track which watermarks need changes */
2667 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2668 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2669 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2670 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2671 #define WM_DIRTY_FBC (1 << 24)
2672 #define WM_DIRTY_DDB (1 << 25)
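/*
 * Resulting bit layout, derived from the macros above: pipes use bits 0-2,
 * linetimes bits 8-10, LP1-LP3 bits 16-18, FBC bit 24 and DDB bit 25.
 */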
2674 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
2675 const struct ilk_wm_values *old,
2676 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;
2682 for_each_pipe(dev_priv, pipe) {
2683 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2684 dirty |= WM_DIRTY_LINETIME(pipe);
2685 /* Must disable LP1+ watermarks too */
2686 dirty |= WM_DIRTY_LP_ALL;
2689 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2690 dirty |= WM_DIRTY_PIPE(pipe);
2691 /* Must disable LP1+ watermarks too */
2692 dirty |= WM_DIRTY_LP_ALL;
2696 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2697 dirty |= WM_DIRTY_FBC;
2698 /* Must disable LP1+ watermarks too */
2699 dirty |= WM_DIRTY_LP_ALL;
2702 if (old->partitioning != new->partitioning) {
2703 dirty |= WM_DIRTY_DDB;
2704 /* Must disable LP1+ watermarks too */
2705 dirty |= WM_DIRTY_LP_ALL;
2708 /* LP1+ watermarks already deemed dirty, no need to continue */
2709 if (dirty & WM_DIRTY_LP_ALL)
2712 /* Find the lowest numbered LP1+ watermark in need of an update... */
2713 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2714 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}
2719 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}
static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;
2732 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2733 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
2737 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2738 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
2742 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2743 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}
	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
}
/*
 * The spec says we shouldn't write when we don't need to, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
2760 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2761 struct ilk_wm_values *results)
{
	struct drm_device *dev = dev_priv->dev;
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);
2774 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2775 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2776 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2777 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2778 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2779 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2781 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2782 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2783 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2784 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2785 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2786 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2788 if (dirty & WM_DIRTY_DDB) {
2789 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2790 val = I915_READ(WM_MISC);
2791 if (results->partitioning == INTEL_DDB_PART_1_2)
2792 val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
2795 I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
2798 if (results->partitioning == INTEL_DDB_PART_1_2)
2799 val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}
2806 if (dirty & WM_DIRTY_FBC) {
2807 val = I915_READ(DISP_ARB_CTL);
2808 if (results->enable_fbc_wm)
2809 val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}
2815 if (dirty & WM_DIRTY_LP(1) &&
2816 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2817 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2819 if (INTEL_INFO(dev)->gen >= 7) {
2820 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2821 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2822 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2823 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2826 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2827 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2828 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2829 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2830 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2831 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
	dev_priv->wm.hw = *results;
}
bool ilk_disable_lp_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
/*
 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
 * different active planes.
 */
2848 #define SKL_DDB_SIZE 896 /* in blocks */
2849 #define BXT_DDB_SIZE 512
/*
 * Return the index of a plane in the SKL DDB and wm result arrays. Primary
 * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
 * other universal planes are in indices 1..n. Note that this may leave unused
 * indices between the top "sprite" plane and the cursor.
 */
static unsigned int
skl_wm_plane_id(const struct intel_plane *plane)
{
2860 switch (plane->base.type) {
	case DRM_PLANE_TYPE_PRIMARY:
		return 0;
2863 case DRM_PLANE_TYPE_CURSOR:
2864 return PLANE_CURSOR;
	case DRM_PLANE_TYPE_OVERLAY:
		return plane->plane + 1;
	default:
		MISSING_CASE(plane->base.type);
		return plane->plane;
	}
}
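/*
 * Example of the resulting mapping: the primary plane lands in index 0, the
 * first sprite (an overlay plane with plane->plane == 0) in index 1, the
 * second sprite in index 2, and the cursor in PLANE_CURSOR.
 */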
static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2875 const struct intel_crtc_state *cstate,
2876 const struct intel_wm_config *config,
2877 struct skl_ddb_entry *alloc /* out */)
2879 struct drm_crtc *for_crtc = cstate->base.crtc;
2880 struct drm_crtc *crtc;
2881 unsigned int pipe_size, ddb_size;
2882 int nth_active_pipe;
	if (!cstate->base.active) {
		alloc->start = 0;
		alloc->end = 0;
		return;
	}
2890 if (IS_BROXTON(dev))
2891 ddb_size = BXT_DDB_SIZE;
2893 ddb_size = SKL_DDB_SIZE;
2895 ddb_size -= 4; /* 4 blocks for bypass path allocation */
2897 nth_active_pipe = 0;
2898 for_each_crtc(dev, crtc) {
		if (!to_intel_crtc(crtc)->active)
			continue;

		if (crtc == for_crtc)
			break;

		nth_active_pipe++;
	}
2908 pipe_size = ddb_size / config->num_pipes_active;
2909 alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
2910 alloc->end = alloc->start + pipe_size;
static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
{
	if (config->num_pipes_active == 1)
		return 32;

	return 8;
}
2921 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
2923 entry->start = reg & 0x3ff;
	entry->end = (reg >> 16) & 0x3ff;

	if (entry->end)
		entry->end += 1;
}
2929 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
			  struct skl_ddb_allocation *ddb /* out */)
{
	enum pipe pipe;
	int plane;
	u32 val;

	memset(ddb, 0, sizeof(*ddb));
2938 for_each_pipe(dev_priv, pipe) {
2939 enum intel_display_power_domain power_domain;
2941 power_domain = POWER_DOMAIN_PIPE(pipe);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
2945 for_each_plane(dev_priv, pipe, plane) {
2946 val = I915_READ(PLANE_BUF_CFG(pipe, plane));
			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
						   val);
		}
2951 val = I915_READ(CUR_BUF_CFG(pipe));
		skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
					   val);

		intel_display_power_put(dev_priv, power_domain);
	}
}
static unsigned int
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
			     const struct drm_plane_state *pstate,
			     int y)
{
	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
2965 struct drm_framebuffer *fb = pstate->fb;
2966 uint32_t width = 0, height = 0;
2968 width = drm_rect_width(&intel_pstate->src) >> 16;
2969 height = drm_rect_height(&intel_pstate->src) >> 16;
2971 if (intel_rotation_90_or_270(pstate->rotation))
2972 swap(width, height);
2974 /* for planar format */
2975 if (fb->pixel_format == DRM_FORMAT_NV12) {
2976 if (y) /* y-plane data rate */
2977 return width * height *
2978 drm_format_plane_cpp(fb->pixel_format, 0);
2979 else /* uv-plane data rate */
2980 return (width / 2) * (height / 2) *
2981 drm_format_plane_cpp(fb->pixel_format, 1);
2984 /* for packed formats */
2985 return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
/*
 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
 * a 8192x4096@32bpp framebuffer:
 *   3 * 4096 * 8192 * 4 = 402,653,184 < 2^32
 */
static unsigned int
skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate)
{
2996 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
2997 struct drm_device *dev = intel_crtc->base.dev;
2998 const struct intel_plane *intel_plane;
2999 unsigned int total_data_rate = 0;
3001 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3002 const struct drm_plane_state *pstate = intel_plane->base.state;
		if (pstate->fb == NULL)
			continue;

		if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;
		total_data_rate += skl_plane_relative_data_rate(cstate,
								pstate,
								0);
3015 if (pstate->fb->pixel_format == DRM_FORMAT_NV12)
			total_data_rate += skl_plane_relative_data_rate(cstate,
									pstate,
									1);
	}

	return total_data_rate;
}
static void
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3027 struct skl_ddb_allocation *ddb /* out */)
3029 struct drm_crtc *crtc = cstate->base.crtc;
3030 struct drm_device *dev = crtc->dev;
3031 struct drm_i915_private *dev_priv = to_i915(dev);
3032 struct intel_wm_config *config = &dev_priv->wm.config;
3033 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3034 struct intel_plane *intel_plane;
3035 enum pipe pipe = intel_crtc->pipe;
3036 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
3037 uint16_t alloc_size, start, cursor_blocks;
3038 uint16_t minimum[I915_MAX_PLANES];
3039 uint16_t y_minimum[I915_MAX_PLANES];
3040 unsigned int total_data_rate;
3042 skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc);
3043 alloc_size = skl_ddb_entry_size(alloc);
3044 if (alloc_size == 0) {
3045 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3046 memset(&ddb->plane[pipe][PLANE_CURSOR], 0,
		       sizeof(ddb->plane[pipe][PLANE_CURSOR]));
		return;
	}
3051 cursor_blocks = skl_cursor_allocation(config);
3052 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
3053 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
3055 alloc_size -= cursor_blocks;
3056 alloc->end -= cursor_blocks;
	/* 1. Allocate the minimum required blocks for each active plane */
3059 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3060 struct drm_plane *plane = &intel_plane->base;
3061 struct drm_framebuffer *fb = plane->state->fb;
3062 int id = skl_wm_plane_id(intel_plane);
		if (!to_intel_plane_state(plane->state)->visible)
			continue;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		minimum[id] = 8;
		alloc_size -= minimum[id];
3072 y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0;
		alloc_size -= y_minimum[id];
	}
	/*
	 * 2. Distribute the remaining space in proportion to the amount of
	 * data each plane needs to fetch from memory.
	 *
	 * FIXME: we may not allocate every single block here.
	 */
3082 total_data_rate = skl_get_total_relative_data_rate(cstate);
3084 start = alloc->start;
3085 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3086 struct drm_plane *plane = &intel_plane->base;
3087 struct drm_plane_state *pstate = intel_plane->base.state;
3088 unsigned int data_rate, y_data_rate;
3089 uint16_t plane_blocks, y_plane_blocks = 0;
3090 int id = skl_wm_plane_id(intel_plane);
		if (!to_intel_plane_state(pstate)->visible)
			continue;
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;
3097 data_rate = skl_plane_relative_data_rate(cstate, pstate, 0);
		/*
		 * allocation for (packed formats) or (uv-plane part of planar format):
		 * promote the expression to 64 bits to avoid overflowing, the
		 * result is < available as data_rate / total_data_rate < 1
		 */
		plane_blocks = minimum[id];
		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
					total_data_rate);
3108 ddb->plane[pipe][id].start = start;
3109 ddb->plane[pipe][id].end = start + plane_blocks;
3111 start += plane_blocks;
		/*
		 * allocation for y_plane part of planar format:
		 */
3116 if (pstate->fb->pixel_format == DRM_FORMAT_NV12) {
			y_data_rate = skl_plane_relative_data_rate(cstate,
								   pstate,
								   1);
			y_plane_blocks = y_minimum[id];
			y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
						  total_data_rate);
3124 ddb->y_plane[pipe][id].start = start;
3125 ddb->y_plane[pipe][id].end = start + y_plane_blocks;
			start += y_plane_blocks;
		}
	}
}
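/*
 * Worked example (numbers assumed, for illustration only): with a single
 * active SKL pipe the allocation starts from 896 - 4 = 892 blocks, 32 of
 * which go to the cursor; two planes with data rates in a 3:1 ratio then
 * split what remains (after the fixed 8-block minimums) roughly 3:1.
 */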
static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
{
	/* TODO: Take into account the scalers once we support them */
	return config->base.adjusted_mode.crtc_clock;
}
/*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
 * for the read latency) and cpp should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
	uint32_t wm_intermediate_val, ret;

	if (latency == 0)
		return UINT_MAX;

	wm_intermediate_val = latency * pixel_rate * cpp / 512;
	ret = DIV_ROUND_UP(wm_intermediate_val, 1000);

	return ret;
}
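/*
 * For illustration (values assumed): pixel_rate = 148500 kHz, cpp = 4 and
 * latency = 4 us give 4 * 148500 * 4 / 512 = 4640 and
 * DIV_ROUND_UP(4640, 1000) = 5 blocks.
 */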
3159 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
3160 uint32_t horiz_pixels, uint8_t cpp,
			       uint64_t tiling, uint32_t latency)
{
	uint32_t ret;
	uint32_t plane_bytes_per_line, plane_blocks_per_line;
	uint32_t wm_intermediate_val;

	if (latency == 0)
		return UINT_MAX;

	plane_bytes_per_line = horiz_pixels * cpp;
3172 if (tiling == I915_FORMAT_MOD_Y_TILED ||
3173 tiling == I915_FORMAT_MOD_Yf_TILED) {
3174 plane_bytes_per_line *= 4;
3175 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3176 plane_blocks_per_line /= 4;
3178 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3181 wm_intermediate_val = latency * pixel_rate;
3182 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
	      plane_blocks_per_line;

	return ret;
}
3188 static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
3189 const struct intel_crtc *intel_crtc)
3191 struct drm_device *dev = intel_crtc->base.dev;
3192 struct drm_i915_private *dev_priv = dev->dev_private;
3193 const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
	/*
	 * If ddb allocation of pipes changed, it may require recalculation of
	 * watermarks
	 */
	if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe)))
		return true;

	return false;
}
3205 static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3206 struct intel_crtc_state *cstate,
3207 struct intel_plane *intel_plane,
				 uint16_t ddb_allocation,
				 int level,
				 uint16_t *out_blocks, /* out */
				 uint8_t *out_lines /* out */)
{
	struct drm_plane *plane = &intel_plane->base;
3214 struct drm_framebuffer *fb = plane->state->fb;
3215 struct intel_plane_state *intel_pstate =
3216 to_intel_plane_state(plane->state);
3217 uint32_t latency = dev_priv->wm.skl_latency[level];
3218 uint32_t method1, method2;
3219 uint32_t plane_bytes_per_line, plane_blocks_per_line;
3220 uint32_t res_blocks, res_lines;
	uint32_t selected_result;
	uint8_t cpp;
	uint32_t width = 0, height = 0;

	if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
		return false;
3228 width = drm_rect_width(&intel_pstate->src) >> 16;
3229 height = drm_rect_height(&intel_pstate->src) >> 16;
3231 if (intel_rotation_90_or_270(plane->state->rotation))
3232 swap(width, height);
3234 cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
				 cpp, latency);
	method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 width,
				 cpp,
				 fb->modifier[0],
				 latency);
3244 plane_bytes_per_line = width * cpp;
3245 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3247 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3248 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
3249 uint32_t min_scanlines = 4;
3250 uint32_t y_tile_minimum;
3251 if (intel_rotation_90_or_270(plane->state->rotation)) {
3252 int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
3253 drm_format_plane_cpp(fb->pixel_format, 1) :
				drm_format_plane_cpp(fb->pixel_format, 0);

			switch (cpp) {
			case 1:
				min_scanlines = 16;
				break;
			case 2:
				min_scanlines = 8;
				break;
			case 8:
				WARN(1, "Unsupported pixel depth for rotation");
			}
		}
3267 y_tile_minimum = plane_blocks_per_line * min_scanlines;
		selected_result = max(method2, y_tile_minimum);
	} else {
		if ((ddb_allocation / plane_blocks_per_line) >= 1)
			selected_result = min(method1, method2);
		else
			selected_result = method1;
	}
3276 res_blocks = selected_result + 1;
3277 res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
3279 if (level >= 1 && level <= 7) {
3280 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
		    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
			res_lines += 4;
		else
			res_blocks++;
	}

	if (res_blocks >= ddb_allocation || res_lines > 31)
		return false;
3290 *out_blocks = res_blocks;
	*out_lines = res_lines;

	return true;
}
3296 static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3297 struct skl_ddb_allocation *ddb,
				     struct intel_crtc_state *cstate,
				     int level,
				     struct skl_wm_level *result)
{
3302 struct drm_device *dev = dev_priv->dev;
3303 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3304 struct intel_plane *intel_plane;
3305 uint16_t ddb_blocks;
3306 enum pipe pipe = intel_crtc->pipe;
3308 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3309 int i = skl_wm_plane_id(intel_plane);
3311 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
		result->plane_en[i] = skl_compute_plane_wm(dev_priv,
						cstate,
						intel_plane,
						ddb_blocks,
						level,
						&result->plane_res_b[i],
						&result->plane_res_l[i]);
	}
}
static uint32_t
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
	if (!cstate->base.active)
		return 0;

	if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
		return 0;
3332 return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
3333 skl_pipe_pixel_rate(cstate));
3336 static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
3337 struct skl_wm_level *trans_wm /* out */)
3339 struct drm_crtc *crtc = cstate->base.crtc;
3340 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3341 struct intel_plane *intel_plane;
	if (!cstate->base.active)
		return;
3346 /* Until we know more, just disable transition WMs */
3347 for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
3348 int i = skl_wm_plane_id(intel_plane);
3350 trans_wm->plane_en[i] = false;
3354 static void skl_compute_pipe_wm(struct intel_crtc_state *cstate,
3355 struct skl_ddb_allocation *ddb,
3356 struct skl_pipe_wm *pipe_wm)
3358 struct drm_device *dev = cstate->base.crtc->dev;
3359 const struct drm_i915_private *dev_priv = dev->dev_private;
3360 int level, max_level = ilk_wm_max_level(dev);
3362 for (level = 0; level <= max_level; level++) {
3363 skl_compute_wm_level(dev_priv, ddb, cstate,
3364 level, &pipe_wm->wm[level]);
3366 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
3368 skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
3371 static void skl_compute_wm_results(struct drm_device *dev,
3372 struct skl_pipe_wm *p_wm,
3373 struct skl_wm_values *r,
				   struct intel_crtc *intel_crtc)
{
	int level, max_level = ilk_wm_max_level(dev);
	enum pipe pipe = intel_crtc->pipe;
	uint32_t temp;
	int i;
3381 for (level = 0; level <= max_level; level++) {
3382 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
			temp = 0;

			temp |= p_wm->wm[level].plane_res_l[i] <<
3386 PLANE_WM_LINES_SHIFT;
3387 temp |= p_wm->wm[level].plane_res_b[i];
3388 if (p_wm->wm[level].plane_en[i])
3389 temp |= PLANE_WM_EN;
			r->plane[pipe][i][level] = temp;
		}

		temp = 0;

		temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
3397 temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];
3399 if (p_wm->wm[level].plane_en[PLANE_CURSOR])
3400 temp |= PLANE_WM_EN;
3402 r->plane[pipe][PLANE_CURSOR][level] = temp;
3406 /* transition WMs */
3407 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
		temp = 0;
		temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
3410 temp |= p_wm->trans_wm.plane_res_b[i];
3411 if (p_wm->trans_wm.plane_en[i])
3412 temp |= PLANE_WM_EN;
		r->plane_trans[pipe][i] = temp;
	}

	temp = 0;
	temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
3419 temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
3420 if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
3421 temp |= PLANE_WM_EN;
3423 r->plane_trans[pipe][PLANE_CURSOR] = temp;
3425 r->wm_linetime[pipe] = p_wm->linetime;
static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
				i915_reg_t reg,
				const struct skl_ddb_entry *entry)
{
	if (entry->end)
		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
	else
		I915_WRITE(reg, 0);
}
3438 static void skl_write_wm_values(struct drm_i915_private *dev_priv,
3439 const struct skl_wm_values *new)
3441 struct drm_device *dev = dev_priv->dev;
3442 struct intel_crtc *crtc;
3444 for_each_intel_crtc(dev, crtc) {
3445 int i, level, max_level = ilk_wm_max_level(dev);
3446 enum pipe pipe = crtc->pipe;
		if (!new->dirty[pipe])
			continue;
3451 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
3453 for (level = 0; level <= max_level; level++) {
3454 for (i = 0; i < intel_num_planes(crtc); i++)
3455 I915_WRITE(PLANE_WM(pipe, i, level),
3456 new->plane[pipe][i][level]);
3457 I915_WRITE(CUR_WM(pipe, level),
3458 new->plane[pipe][PLANE_CURSOR][level]);
3460 for (i = 0; i < intel_num_planes(crtc); i++)
3461 I915_WRITE(PLANE_WM_TRANS(pipe, i),
3462 new->plane_trans[pipe][i]);
3463 I915_WRITE(CUR_WM_TRANS(pipe),
3464 new->plane_trans[pipe][PLANE_CURSOR]);
3466 for (i = 0; i < intel_num_planes(crtc); i++) {
3467 skl_ddb_entry_write(dev_priv,
3468 PLANE_BUF_CFG(pipe, i),
3469 &new->ddb.plane[pipe][i]);
3470 skl_ddb_entry_write(dev_priv,
3471 PLANE_NV12_BUF_CFG(pipe, i),
3472 &new->ddb.y_plane[pipe][i]);
3475 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
3476 &new->ddb.plane[pipe][PLANE_CURSOR]);
/*
 * When setting up a new DDB allocation arrangement, we need to correctly
 * sequence the times at which the new allocations for the pipes are taken into
 * account or we'll have pipes fetching from space previously allocated to
 * another pipe.
 *
 * Roughly the sequence looks like:
 *  1. re-allocate the pipe(s) with the allocation being reduced and not
 *     overlapping with a previous light-up pipe (another way to put it is:
 *     pipes with their new allocation strictly included into their old ones).
 *  2. re-allocate the other pipes that get their allocation reduced
 *  3. allocate the pipes having their allocation increased
 *
 * Steps 1. and 2. are here to take care of the following case:
 * - Initially DDB looks like this:
 *     |   B    |   C    |
 * - enable pipe A.
 * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
 *   and doesn't overlap with A:
 *     |  A  |  B  |  C  |
 *
 * We need to sequence the re-allocation: C, B, A (and not B, C, A).
 */
static void
skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
{
	int plane;

	DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);

	for_each_plane(dev_priv, pipe, plane) {
3512 I915_WRITE(PLANE_SURF(pipe, plane),
3513 I915_READ(PLANE_SURF(pipe, plane)));
3515 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
static bool
skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
			    const struct skl_ddb_allocation *new,
			    enum pipe pipe)
{
3523 uint16_t old_size, new_size;
3525 old_size = skl_ddb_entry_size(&old->pipe[pipe]);
3526 new_size = skl_ddb_entry_size(&new->pipe[pipe]);
3528 return old_size != new_size &&
3529 new->pipe[pipe].start >= old->pipe[pipe].start &&
	       new->pipe[pipe].end <= old->pipe[pipe].end;
}
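/*
 * Example (entries assumed, for illustration): old = [0, 400) with
 * new = [100, 300) counts as included - the pipe can be flushed in pass 1
 * without ever fetching outside its old window - while old = [0, 400) with
 * new = [300, 500) does not.
 */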
3533 static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
3534 struct skl_wm_values *new_values)
3536 struct drm_device *dev = dev_priv->dev;
3537 struct skl_ddb_allocation *cur_ddb, *new_ddb;
3538 bool reallocated[I915_MAX_PIPES] = {};
3539 struct intel_crtc *crtc;
3542 new_ddb = &new_values->ddb;
3543 cur_ddb = &dev_priv->wm.skl_hw.ddb;
3546 * First pass: flush the pipes with the new allocation contained into
3549 * We'll wait for the vblank on those pipes to ensure we can safely
3550 * re-allocate the freed space without this pipe fetching from it.
3552 for_each_intel_crtc(dev, crtc) {
		enum pipe pipe = crtc->pipe;

		if (!new_values->dirty[pipe])
			continue;

		if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
			continue;
3561 skl_wm_flush_pipe(dev_priv, pipe, 1);
3562 intel_wait_for_vblank(dev, pipe);
3564 reallocated[pipe] = true;
3569 * Second pass: flush the pipes that are having their allocation
3570 * reduced, but overlapping with a previous allocation.
3572 * Here as well we need to wait for the vblank to make sure the freed
3573 * space is not used anymore.
3575 for_each_intel_crtc(dev, crtc) {
		enum pipe pipe = crtc->pipe;

		if (!new_values->dirty[pipe])
			continue;

		if (reallocated[pipe])
			continue;
3584 if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
3585 skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
3586 skl_wm_flush_pipe(dev_priv, pipe, 2);
3587 intel_wait_for_vblank(dev, pipe);
3588 reallocated[pipe] = true;
3593 * Third pass: flush the pipes that got more space allocated.
3595 * We don't need to actively wait for the update here, next vblank
3596 * will just get more DDB space with the correct WM values.
	for_each_intel_crtc(dev, crtc) {
		enum pipe pipe = crtc->pipe;

		if (!new_values->dirty[pipe])
			continue;

		/*
		 * At this point, only the pipes getting more space than
		 * before are left to re-allocate.
		 */
		if (reallocated[pipe])
			continue;

		skl_wm_flush_pipe(dev_priv, pipe, 3);
	}
}
3615 static bool skl_update_pipe_wm(struct drm_crtc *crtc,
3616 struct skl_ddb_allocation *ddb, /* out */
3617 struct skl_pipe_wm *pipe_wm /* out */)
3619 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3620 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3622 skl_allocate_pipe_ddb(cstate, ddb);
3623 skl_compute_pipe_wm(cstate, ddb, pipe_wm);
	if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
		return false;

	intel_crtc->wm.active.skl = *pipe_wm;

	return true;
}
3633 static void skl_update_other_pipe_wm(struct drm_device *dev,
3634 struct drm_crtc *crtc,
3635 struct skl_wm_values *r)
3637 struct intel_crtc *intel_crtc;
3638 struct intel_crtc *this_crtc = to_intel_crtc(crtc);
3641 * If the WM update hasn't changed the allocation for this_crtc (the
3642 * crtc we are currently computing the new WM values for), other
3643 * enabled crtcs will keep the same allocation and we don't need to
3644 * recompute anything for them.
	if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
		return;
3650 * Otherwise, because of this_crtc being freshly enabled/disabled, the
3651 * other active pipes need new DDB allocation and WM values.
3653 for_each_intel_crtc(dev, intel_crtc) {
3654 struct skl_pipe_wm pipe_wm = {};
		bool wm_changed;

		if (this_crtc->pipe == intel_crtc->pipe)
			continue;

		if (!intel_crtc->active)
			continue;

		wm_changed = skl_update_pipe_wm(&intel_crtc->base,
						&r->ddb, &pipe_wm);
		/*
		 * If we end up re-computing the other pipe WM values, it's
		 * because it was really needed, so we expect the WM values
		 * to be different.
		 */
		WARN_ON(!wm_changed);
3673 skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc);
3674 r->dirty[intel_crtc->pipe] = true;
3678 static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe)
3680 watermarks->wm_linetime[pipe] = 0;
3681 memset(watermarks->plane[pipe], 0,
3682 sizeof(uint32_t) * 8 * I915_MAX_PLANES);
3683 memset(watermarks->plane_trans[pipe],
3684 0, sizeof(uint32_t) * I915_MAX_PLANES);
3685 watermarks->plane_trans[pipe][PLANE_CURSOR] = 0;
3687 /* Clear ddb entries for pipe */
3688 memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry));
3689 memset(&watermarks->ddb.plane[pipe], 0,
3690 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
3691 memset(&watermarks->ddb.y_plane[pipe], 0,
3692 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
3693 memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0,
3694 sizeof(struct skl_ddb_entry));
3698 static void skl_update_wm(struct drm_crtc *crtc)
3700 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3701 struct drm_device *dev = crtc->dev;
3702 struct drm_i915_private *dev_priv = dev->dev_private;
3703 struct skl_wm_values *results = &dev_priv->wm.skl_results;
3704 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3705 struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl;
3708 /* Clear all dirty flags */
3709 memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);
3711 skl_clear_wm(results, intel_crtc->pipe);
	if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm))
		return;
3716 skl_compute_wm_results(dev, pipe_wm, results, intel_crtc);
3717 results->dirty[intel_crtc->pipe] = true;
3719 skl_update_other_pipe_wm(dev, crtc, results);
3720 skl_write_wm_values(dev_priv, results);
3721 skl_flush_wm_values(dev_priv, results);
3723 /* store the new configuration */
3724 dev_priv->wm.skl_hw = *results;
3727 static void ilk_compute_wm_config(struct drm_device *dev,
3728 struct intel_wm_config *config)
3730 struct intel_crtc *crtc;
3732 /* Compute the currently _active_ config */
3733 for_each_intel_crtc(dev, crtc) {
3734 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
		if (!wm->pipe_enabled)
			continue;
3739 config->sprites_enabled |= wm->sprites_enabled;
3740 config->sprites_scaled |= wm->sprites_scaled;
3741 config->num_pipes_active++;
3745 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
3747 struct drm_device *dev = dev_priv->dev;
3748 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
3749 struct ilk_wm_maximums max;
3750 struct intel_wm_config config = {};
3751 struct ilk_wm_values results = {};
3752 enum intel_ddb_partitioning partitioning;
3754 ilk_compute_wm_config(dev, &config);
3756 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
3757 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
3759 /* 5/6 split only in single pipe config on IVB+ */
3760 if (INTEL_INFO(dev)->gen >= 7 &&
3761 config.num_pipes_active == 1 && config.sprites_enabled) {
3762 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
3763 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}
3770 partitioning = (best_lp_wm == &lp_wm_1_2) ?
3771 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
3773 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
3775 ilk_write_wm_values(dev_priv, &results);
3778 static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
3780 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
3781 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3783 mutex_lock(&dev_priv->wm.wm_mutex);
3784 intel_crtc->wm.active.ilk = cstate->wm.intermediate;
3785 ilk_program_watermarks(dev_priv);
3786 mutex_unlock(&dev_priv->wm.wm_mutex);
3789 static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
3791 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
3792 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3794 mutex_lock(&dev_priv->wm.wm_mutex);
3795 if (cstate->wm.need_postvbl_update) {
3796 intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
		ilk_program_watermarks(dev_priv);
	}
	mutex_unlock(&dev_priv->wm.wm_mutex);
}
3802 static void skl_pipe_wm_active_state(uint32_t val,
				     struct skl_pipe_wm *active,
				     bool is_transwm,
				     bool is_cursor,
				     int i,
				     int level)
{
	bool is_enabled = (val & PLANE_WM_EN) != 0;

	if (!is_transwm) {
		if (!is_cursor) {
			active->wm[level].plane_en[i] = is_enabled;
3814 active->wm[level].plane_res_b[i] =
3815 val & PLANE_WM_BLOCKS_MASK;
3816 active->wm[level].plane_res_l[i] =
3817 (val >> PLANE_WM_LINES_SHIFT) &
3818 PLANE_WM_LINES_MASK;
		} else {
			active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
3821 active->wm[level].plane_res_b[PLANE_CURSOR] =
3822 val & PLANE_WM_BLOCKS_MASK;
3823 active->wm[level].plane_res_l[PLANE_CURSOR] =
3824 (val >> PLANE_WM_LINES_SHIFT) &
3825 PLANE_WM_LINES_MASK;
		}
	} else {
		if (!is_cursor) {
			active->trans_wm.plane_en[i] = is_enabled;
3830 active->trans_wm.plane_res_b[i] =
3831 val & PLANE_WM_BLOCKS_MASK;
3832 active->trans_wm.plane_res_l[i] =
3833 (val >> PLANE_WM_LINES_SHIFT) &
3834 PLANE_WM_LINES_MASK;
		} else {
			active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
3837 active->trans_wm.plane_res_b[PLANE_CURSOR] =
3838 val & PLANE_WM_BLOCKS_MASK;
3839 active->trans_wm.plane_res_l[PLANE_CURSOR] =
3840 (val >> PLANE_WM_LINES_SHIFT) &
3841 PLANE_WM_LINES_MASK;
3846 static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3848 struct drm_device *dev = crtc->dev;
3849 struct drm_i915_private *dev_priv = dev->dev_private;
3850 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
3851 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3852 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3853 struct skl_pipe_wm *active = &cstate->wm.optimal.skl;
3854 enum pipe pipe = intel_crtc->pipe;
	int level, i, max_level;
	uint32_t temp;

	max_level = ilk_wm_max_level(dev);
3860 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3862 for (level = 0; level <= max_level; level++) {
3863 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3864 hw->plane[pipe][i][level] =
3865 I915_READ(PLANE_WM(pipe, i, level));
3866 hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
3869 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3870 hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
3871 hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));
	if (!intel_crtc->active)
		return;
3876 hw->dirty[pipe] = true;
3878 active->linetime = hw->wm_linetime[pipe];
3880 for (level = 0; level <= max_level; level++) {
3881 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3882 temp = hw->plane[pipe][i][level];
			skl_pipe_wm_active_state(temp, active, false,
						 false, i, level);
		}
		temp = hw->plane[pipe][PLANE_CURSOR][level];
		skl_pipe_wm_active_state(temp, active, false, true, i, level);
	}
3890 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3891 temp = hw->plane_trans[pipe][i];
3892 skl_pipe_wm_active_state(temp, active, true, false, i, 0);
3895 temp = hw->plane_trans[pipe][PLANE_CURSOR];
3896 skl_pipe_wm_active_state(temp, active, true, true, i, 0);
3898 intel_crtc->wm.active.skl = *active;
3901 void skl_wm_get_hw_state(struct drm_device *dev)
3903 struct drm_i915_private *dev_priv = dev->dev_private;
3904 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
3905 struct drm_crtc *crtc;
3907 skl_ddb_get_hw_state(dev_priv, ddb);
3908 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3909 skl_pipe_wm_get_hw_state(crtc);
3912 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3914 struct drm_device *dev = crtc->dev;
3915 struct drm_i915_private *dev_priv = dev->dev_private;
3916 struct ilk_wm_values *hw = &dev_priv->wm.hw;
3917 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3918 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3919 struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
3920 enum pipe pipe = intel_crtc->pipe;
3921 static const i915_reg_t wm0_pipe_reg[] = {
3922 [PIPE_A] = WM0_PIPEA_ILK,
3923 [PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};
3927 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
3928 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3929 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3931 memset(active, 0, sizeof(*active));
3933 active->pipe_enabled = intel_crtc->active;
3935 if (active->pipe_enabled) {
3936 u32 tmp = hw->wm_pipe[pipe];
		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
3944 active->wm[0].enable = true;
3945 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
3946 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
3947 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
		active->linetime = hw->wm_linetime[pipe];
	} else {
		int level, max_level = ilk_wm_max_level(dev);
		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
3957 for (level = 0; level <= max_level; level++)
3958 active->wm[level].enable = true;
3961 intel_crtc->wm.active.ilk = *active;
3964 #define _FW_WM(value, plane) \
3965 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
3966 #define _FW_WM_VLV(value, plane) \
3967 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
3969 static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
3970 struct vlv_wm_values *wm)
3975 for_each_pipe(dev_priv, pipe) {
3976 tmp = I915_READ(VLV_DDL(pipe));
3978 wm->ddl[pipe].primary =
3979 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3980 wm->ddl[pipe].cursor =
3981 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3982 wm->ddl[pipe].sprite[0] =
3983 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3984 wm->ddl[pipe].sprite[1] =
3985 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3988 tmp = I915_READ(DSPFW1);
3989 wm->sr.plane = _FW_WM(tmp, SR);
3990 wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
3991 wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
3992 wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
3994 tmp = I915_READ(DSPFW2);
3995 wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
3996 wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
3997 wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
3999 tmp = I915_READ(DSPFW3);
4000 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
4002 if (IS_CHERRYVIEW(dev_priv)) {
4003 tmp = I915_READ(DSPFW7_CHV);
4004 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
4005 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
4007 tmp = I915_READ(DSPFW8_CHV);
4008 wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
4009 wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
4011 tmp = I915_READ(DSPFW9_CHV);
4012 wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
4013 wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
4015 tmp = I915_READ(DSPHOWM);
4016 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
4017 wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
4018 wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
4019 wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
4020 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
4021 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
4022 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
4023 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
4024 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
4025 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
4027 tmp = I915_READ(DSPFW7);
4028 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
4029 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
4031 tmp = I915_READ(DSPHOWM);
4032 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
4033 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
4034 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
4035 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
4036 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
4037 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
	}
}

#undef _FW_WM
#undef _FW_WM_VLV
4045 void vlv_wm_get_hw_state(struct drm_device *dev)
4047 struct drm_i915_private *dev_priv = to_i915(dev);
4048 struct vlv_wm_values *wm = &dev_priv->wm.vlv;
	struct intel_plane *plane;
	enum pipe pipe;
	u32 val;
4053 vlv_read_wm_values(dev_priv, wm);
	for_each_intel_plane(dev, plane) {
		switch (plane->base.type) {
			int sprite;
		case DRM_PLANE_TYPE_CURSOR:
			plane->wm.fifo_size = 63;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
			break;
		}
	}
4071 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
4072 wm->level = VLV_WM_LEVEL_PM2;
4074 if (IS_CHERRYVIEW(dev_priv)) {
4075 mutex_lock(&dev_priv->rps.hw_lock);
4077 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4078 if (val & DSP_MAXFIFO_PM5_ENABLE)
4079 wm->level = VLV_WM_LEVEL_PM5;
		/*
		 * If DDR DVFS is disabled in the BIOS, Punit
		 * will never ack the request. So if that happens
		 * assume we don't have to enable/disable DDR DVFS
		 * dynamically. To test that just set the REQ_ACK
		 * bit to poke the Punit, but don't change the
		 * HIGH/LOW bits so that we don't actually change
		 * the current state.
		 */
4090 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
4091 val |= FORCE_DDR_FREQ_REQ_ACK;
4092 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
4094 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
4095 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
4096 DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
4097 "assuming DDR DVFS is disabled\n");
4098 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
		} else {
			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
4101 if ((val & FORCE_DDR_HIGH_FREQ) == 0)
4102 wm->level = VLV_WM_LEVEL_DDR_DVFS;
		}

		mutex_unlock(&dev_priv->rps.hw_lock);
	}
4108 for_each_pipe(dev_priv, pipe)
4109 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
4110 pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
4111 wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
4113 DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
4114 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
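/*
 * Same idea for ILK-class hardware: read back the per-pipe watermarks,
 * the LP1-3 registers, the DDB partitioning mode and the FBC WM disable
 * bit so the software state starts out consistent with the hardware.
 */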
void ilk_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	if (INTEL_INFO(dev)->gen >= 7) {
		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
	}

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
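/*
 * Worked example of the "normal" formula documented below, with purely
 * illustrative numbers (not taken from any specific platform): a
 * 148.5 MHz dotclock at 4 bytes per pixel and 30 us of memory latency
 * needs 148500000 * 4 * 0.000030 ~= 17820 bytes of FIFO, i.e. roughly
 * 279 64-byte FIFO entries, before rounding up and adding the 2 extra
 * entries for clock crossings.
 */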
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 *   The final value programmed to the register should always be rounded up,
 *   and include an extra 2 entries to account for clock crossings.
 *
 *   We don't use the sprite, so we can ignore that. And on Crestline we have
 *   to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}
/*
 * Lock protecting IPS related data structures
 */
DEFINE_SPINLOCK(mchdev_lock);

/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	assert_spin_locked(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
static void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl;
	u8 fmax, fmin, fstart, vstart;

	spin_lock_irq(&mchdev_lock);

	rgvmodectl = I915_READ(MEMMODECTL);

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
	dev_priv->ips.fstart = fstart;

	dev_priv->ips.max_delay = fstart;
	dev_priv->ips.min_delay = fmin;
	dev_priv->ips.cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	mdelay(1);

	ironlake_set_drps(dev, fstart);

	dev_priv->ips.last_count1 = I915_READ(DMIEC) +
		I915_READ(DDREC) + I915_READ(CSIEC);
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->ips.last_count2 = I915_READ(GFXEC);
	dev_priv->ips.last_time2 = ktime_get_raw_ns();

	spin_unlock_irq(&mchdev_lock);
}
static void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->ips.fstart);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}
/* There's a funny hw issue where the hw returns all 0 when reading from
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
 * all limits and the gpu stuck at whatever frequency it is at atm).
 */
static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
	u32 limits;

	/* Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt. */
	if (IS_GEN9(dev_priv)) {
		limits = (dev_priv->rps.max_freq_softlimit) << 23;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= (dev_priv->rps.min_freq_softlimit) << 14;
	} else {
		limits = dev_priv->rps.max_freq_softlimit << 24;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= dev_priv->rps.min_freq_softlimit << 16;
	}

	return limits;
}
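/*
 * Busyness thresholds applied by gen6_set_rps_thresholds() below, per
 * power bin (evaluation intervals programmed via GT_INTERVAL_FROM_US):
 *   LOW_POWER:  up if >95% busy over 16ms, down if <85% busy over 32ms
 *   BETWEEN:    up if >90% busy over 13ms, down if <75% busy over 32ms
 *   HIGH_POWER: up if >85% busy over 10ms, down if <60% busy over 32ms
 */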
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
	int new_power;
	u32 threshold_up = 0, threshold_down = 0; /* in % */
	u32 ei_up = 0, ei_down = 0;

	new_power = dev_priv->rps.power;
	switch (dev_priv->rps.power) {
	case LOW_POWER:
		if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
			new_power = LOW_POWER;
		else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= dev_priv->rps.min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= dev_priv->rps.max_freq_softlimit)
		new_power = HIGH_POWER;
	if (new_power == dev_priv->rps.power)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		ei_up = 16000;
		threshold_up = 95;

		/* Downclock if less than 85% busy over 32ms */
		ei_down = 32000;
		threshold_down = 85;
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		ei_up = 13000;
		threshold_up = 90;

		/* Downclock if less than 75% busy over 32ms */
		ei_down = 32000;
		threshold_down = 75;
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		ei_up = 10000;
		threshold_up = 85;

		/* Downclock if less than 60% busy over 32ms */
		ei_down = 32000;
		threshold_down = 60;
		break;
	}

	I915_WRITE(GEN6_RP_UP_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
	I915_WRITE(GEN6_RP_UP_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));

	I915_WRITE(GEN6_RP_DOWN_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_down));
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	dev_priv->rps.power = new_power;
	dev_priv->rps.up_threshold = threshold_up;
	dev_priv->rps.down_threshold = threshold_down;
	dev_priv->rps.last_adj = 0;
}
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
	u32 mask = 0;

	if (val > dev_priv->rps.min_freq_softlimit)
		mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
	if (val < dev_priv->rps.max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= dev_priv->pm_rps_events;

	return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
}
/* gen6_set_rps is called to update the frequency request, but should also be
 * called when the range (min_delay and max_delay) is modified so that we can
 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
static void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		return;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq);
	WARN_ON(val < dev_priv->rps.min_freq);

	/* min/max delay may still have been modified so be sure to
	 * write the limits value.
	 */
	if (val != dev_priv->rps.cur_freq) {
		gen6_set_rps_thresholds(dev_priv, val);

		if (IS_GEN9(dev))
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN9_FREQUENCY(val));
		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			I915_WRITE(GEN6_RPNSWREQ,
				   HSW_FREQUENCY(val));
		else
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN6_FREQUENCY(val) |
				   GEN6_OFFSET(0) |
				   GEN6_AGGRESSIVE_TURBO);
	}

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	POSTING_READ(GEN6_RPNSWREQ);

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
static void valleyview_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq);
	WARN_ON(val < dev_priv->rps.min_freq);

	if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
		      "Odd GPU freq value\n"))
		val &= ~1;

	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	if (val != dev_priv->rps.cur_freq) {
		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
		if (!IS_CHERRYVIEW(dev_priv))
			gen6_set_rps_thresholds(dev_priv, val);
	}

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
 *
 * * If Gfx is Idle, then
 * 1. Forcewake Media well.
 * 2. Request idle freq.
 * 3. Release Forcewake of Media well.
 */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
	u32 val = dev_priv->rps.idle_freq;

	if (dev_priv->rps.cur_freq <= val)
		return;

	/* Wake up the media well, as that takes a lot less
	 * power than the Render well. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
	valleyview_set_rps(dev_priv->dev, val);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
}
void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
			gen6_rps_reset_ei(dev_priv);
		I915_WRITE(GEN6_PMINTRMSK,
			   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
			vlv_set_rps_idle(dev_priv);
		else
			gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
		dev_priv->rps.last_adj = 0;
		I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	spin_lock(&dev_priv->rps.client_lock);
	while (!list_empty(&dev_priv->rps.clients))
		list_del_init(dev_priv->rps.clients.next);
	spin_unlock(&dev_priv->rps.client_lock);
}
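/*
 * gen6_rps_boost - request a frequency boost on behalf of a client that is
 * waiting on a request. A boost that hits the throttle window is treated
 * as GPU congestion and charged to the device rather than the client.
 */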
void gen6_rps_boost(struct drm_i915_private *dev_priv,
		    struct intel_rps_client *rps,
		    unsigned long submitted)
{
	/* This is intentionally racy! We peek at the state here, then
	 * validate inside the RPS worker.
	 */
	if (!(dev_priv->mm.busy &&
	      dev_priv->rps.enabled &&
	      dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
		return;

	/* Force a RPS boost (and don't count it against the client) if
	 * the GPU is severely congested.
	 */
	if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
		rps = NULL;

	spin_lock(&dev_priv->rps.client_lock);
	if (rps == NULL || list_empty(&rps->link)) {
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.client_boost = true;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock_irq(&dev_priv->irq_lock);

		if (rps != NULL) {
			list_add(&rps->link, &dev_priv->rps.clients);
			rps->boosts++;
		} else
			dev_priv->rps.boosts++;
	}
	spin_unlock(&dev_priv->rps.client_lock);
}
void intel_set_rps(struct drm_device *dev, u8 val)
{
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		valleyview_set_rps(dev, val);
	else
		gen6_set_rps(dev, val);
}
static void gen9_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN9_PG_ENABLE, 0);
}

static void gen9_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void cherryview_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
}

static void valleyview_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* We're doing forcewake before disabling RC6,
	 * this is what the BIOS expects when going into suspend */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RC_CONTROL, 0);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
{
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
			mode = GEN6_RC_CTL_RC6_ENABLE;
		else
			mode = 0;
	}
	if (HAS_RC6p(dev))
		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
			      onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
			      onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
			      onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
	else
		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
			      onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
}
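/*
 * On BXT the BIOS is expected to place the RC6 context in stolen memory
 * and program the engine idle wait times; verify that setup before
 * trusting RC6 on this platform.
 */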
static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool enable_rc6 = true;
	unsigned long rc6_ctx_base;

	if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
		DRM_DEBUG_KMS("RC6 Base location not set properly.\n");
		enable_rc6 = false;
	}

	/*
	 * The exact context size is not known for BXT, so assume a page size
	 * for this check.
	 */
	rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
	if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
	      (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
					ggtt->stolen_reserved_size))) {
		DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
		enable_rc6 = false;
	}

	if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
		DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n");
		enable_rc6 = false;
	}

	if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE |
					    GEN6_RC_CTL_HW_ENABLE)) &&
	    ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) ||
	     !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) {
		DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n");
		enable_rc6 = false;
	}

	return enable_rc6;
}
int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
{
	/* No RC6 before Ironlake and code is gone for ilk. */
	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	if (!enable_rc6)
		return 0;

	if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) {
		DRM_INFO("RC6 disabled by BIOS\n");
		return 0;
	}

	/* Respect the kernel parameter if it is set */
	if (enable_rc6 >= 0) {
		int mask;

		if (HAS_RC6p(dev))
			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
			       INTEL_RC6pp_ENABLE;
		else
			mask = INTEL_RC6_ENABLE;

		if ((enable_rc6 & mask) != enable_rc6)
			DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
				      enable_rc6 & mask, enable_rc6, mask);

		return enable_rc6 & mask;
	}

	if (IS_IVYBRIDGE(dev))
		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);

	return INTEL_RC6_ENABLE;
}

int intel_enable_rc6(const struct drm_device *dev)
{
	return i915.enable_rc6;
}
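/*
 * RP0/RP1/RPn are read out of RP_STATE_CAP in units of 50 MHz; on SKL/KBL
 * they are rescaled below to the hardware's native 16.66 MHz units.
 */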
static void gen6_init_rps_frequencies(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t rp_state_cap;
	u32 ddcc_status = 0;
	int ret;

	/* All of these values are in units of 50MHz */
	dev_priv->rps.cur_freq = 0;
	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_BROXTON(dev)) {
		rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
		dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
		dev_priv->rps.min_freq = (rp_state_cap >>  0) & 0xff;
	} else {
		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
		dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
	}

	/* hw_max = RP0 until we check for overclocking */
	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;

	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
	if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
	    IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		ret = sandybridge_pcode_read(dev_priv,
					HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
					&ddcc_status);
		if (ret == 0)
			dev_priv->rps.efficient_freq =
				clamp_t(u8,
					((ddcc_status >> 8) & 0xff),
					dev_priv->rps.min_freq,
					dev_priv->rps.max_freq);
	}

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		/* Store the frequency values in 16.66 MHZ units, which is
		   the natural hardware unit for SKL */
		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
	}

	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0) {
		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			dev_priv->rps.min_freq_softlimit =
				max_t(int, dev_priv->rps.efficient_freq,
				      intel_freq_opcode(dev_priv, 450));
		else
			dev_priv->rps.min_freq_softlimit =
				dev_priv->rps.min_freq;
	}
}
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	gen6_init_rps_frequencies(dev);

	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		/*
		 * BIOS could leave the Hw Turbo enabled, so need to explicitly
		 * clear out the Control register just to avoid inconsistency
		 * with debugfs interface, which will show Turbo as enabled
		 * only and that is not expected by the User after adding the
		 * WaGsvDisableTurbo. Apart from this there is no problem even
		 * if the Turbo is left enabled in the Control register, as the
		 * Up/Down interrupts would remain masked.
		 */
		gen9_disable_rps(dev);
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		return;
	}

	/* Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		GEN9_FREQUENCY(dev_priv->rps.rp1_freq));

	/* 1 second timeout */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
		GT_INTERVAL_FROM_US(dev_priv, 1000000));

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);

	/* Leaning on the below call to gen6_set_rps to program/setup the
	 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
	dev_priv->rps.power = HIGH_POWER; /* force a reset */
	gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen9_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	uint32_t rc6_mask = 0;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2b: Program RC6 thresholds. */

	/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
	if (IS_SKYLAKE(dev))
		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
	else
		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, dev_priv)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	if (HAS_GUC_UCODE(dev))
		I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);

	I915_WRITE(GEN6_RC_SLEEP, 0);

	/* 2c: Program Coarse Power Gating Policies. */
	I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
	I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);

	/* 3a: Enable RC6 */
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
	/* WaRsUseTimeoutMode */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN7_RC_CTL_TO_MODE |
			   rc6_mask);
	} else {
		I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN6_RC_CTL_EI_MODE(1) |
			   rc6_mask);
	}

	/*
	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
	 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
	 */
	if (NEEDS_WaRsDisableCoarsePowerGating(dev))
		I915_WRITE(GEN9_PG_ENABLE, 0);
	else
		I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
				(GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
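/*
 * The numbered comments in gen8_enable_rps() below follow the BDW enabling
 * sequence: RC state and forcewake (1), RC6 thresholds (2), RC6 enable (3),
 * RPS defaults and thresholds (4), RPS enable (5), ring frequency (6).
 */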
static void gen8_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	uint32_t rc6_mask = 0;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1c & 1d: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* Initialize rps frequencies */
	gen6_init_rps_frequencies(dev);

	/* 2b: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, dev_priv)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	if (IS_BROADWELL(dev))
		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* 3: Enable RC6 */
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	intel_print_rc6_info(dev, rc6_mask);
	if (IS_BROADWELL(dev))
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
				GEN7_RC_CTL_TO_MODE |
				rc6_mask);
	else
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
				GEN6_RC_CTL_EI_MODE(1) |
				rc6_mask);

	/* 4: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RPNSWREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */

	/* Docs recommend 900MHz, and 300 MHz respectively */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   dev_priv->rps.max_freq_softlimit << 24 |
		   dev_priv->rps.min_freq_softlimit << 16);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */
	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* 6: Ring frequency + overclocking (our driver does this later) */

	dev_priv->rps.power = HIGH_POWER; /* force a reset */
	gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen6_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
	u32 gtfifodbg;
	int rc6_mode;
	int ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* Clear the DBG now so we don't confuse earlier errors */
	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Initialize rps frequencies */
	gen6_init_rps_frequencies(dev);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, dev_priv)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* Check if we are enabling RC6 */
	rc6_mode = intel_enable_rc6(dev_priv->dev);
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	/* We don't use those on Haswell */
	if (!IS_HASWELL(dev)) {
		if (rc6_mode & INTEL_RC6p_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

		if (rc6_mode & INTEL_RC6pp_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
	}

	intel_print_rc6_info(dev, rc6_mask);

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	/* Power down if completely idle for over 50ms */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
	if (ret)
		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");

	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
	if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
		DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
				 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
				 (pcu_mbox & 0xff) * 50);
		dev_priv->rps.max_freq = pcu_mbox & 0xff;
	}

	dev_priv->rps.power = HIGH_POWER; /* force a reset */
	gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);

	rc6vids = 0;
	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	if (IS_GEN6(dev) && ret) {
		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
	} else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
				 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
		rc6vids &= 0xffff00;
		rc6vids |= GEN6_ENCODE_RC6_VID(450);
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
		if (ret)
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
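/*
 * Example of the GPU-to-ring frequency mapping below, with assumed numbers
 * for illustration: on Haswell a GPU frequency of 20 (20 * 50 MHz =
 * 1000 MHz) requests a ring frequency of 20 * 5 / 4 = 25 (1250 MHz),
 * floored at the DDR-derived min_ring_freq.
 */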
static void __gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int min_freq = 15;
	unsigned int gpu_freq;
	unsigned int max_ia_freq, min_ring_freq;
	unsigned int max_gpu_freq, min_gpu_freq;
	int scaling_factor = 180;
	struct cpufreq_policy *policy;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	policy = cpufreq_cpu_get(0);
	if (policy) {
		max_ia_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	} else {
		/*
		 * Default to measured freq if none found, PCU will ensure we
		 * don't go over
		 */
		max_ia_freq = tsc_khz;
	}

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	min_ring_freq = I915_READ(DCLK) & 0xf;
	/* convert DDR frequency from units of 266.6MHz to bandwidth */
	min_ring_freq = mult_frac(min_ring_freq, 8, 3);

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
		max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = dev_priv->rps.min_freq;
		max_gpu_freq = dev_priv->rps.max_freq;
	}

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access. We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
		int diff = max_gpu_freq - gpu_freq;
		unsigned int ia_freq = 0, ring_freq = 0;

		if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
			/*
			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
			 * No floor required for ring frequency on SKL.
			 */
			ring_freq = gpu_freq;
		} else if (INTEL_INFO(dev)->gen >= 8) {
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
			ring_freq = max(min_ring_freq, gpu_freq);
		} else if (IS_HASWELL(dev)) {
			ring_freq = mult_frac(gpu_freq, 5, 4);
			ring_freq = max(min_ring_freq, ring_freq);
			/* leave ia_freq as the default, chosen by cpufreq */
		} else {
			/* On older processors, there is no separate ring
			 * clock domain, so in order to boost the bandwidth
			 * of the ring, we need to upclock the CPU (ia_freq).
			 *
			 * For GPU frequencies less than 750MHz,
			 * just use the lowest ring freq.
			 */
			if (gpu_freq < min_freq)
				ia_freq = 800;
			else
				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		}

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
					gpu_freq);
	}
}

void gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_CORE_RING_FREQ(dev))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);
	__gen6_update_ring_freq(dev);
	mutex_unlock(&dev_priv->rps.hw_lock);
}
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val, rp0;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (INTEL_INFO(dev)->eu_total) {
	case 8:
		/* (2 * 4) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
		break;
	case 12:
		/* (2 * 6) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Setting (2 * 8) Min RP0 for any other combination */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
		break;
	}

	rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);

	return rp0;
}

static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;

	return rpe;
}

static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
	rp1 = (val & FB_GFX_FREQ_FUSE_MASK);

	return rp1;
}

static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
	/*
	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
	 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0
	 * to make sure it matches what Punit accepts.
	 */
	return max_t(u32, val, 0xc0);
}
/* Check that the pctx buffer wasn't moved under us. */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
			     dev_priv->vlv_pctx->stolen->start);
}

/* Check that the pcbr address is not empty. */
static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}
static void cherryview_setup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	unsigned long pctx_paddr, paddr;
	u32 pcbr;
	int pctx_size = 32*1024;

	pcbr = I915_READ(VLV_PCBR);
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
		paddr = (dev_priv->mm.stolen_base +
			 (ggtt->stolen_size - pctx_size));

		pctx_paddr = (paddr & (~4095));
		I915_WRITE(VLV_PCBR, pctx_paddr);
	}

	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}
static void valleyview_setup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *pctx;
	unsigned long pctx_paddr;
	u32 pcbr;
	int pctx_size = 24*1024;

	mutex_lock(&dev->struct_mutex);

	pcbr = I915_READ(VLV_PCBR);
	if (pcbr) {
		/* BIOS set it up already, grab the pre-alloc'd space */
		int pcbr_offset;

		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
								      pcbr_offset,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);
		goto out;
	}

	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");

	/*
	 * From the Gunit register HAS:
	 * The Gfx driver is expected to program this register and ensure
	 * proper allocation within Gfx stolen memory. For example, this
	 * register should be programmed such that the PCBR range does not
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
	 */
	pctx = i915_gem_object_create_stolen(dev, pctx_size);
	if (!pctx) {
		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
		goto out;
	}

	pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
	I915_WRITE(VLV_PCBR, pctx_paddr);

out:
	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
	dev_priv->vlv_pctx = pctx;
	mutex_unlock(&dev->struct_mutex);
}
static void valleyview_cleanup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (WARN_ON(!dev_priv->vlv_pctx))
		return;

	drm_gem_object_unreference_unlocked(&dev_priv->vlv_pctx->base);
	dev_priv->vlv_pctx = NULL;
}

static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
{
	dev_priv->rps.gpll_ref_freq =
		vlv_get_cck_clock(dev_priv, "GPLL ref",
				  CCK_GPLL_CLOCK_CONTROL,
				  dev_priv->czclk_freq);

	DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
			 dev_priv->rps.gpll_ref_freq);
}
static void valleyview_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	valleyview_setup_pctx(dev);

	vlv_init_gpll_ref_freq(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void cherryview_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	cherryview_setup_pctx(dev);

	vlv_init_gpll_ref_freq(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
	mutex_unlock(&dev_priv->sb_lock);

	switch ((val >> 2) & 0x7) {
	case 3:
		dev_priv->mem_freq = 2000;
		break;
	default:
		dev_priv->mem_freq = 1600;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	/* PUnit validated range is only [RPe, RP0] */
	dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	WARN_ONCE((dev_priv->rps.max_freq |
		   dev_priv->rps.efficient_freq |
		   dev_priv->rps.rp1_freq |
		   dev_priv->rps.min_freq) & 1,
		  "Odd GPU freq values\n");

	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
{
	valleyview_cleanup_pctx(dev);
}
static void cherryview_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	u32 gtfifodbg, val, rc6_mode = 0, pcbr;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
					     GT_FIFO_FREE_ENTRIES_CHV);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	cherryview_check_pctx(dev_priv);

	/* 1a & 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2a: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */

	for_each_engine(engine, dev_priv)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);

	/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
	I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	/* For now we assume BIOS is allocating and populating the PCBR */
	pcbr = I915_READ(VLV_PCBR);

	/* 3: Enable RC6 */
	if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
	    (pcbr >> VLV_PCBR_ADDR_SHIFT))
		rc6_mode = GEN7_RC_CTL_TO_MODE;

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* 4: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN |
		VLV_SOC_TDP_EN |
		CHV_BIAS_CPU_50_SOC_50;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
			 dev_priv->rps.idle_freq);

	valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void valleyview_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	u32 gtfifodbg, val, rc6_mode = 0;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	valleyview_check_pctx(dev_priv);

	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	/* If VLV, Forcewake all wells, else re-direct to regular path */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, dev_priv)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
				      VLV_RENDER_RC0_COUNT_EN |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;

	intel_print_rc6_info(dev, rc6_mode);

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN |
		VLV_SOC_TDP_EN |
		VLV_BIAS_CPU_125_SOC_875;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
			 dev_priv->rps.idle_freq);

	valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}

static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
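/*
 * cparams supplies the (m, c) coefficients of the linear fit used by
 * __i915_chipset_val() below (power estimate ~= m * count_rate + c),
 * selected by matching the IPS c_m value (i) and memory frequency (t).
 */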
static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	assert_spin_locked(&mchdev_lock);

	diff1 = now - dev_priv->ips.last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->ips.last_count1) {
		diff = ~0UL - dev_priv->ips.last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->ips.last_count1 = total_count;
	dev_priv->ips.last_time1 = now;

	dev_priv->ips.chipset_power = ret;

	return ret;
}
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_chipset_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}
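/*
 * Convert the 7-bit PXVID code sampled from PXVFREQ into a voltage value;
 * pvid_to_extvid() then rebases that value for mobile parts.
 */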
static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}

static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	struct drm_device *dev = dev_priv->dev;
	const int vd = _pxvid_to_vd(pxvid);
	const int vm = vd - 1125;

	if (INTEL_INFO(dev)->is_mobile)
		return vm > 0 ? vm : 0;

	return vd;
}
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	u64 now, diff, diffms;
	u32 count;

	assert_spin_locked(&mchdev_lock);

	now = ktime_get_raw_ns();
	diffms = now - dev_priv->ips.last_time2;
	do_div(diffms, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->ips.gfx_power = diff;
}

void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (INTEL_INFO(dev)->gen != 5)
		return;

	spin_lock_irq(&mchdev_lock);

	__i915_update_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);
}
static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	assert_spin_locked(&mchdev_lock);

	pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}

unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = __i915_chipset_val(dev_priv);
	graphics_val = __i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
		dev_priv->ips.max_delay--;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
		dev_priv->ips.max_delay++;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	struct intel_engine_cs *engine;
	bool ret = false;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	for_each_engine(engine, dev_priv)
		ret |= !list_empty(&engine->request_list);

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->ips.max_delay = dev_priv->ips.fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
		ret = false;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = dev_priv;
	spin_unlock_irq(&mchdev_lock);

	ips_ping_for_i915_load();
}

void intel_gpu_ips_teardown(void)
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}
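/*
 * Program the energy monitoring (EMON) weights that the IPS heuristics
 * above rely on: per-event energy weights, P-state weights derived from
 * the PXVFREQ table, and the PMON enable itself.
 */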

static void intel_init_emon(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 lcfuse;
        u8 pxw[16];
        int i;

        /* Disable to program */
        I915_WRITE(ECR, 0);
        POSTING_READ(ECR);

        /* Program energy weights for various events */
        I915_WRITE(SDEW, 0x15040d00);
        I915_WRITE(CSIEW0, 0x007f0000);
        I915_WRITE(CSIEW1, 0x1e220004);
        I915_WRITE(CSIEW2, 0x04000004);

        for (i = 0; i < 5; i++)
                I915_WRITE(PEW(i), 0);
        for (i = 0; i < 3; i++)
                I915_WRITE(DEW(i), 0);

        /* Program P-state weights to account for frequency power adjustment */
        for (i = 0; i < 16; i++) {
                u32 pxvidfreq = I915_READ(PXVFREQ(i));
                unsigned long freq = intel_pxfreq(pxvidfreq);
                unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
                        PXVFREQ_PX_SHIFT;
                unsigned long val;

                val = vid * vid;
                val *= (freq / 1000);
                val *= 255;
                val /= (127*127*900);
                if (val > 0xff)
                        DRM_ERROR("bad pxval: %ld\n", val);
                pxw[i] = val;
        }
        /* Render standby states get 0 weight */
        pxw[14] = 0;
        pxw[15] = 0;

        for (i = 0; i < 4; i++) {
                u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
                        (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
                I915_WRITE(PXW(i), val);
        }

        /* Adjust magic regs to magic values (more experimental results) */
        I915_WRITE(OGW0, 0);
        I915_WRITE(OGW1, 0);
        I915_WRITE(EG0, 0x00007f00);
        I915_WRITE(EG1, 0x0000000e);
        I915_WRITE(EG2, 0x000e0000);
        I915_WRITE(EG3, 0x68000300);
        I915_WRITE(EG4, 0x42000000);
        I915_WRITE(EG5, 0x00140031);
        I915_WRITE(EG6, 0);
        I915_WRITE(EG7, 0);

        for (i = 0; i < 8; i++)
                I915_WRITE(PXWL(i), 0);

        /* Enable PMON + select events */
        I915_WRITE(ECR, 0x80000019);

        lcfuse = I915_READ(LCFUSE02);

        dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
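
/*
 * A worked example for the P-state weights programmed above, using
 * made-up numbers: for vid = 100 and freq = 400000, the loop computes
 * 100 * 100 * (400000 / 1000) * 255 / (127 * 127 * 900) ~= 70, which
 * fits the u8 weight slot; anything over 0xff trips the "bad pxval"
 * warning.
 */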

void intel_init_gt_powersave(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /*
         * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
         * requirement.
         */
        if (!i915.enable_rc6) {
                DRM_INFO("RC6 disabled, disabling runtime PM support\n");
                intel_runtime_pm_get(dev_priv);
        }

        if (IS_CHERRYVIEW(dev))
                cherryview_init_gt_powersave(dev);
        else if (IS_VALLEYVIEW(dev))
                valleyview_init_gt_powersave(dev);
}

void intel_cleanup_gt_powersave(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (IS_CHERRYVIEW(dev))
                return;
        else if (IS_VALLEYVIEW(dev))
                valleyview_cleanup_gt_powersave(dev);

        if (!i915.enable_rc6)
                intel_runtime_pm_put(dev_priv);
}

static void gen6_suspend_rps(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        flush_delayed_work(&dev_priv->rps.delayed_resume_work);

        gen6_disable_rps_interrupts(dev);
}

/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev: drm device
 *
 * We don't want to disable RC6 or other features here, we just want
 * to make sure any work we've queued has finished and won't bother
 * us while we're suspended.
 */
void intel_suspend_gt_powersave(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (INTEL_INFO(dev)->gen < 6)
                return;

        gen6_suspend_rps(dev);

        /* Force GPU to min freq during suspend */
        gen6_rps_idle(dev_priv);
}

void intel_disable_gt_powersave(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (IS_IRONLAKE_M(dev)) {
                ironlake_disable_drps(dev);
        } else if (INTEL_INFO(dev)->gen >= 6) {
                intel_suspend_gt_powersave(dev);

                mutex_lock(&dev_priv->rps.hw_lock);
                if (INTEL_INFO(dev)->gen >= 9) {
                        gen9_disable_rc6(dev);
                        gen9_disable_rps(dev);
                } else if (IS_CHERRYVIEW(dev))
                        cherryview_disable_rps(dev);
                else if (IS_VALLEYVIEW(dev))
                        valleyview_disable_rps(dev);
                else
                        gen6_disable_rps(dev);

                dev_priv->rps.enabled = false;
                mutex_unlock(&dev_priv->rps.hw_lock);
        }
}

static void intel_gen6_powersave_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private,
                             rps.delayed_resume_work.work);
        struct drm_device *dev = dev_priv->dev;

        mutex_lock(&dev_priv->rps.hw_lock);

        gen6_reset_rps_interrupts(dev);

        if (IS_CHERRYVIEW(dev)) {
                cherryview_enable_rps(dev);
        } else if (IS_VALLEYVIEW(dev)) {
                valleyview_enable_rps(dev);
        } else if (INTEL_INFO(dev)->gen >= 9) {
                gen9_enable_rc6(dev);
                gen9_enable_rps(dev);
                if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
                        __gen6_update_ring_freq(dev);
        } else if (IS_BROADWELL(dev)) {
                gen8_enable_rps(dev);
                __gen6_update_ring_freq(dev);
        } else {
                gen6_enable_rps(dev);
                __gen6_update_ring_freq(dev);
        }

        WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
        WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);

        WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
        WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);

        dev_priv->rps.enabled = true;

        gen6_enable_rps_interrupts(dev);

        mutex_unlock(&dev_priv->rps.hw_lock);

        intel_runtime_pm_put(dev_priv);
}

void intel_enable_gt_powersave(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* Powersaving is controlled by the host when inside a VM */
        if (intel_vgpu_active(dev))
                return;

        if (IS_IRONLAKE_M(dev)) {
                ironlake_enable_drps(dev);
                mutex_lock(&dev->struct_mutex);
                intel_init_emon(dev);
                mutex_unlock(&dev->struct_mutex);
        } else if (INTEL_INFO(dev)->gen >= 6) {
                /*
                 * PCU communication is slow and this doesn't need to be
                 * done at any specific time, so do this out of our fast path
                 * to make resume and init faster.
                 *
                 * We depend on the HW RC6 power context save/restore
                 * mechanism when entering D3 through runtime PM suspend. So
                 * disable RPM until RPS/RC6 is properly setup. We can only
                 * get here via the driver load/system resume/runtime resume
                 * paths, so the _noresume version is enough (and in case of
                 * runtime resume it's necessary).
                 */
                if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
                                          round_jiffies_up_relative(HZ)))
                        intel_runtime_pm_get_noresume(dev_priv);
        }
}

void intel_reset_gt_powersave(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (INTEL_INFO(dev)->gen < 6)
                return;

        gen6_suspend_rps(dev);
        dev_priv->rps.enabled = false;
}

static void ibx_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /*
         * On Ibex Peak and Cougar Point, we need to disable clock
         * gating for the panel power sequencer or it will fail to
         * start up when no ports are active.
         */
        I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;

        for_each_pipe(dev_priv, pipe) {
                I915_WRITE(DSPCNTR(pipe),
                           I915_READ(DSPCNTR(pipe)) |
                           DISPPLANE_TRICKLE_FEED_DISABLE);

                I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
                POSTING_READ(DSPSURF(pipe));
        }
}

static void ilk_init_lp_watermarks(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
        I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
        I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

        /*
         * Don't touch WM1S_LP_EN here.
         * Doing so could cause underruns.
         */
}

static void ironlake_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

        /*
         * Required for FBC
         * WaFbcDisableDpfcClockGating:ilk
         */
        dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
                   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
                   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

        I915_WRITE(PCH_3DCGDIS0,
                   MARIUNIT_CLOCK_GATE_DISABLE |
                   SVSMUNIT_CLOCK_GATE_DISABLE);
        I915_WRITE(PCH_3DCGDIS1,
                   VFMUNIT_CLOCK_GATE_DISABLE);

        /*
         * According to the spec the following bits should be set in
         * order to enable memory self-refresh
         * The bit 22/21 of 0x42004
         * The bit 5 of 0x42020
         * The bit 15 of 0x45000
         */
        I915_WRITE(ILK_DISPLAY_CHICKEN2,
                   (I915_READ(ILK_DISPLAY_CHICKEN2) |
                    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
        dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
        I915_WRITE(DISP_ARB_CTL,
                   (I915_READ(DISP_ARB_CTL) |
                    DISP_FBC_WM_DIS));

        ilk_init_lp_watermarks(dev);

        /*
         * Based on the document from hardware guys the following bits
         * should be set unconditionally in order to enable FBC.
         * The bit 22 of 0x42000
         * The bit 22 of 0x42004
         * The bit 7,8,9 of 0x42020.
         */
        if (IS_IRONLAKE_M(dev)) {
                /* WaFbcAsynchFlipDisableFbcQueue:ilk */
                I915_WRITE(ILK_DISPLAY_CHICKEN1,
                           I915_READ(ILK_DISPLAY_CHICKEN1) |
                           ILK_FBCQ_DIS);
                I915_WRITE(ILK_DISPLAY_CHICKEN2,
                           I915_READ(ILK_DISPLAY_CHICKEN2) |
                           ILK_DPARB_GATE);
        }

        I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

        I915_WRITE(ILK_DISPLAY_CHICKEN2,
                   I915_READ(ILK_DISPLAY_CHICKEN2) |
                   ILK_ELPIN_409_SELECT);
        I915_WRITE(_3D_CHICKEN2,
                   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
                   _3D_CHICKEN2_WM_READ_PIPELINED);

        /* WaDisableRenderCachePipelinedFlush:ilk */
        I915_WRITE(CACHE_MODE_0,
                   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

        /* WaDisable_RenderCache_OperationalFlush:ilk */
        I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

        g4x_disable_trickle_feed(dev);

        ibx_init_clock_gating(dev);
}

static void cpt_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
        uint32_t val;

        /*
         * On Ibex Peak and Cougar Point, we need to disable clock
         * gating for the panel power sequencer or it will fail to
         * start up when no ports are active.
         */
        I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
                   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
                   PCH_CPUNIT_CLOCK_GATE_DISABLE);
        I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
                   DPLS_EDP_PPS_FIX_DIS);
        /* The below fixes the weird display corruption, a few pixels shifted
         * downward, on (only) LVDS of some HP laptops with IVY.
         */
        for_each_pipe(dev_priv, pipe) {
                val = I915_READ(TRANS_CHICKEN2(pipe));
                val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
                val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
                if (dev_priv->vbt.fdi_rx_polarity_inverted)
                        val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
                val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
                val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
                val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
                I915_WRITE(TRANS_CHICKEN2(pipe), val);
        }
        /* WADP0ClockGatingDisable */
        for_each_pipe(dev_priv, pipe) {
                I915_WRITE(TRANS_CHICKEN1(pipe),
                           TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
        }
}

static void gen6_check_mch_setup(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;

        tmp = I915_READ(MCH_SSKPD);
        if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
                DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
                              tmp);
}

static void gen6_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

        I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

        I915_WRITE(ILK_DISPLAY_CHICKEN2,
                   I915_READ(ILK_DISPLAY_CHICKEN2) |
                   ILK_ELPIN_409_SELECT);

        /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
        I915_WRITE(_3D_CHICKEN,
                   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

        /* WaDisable_RenderCache_OperationalFlush:snb */
        I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

        /*
         * BSpec recommends 8x4 when MSAA is used,
         * however in practice 16x4 seems fastest.
         *
         * Note that PS/WM thread counts depend on the WIZ hashing
         * disable bit, which we don't touch here, but it's good
         * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
         */
        I915_WRITE(GEN6_GT_MODE,
                   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

        ilk_init_lp_watermarks(dev);

        I915_WRITE(CACHE_MODE_0,
                   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

        I915_WRITE(GEN6_UCGCTL1,
                   I915_READ(GEN6_UCGCTL1) |
                   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
                   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

        /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
         * gating disable must be set. Failure to set it results in
         * flickering pixels due to Z write ordering failures after
         * some amount of runtime in the Mesa "fire" demo, and Unigine
         * Sanctuary and Tropics, and apparently anything else with
         * alpha test or pixel discard.
         *
         * According to the spec, bit 11 (RCCUNIT) must also be set,
         * but we didn't debug actual testcases to find it out.
         *
         * WaDisableRCCUnitClockGating:snb
         * WaDisableRCPBUnitClockGating:snb
         */
        I915_WRITE(GEN6_UCGCTL2,
                   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
                   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

        /* WaStripsFansDisableFastClipPerformanceFix:snb */
        I915_WRITE(_3D_CHICKEN3,
                   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

        /*
         * Bspec says:
         * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
         * 3DSTATE_SF number of SF output attributes is more than 16."
         */
        I915_WRITE(_3D_CHICKEN3,
                   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

        /*
         * According to the spec the following bits should be
         * set in order to enable memory self-refresh and fbc:
         * The bit21 and bit22 of 0x42000
         * The bit21 and bit22 of 0x42004
         * The bit5 and bit7 of 0x42020
         * The bit14 of 0x70180
         * The bit14 of 0x71180
         *
         * WaFbcAsynchFlipDisableFbcQueue:snb
         */
        I915_WRITE(ILK_DISPLAY_CHICKEN1,
                   I915_READ(ILK_DISPLAY_CHICKEN1) |
                   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
        I915_WRITE(ILK_DISPLAY_CHICKEN2,
                   I915_READ(ILK_DISPLAY_CHICKEN2) |
                   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
        I915_WRITE(ILK_DSPCLK_GATE_D,
                   I915_READ(ILK_DSPCLK_GATE_D) |
                   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
                   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

        g4x_disable_trickle_feed(dev);

        cpt_init_clock_gating(dev);

        gen6_check_mch_setup(dev);
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
        uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

        /*
         * WaVSThreadDispatchOverride:ivb,vlv
         *
         * This actually overrides the dispatch
         * mode for all thread types.
         */
        reg &= ~GEN7_FF_SCHED_MASK;
        reg |= GEN7_FF_TS_SCHED_HW;
        reg |= GEN7_FF_VS_SCHED_HW;
        reg |= GEN7_FF_DS_SCHED_HW;

        I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void lpt_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /*
         * TODO: this bit should only be enabled when really needed, then
         * disabled when not needed anymore in order to save power.
         */
        if (HAS_PCH_LPT_LP(dev))
                I915_WRITE(SOUTH_DSPCLK_GATE_D,
                           I915_READ(SOUTH_DSPCLK_GATE_D) |
                           PCH_LP_PARTITION_LEVEL_DISABLE);

        /* WADPOClockGatingDisable:hsw */
        I915_WRITE(TRANS_CHICKEN1(PIPE_A),
                   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
                   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_PCH_LPT_LP(dev)) {
                uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

                val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
                I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
        }
}

static void kabylake_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        gen9_init_clock_gating(dev);

        /* WaDisableSDEUnitClockGating:kbl */
        if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
                I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
                           GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

        /* WaDisableGamClockGating:kbl */
        if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
                I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
                           GEN6_GAMUNIT_CLOCK_GATE_DISABLE);

        /* WaFbcNukeOnHostModify:kbl */
        I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
                   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void skylake_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        gen9_init_clock_gating(dev);

        /* WaFbcNukeOnHostModify:skl */
        I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
                   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void broadwell_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
        uint32_t misccpctl;

        ilk_init_lp_watermarks(dev);

        /* WaSwitchSolVfFArbitrationPriority:bdw */
        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

        /* WaPsrDPAMaskVBlankInSRD:bdw */
        I915_WRITE(CHICKEN_PAR1_1,
                   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

        /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
        for_each_pipe(dev_priv, pipe) {
                I915_WRITE(CHICKEN_PIPESL_1(pipe),
                           I915_READ(CHICKEN_PIPESL_1(pipe)) |
                           BDW_DPRS_MASK_VBLANK_SRD);
        }

        /* WaVSRefCountFullforceMissDisable:bdw */
        /* WaDSRefCountFullforceMissDisable:bdw */
        I915_WRITE(GEN7_FF_THREAD_MODE,
                   I915_READ(GEN7_FF_THREAD_MODE) &
                   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

        I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
                   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

        /* WaDisableSDEUnitClockGating:bdw */
        I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
                   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

        /*
         * WaProgramL3SqcReg1Default:bdw
         * WaTempDisableDOPClkGating:bdw
         */
        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
        /*
         * Wait at least 100 clocks before re-enabling clock gating. See
         * the definition of L3SQCREG1 in BSpec.
         */
        POSTING_READ(GEN8_L3SQCREG1);
        udelay(1);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

        /*
         * WaGttCachingOffByDefault:bdw
         * GTT cache may not work with big pages, so if those
         * are ever enabled GTT cache may need to be disabled.
         */
        I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);

        lpt_init_clock_gating(dev);
}

static void haswell_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        ilk_init_lp_watermarks(dev);

        /* L3 caching of data atomics doesn't work -- disable it. */
        I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
        I915_WRITE(HSW_ROW_CHICKEN3,
                   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

        /* This is required by WaCatErrorRejectionIssue:hsw */
        I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
                   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
                   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

        /* WaVSRefCountFullforceMissDisable:hsw */
        I915_WRITE(GEN7_FF_THREAD_MODE,
                   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

        /* WaDisable_RenderCache_OperationalFlush:hsw */
        I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

        /* enable HiZ Raw Stall Optimization */
        I915_WRITE(CACHE_MODE_0_GEN7,
                   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

        /* WaDisable4x2SubspanOptimization:hsw */
        I915_WRITE(CACHE_MODE_1,
                   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

        /*
         * BSpec recommends 8x4 when MSAA is used,
         * however in practice 16x4 seems fastest.
         *
         * Note that PS/WM thread counts depend on the WIZ hashing
         * disable bit, which we don't touch here, but it's good
         * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
         */
        I915_WRITE(GEN7_GT_MODE,
                   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

        /* WaSampleCChickenBitEnable:hsw */
        I915_WRITE(HALF_SLICE_CHICKEN3,
                   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

        /* WaSwitchSolVfFArbitrationPriority:hsw */
        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

        /* WaRsPkgCStateDisplayPMReq:hsw */
        I915_WRITE(CHICKEN_PAR1_1,
                   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

        lpt_init_clock_gating(dev);
}

static void ivybridge_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t snpcr;

        ilk_init_lp_watermarks(dev);

        I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

        /* WaDisableEarlyCull:ivb */
        I915_WRITE(_3D_CHICKEN3,
                   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

        /* WaDisableBackToBackFlipFix:ivb */
        I915_WRITE(IVB_CHICKEN3,
                   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
                   CHICKEN3_DGMG_DONE_FIX_DISABLE);

        /* WaDisablePSDDualDispatchEnable:ivb */
        if (IS_IVB_GT1(dev))
                I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
                           _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

        /* WaDisable_RenderCache_OperationalFlush:ivb */
        I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

        /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
        I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
                   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

        /* WaApplyL3ControlAndL3ChickenMode:ivb */
        I915_WRITE(GEN7_L3CNTLREG1,
                   GEN7_WA_FOR_GEN7_L3_CONTROL);
        I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
                   GEN7_WA_L3_CHICKEN_MODE);
        if (IS_IVB_GT1(dev))
                I915_WRITE(GEN7_ROW_CHICKEN2,
                           _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
        else {
                /* must write both registers */
                I915_WRITE(GEN7_ROW_CHICKEN2,
                           _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
                I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
                           _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
        }

        /* WaForceL3Serialization:ivb */
        I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
                   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

        /*
         * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
         * This implements the WaDisableRCZUnitClockGating:ivb workaround.
         */
        I915_WRITE(GEN6_UCGCTL2,
                   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

        /* This is required by WaCatErrorRejectionIssue:ivb */
        I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
                   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
                   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

        g4x_disable_trickle_feed(dev);

        gen7_setup_fixed_func_scheduler(dev_priv);

        if (0) { /* causes HiZ corruption on ivb:gt1 */
                /* enable HiZ Raw Stall Optimization */
                I915_WRITE(CACHE_MODE_0_GEN7,
                           _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
        }

        /* WaDisable4x2SubspanOptimization:ivb */
        I915_WRITE(CACHE_MODE_1,
                   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

        /*
         * BSpec recommends 8x4 when MSAA is used,
         * however in practice 16x4 seems fastest.
         *
         * Note that PS/WM thread counts depend on the WIZ hashing
         * disable bit, which we don't touch here, but it's good
         * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
         */
        I915_WRITE(GEN7_GT_MODE,
                   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

        snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
        snpcr &= ~GEN6_MBC_SNPCR_MASK;
        snpcr |= GEN6_MBC_SNPCR_MED;
        I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

        if (!HAS_PCH_NOP(dev))
                cpt_init_clock_gating(dev);

        gen6_check_mch_setup(dev);
}

static void valleyview_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* WaDisableEarlyCull:vlv */
        I915_WRITE(_3D_CHICKEN3,
                   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

        /* WaDisableBackToBackFlipFix:vlv */
        I915_WRITE(IVB_CHICKEN3,
                   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
                   CHICKEN3_DGMG_DONE_FIX_DISABLE);

        /* WaPsdDispatchEnable:vlv */
        /* WaDisablePSDDualDispatchEnable:vlv */
        I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
                   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
                                      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

        /* WaDisable_RenderCache_OperationalFlush:vlv */
        I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

        /* WaForceL3Serialization:vlv */
        I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
                   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

        /* WaDisableDopClockGating:vlv */
        I915_WRITE(GEN7_ROW_CHICKEN2,
                   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

        /* This is required by WaCatErrorRejectionIssue:vlv */
        I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
                   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
                   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

        gen7_setup_fixed_func_scheduler(dev_priv);

        /*
         * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
         * This implements the WaDisableRCZUnitClockGating:vlv workaround.
         */
        I915_WRITE(GEN6_UCGCTL2,
                   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

        /* WaDisableL3Bank2xClockGate:vlv
         * Disabling L3 clock gating- MMIO 940c[25] = 1
         * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
        I915_WRITE(GEN7_UCGCTL4,
                   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

        /*
         * BSpec says this must be set, even though
         * WaDisable4x2SubspanOptimization isn't listed for VLV.
         */
        I915_WRITE(CACHE_MODE_1,
                   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

        /*
         * BSpec recommends 8x4 when MSAA is used,
         * however in practice 16x4 seems fastest.
         *
         * Note that PS/WM thread counts depend on the WIZ hashing
         * disable bit, which we don't touch here, but it's good
         * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
         */
        I915_WRITE(GEN7_GT_MODE,
                   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

        /*
         * WaIncreaseL3CreditsForVLVB0:vlv
         * This is the hardware default actually.
         */
        I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

        /*
         * WaDisableVLVClockGating_VBIIssue:vlv
         * Disable clock gating on the GCFG unit to prevent a delay
         * in the reporting of vblank events.
         */
        I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}

static void cherryview_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* WaVSRefCountFullforceMissDisable:chv */
        /* WaDSRefCountFullforceMissDisable:chv */
        I915_WRITE(GEN7_FF_THREAD_MODE,
                   I915_READ(GEN7_FF_THREAD_MODE) &
                   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

        /* WaDisableSemaphoreAndSyncFlipWait:chv */
        I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
                   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

        /* WaDisableCSUnitClockGating:chv */
        I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
                   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

        /* WaDisableSDEUnitClockGating:chv */
        I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
                   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

        /*
         * GTT cache may not work with big pages, so if those
         * are ever enabled GTT cache may need to be disabled.
         */
        I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}

static void g4x_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dspclk_gate;

        I915_WRITE(RENCLK_GATE_D1, 0);
        I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
                   GS_UNIT_CLOCK_GATE_DISABLE |
                   CL_UNIT_CLOCK_GATE_DISABLE);
        I915_WRITE(RAMCLK_GATE_D, 0);
        dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
                OVRUNIT_CLOCK_GATE_DISABLE |
                OVCUNIT_CLOCK_GATE_DISABLE;
        if (IS_GM45(dev))
                dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
        I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

        /* WaDisableRenderCachePipelinedFlush */
        I915_WRITE(CACHE_MODE_0,
                   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

        /* WaDisable_RenderCache_OperationalFlush:g4x */
        I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

        g4x_disable_trickle_feed(dev);
}

static void crestline_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
        I915_WRITE(RENCLK_GATE_D2, 0);
        I915_WRITE(DSPCLK_GATE_D, 0);
        I915_WRITE(RAMCLK_GATE_D, 0);
        I915_WRITE16(DEUC, 0);
        I915_WRITE(MI_ARB_STATE,
                   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

        /* WaDisable_RenderCache_OperationalFlush:gen4 */
        I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void broadwater_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
                   I965_RCC_CLOCK_GATE_DISABLE |
                   I965_RCPB_CLOCK_GATE_DISABLE |
                   I965_ISC_CLOCK_GATE_DISABLE |
                   I965_FBC_CLOCK_GATE_DISABLE);
        I915_WRITE(RENCLK_GATE_D2, 0);
        I915_WRITE(MI_ARB_STATE,
                   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

        /* WaDisable_RenderCache_OperationalFlush:gen4 */
        I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dstate = I915_READ(D_STATE);

        dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
                DSTATE_DOT_CLOCK_GATING;
        I915_WRITE(D_STATE, dstate);

        if (IS_PINEVIEW(dev))
                I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

        /* IIR "flip pending" means done if this bit is set */
        I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

        /* interrupts should cause a wake up from C3 */
        I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

        /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
        I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

        I915_WRITE(MI_ARB_STATE,
                   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

        /* interrupts should cause a wake up from C3 */
        I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
                   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

        I915_WRITE(MEM_MODE,
                   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

        I915_WRITE(MEM_MODE,
                   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
                   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}

void intel_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->display.init_clock_gating(dev);
}

void intel_suspend_hw(struct drm_device *dev)
{
        if (HAS_PCH_LPT(dev))
                lpt_suspend_hw(dev);
}

static void nop_init_clock_gating(struct drm_device *dev)
{
        DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}

/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
        if (IS_SKYLAKE(dev_priv))
                dev_priv->display.init_clock_gating = skylake_init_clock_gating;
        else if (IS_KABYLAKE(dev_priv))
                dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
        else if (IS_BROXTON(dev_priv))
                dev_priv->display.init_clock_gating = bxt_init_clock_gating;
        else if (IS_BROADWELL(dev_priv))
                dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
        else if (IS_CHERRYVIEW(dev_priv))
                dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
        else if (IS_HASWELL(dev_priv))
                dev_priv->display.init_clock_gating = haswell_init_clock_gating;
        else if (IS_IVYBRIDGE(dev_priv))
                dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
        else if (IS_VALLEYVIEW(dev_priv))
                dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
        else if (IS_GEN6(dev_priv))
                dev_priv->display.init_clock_gating = gen6_init_clock_gating;
        else if (IS_GEN5(dev_priv))
                dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
        else if (IS_G4X(dev_priv))
                dev_priv->display.init_clock_gating = g4x_init_clock_gating;
        else if (IS_CRESTLINE(dev_priv))
                dev_priv->display.init_clock_gating = crestline_init_clock_gating;
        else if (IS_BROADWATER(dev_priv))
                dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
        else if (IS_GEN3(dev_priv))
                dev_priv->display.init_clock_gating = gen3_init_clock_gating;
        else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
                dev_priv->display.init_clock_gating = i85x_init_clock_gating;
        else if (IS_GEN2(dev_priv))
                dev_priv->display.init_clock_gating = i830_init_clock_gating;
        else {
                MISSING_CASE(INTEL_DEVID(dev_priv));
                dev_priv->display.init_clock_gating = nop_init_clock_gating;
        }
}

/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        intel_fbc_init(dev_priv);

        /* For cxsr */
        if (IS_PINEVIEW(dev))
                i915_pineview_get_mem_freq(dev);
        else if (IS_GEN5(dev))
                i915_ironlake_get_mem_freq(dev);

        /* For FIFO watermark updates */
        if (INTEL_INFO(dev)->gen >= 9) {
                skl_setup_wm_latency(dev);
                dev_priv->display.update_wm = skl_update_wm;
        } else if (HAS_PCH_SPLIT(dev)) {
                ilk_setup_wm_latency(dev);

                if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
                     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
                    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
                     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
                        dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
                        dev_priv->display.compute_intermediate_wm =
                                ilk_compute_intermediate_wm;
                        dev_priv->display.initial_watermarks =
                                ilk_initial_watermarks;
                        dev_priv->display.optimize_watermarks =
                                ilk_optimize_watermarks;
                } else {
                        DRM_DEBUG_KMS("Failed to read display plane latency. "
                                      "Disable CxSR\n");
                }
        } else if (IS_CHERRYVIEW(dev)) {
                vlv_setup_wm_latency(dev);
                dev_priv->display.update_wm = vlv_update_wm;
        } else if (IS_VALLEYVIEW(dev)) {
                vlv_setup_wm_latency(dev);
                dev_priv->display.update_wm = vlv_update_wm;
        } else if (IS_PINEVIEW(dev)) {
                if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
                                            dev_priv->is_ddr3,
                                            dev_priv->fsb_freq,
                                            dev_priv->mem_freq)) {
                        DRM_INFO("failed to find known CxSR latency "
                                 "(found ddr%s fsb freq %d, mem freq %d), "
                                 "disabling CxSR\n",
                                 (dev_priv->is_ddr3 == 1) ? "3" : "2",
                                 dev_priv->fsb_freq, dev_priv->mem_freq);
                        /* Disable CxSR and never update its watermark again */
                        intel_set_memory_cxsr(dev_priv, false);
                        dev_priv->display.update_wm = NULL;
                } else
                        dev_priv->display.update_wm = pineview_update_wm;
        } else if (IS_G4X(dev)) {
                dev_priv->display.update_wm = g4x_update_wm;
        } else if (IS_GEN4(dev)) {
                dev_priv->display.update_wm = i965_update_wm;
        } else if (IS_GEN3(dev)) {
                dev_priv->display.update_wm = i9xx_update_wm;
                dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
        } else if (IS_GEN2(dev)) {
                if (INTEL_INFO(dev)->num_pipes == 1) {
                        dev_priv->display.update_wm = i845_update_wm;
                        dev_priv->display.get_fifo_size = i845_get_fifo_size;
                } else {
                        dev_priv->display.update_wm = i9xx_update_wm;
                        dev_priv->display.get_fifo_size = i830_get_fifo_size;
                }
        } else {
                DRM_ERROR("unexpected fall-through in intel_init_pm\n");
        }
}

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

        if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
                DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
                return -EAGAIN;
        }

        I915_WRITE(GEN6_PCODE_DATA, *val);
        I915_WRITE(GEN6_PCODE_DATA1, 0);
        I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

        if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
                     500)) {
                DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
                return -ETIMEDOUT;
        }

        *val = I915_READ(GEN6_PCODE_DATA);
        I915_WRITE(GEN6_PCODE_DATA, 0);

        return 0;
}

int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
{
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

        if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
                DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
                return -EAGAIN;
        }

        I915_WRITE(GEN6_PCODE_DATA, val);
        I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

        if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
                     500)) {
                DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
                return -ETIMEDOUT;
        }

        I915_WRITE(GEN6_PCODE_DATA, 0);

        return 0;
}
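
/*
 * Both helpers above follow the same mailbox protocol: check that
 * GEN6_PCODE_READY is clear, load the data register(s), kick the
 * command by writing GEN6_PCODE_READY | mbox, then poll up to 500 ms
 * for the ready bit to clear again. As elsewhere in this file, calls
 * are made with rps.hw_lock held, e.g.:
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &val);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 */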

static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
        /*
         * N = val - 0xb7
         * Slow = Fast = GPLL ref * N
         */
        return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
}
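
/*
 * Worked example with an illustrative (not spec-derived) GPLL
 * reference: for rps.gpll_ref_freq = 5400 (kHz) and val = 0xc7,
 * N = 0xc7 - 0xb7 = 16, so byt_gpu_freq() returns
 * DIV_ROUND_CLOSEST(5400 * 16, 1000) = 86 (MHz).
 */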

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
        return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
        /*
         * N = val / 2
         * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
         */
        return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
        /* CHV needs even values */
        return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
}
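
/*
 * The same illustration for CHV: with rps.gpll_ref_freq = 5400 (kHz)
 * and val = 0x20, chv_gpu_freq() returns
 * DIV_ROUND_CLOSEST(5400 * 32, 2 * 2 * 1000) = 43 (MHz), and
 * chv_freq_opcode(dev_priv, 43) yields
 * DIV_ROUND_CLOSEST(2 * 1000 * 43, 5400) * 2 = 32 again; the trailing
 * "* 2" is what keeps the returned opcode even.
 */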

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
        if (IS_GEN9(dev_priv))
                return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
                                         GEN9_FREQ_SCALER);
        else if (IS_CHERRYVIEW(dev_priv))
                return chv_gpu_freq(dev_priv, val);
        else if (IS_VALLEYVIEW(dev_priv))
                return byt_gpu_freq(dev_priv, val);
        else
                return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
        if (IS_GEN9(dev_priv))
                return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
                                         GT_FREQUENCY_MULTIPLIER);
        else if (IS_CHERRYVIEW(dev_priv))
                return chv_freq_opcode(dev_priv, val);
        else if (IS_VALLEYVIEW(dev_priv))
                return byt_freq_opcode(dev_priv, val);
        else
                return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}
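
/*
 * On the plain gen6+ path the two helpers are exact inverses: opcode 18
 * maps to 18 * GT_FREQUENCY_MULTIPLIER = 900 MHz (with the usual
 * multiplier of 50), and intel_freq_opcode(dev_priv, 900) maps straight
 * back to 18. The gen9 branches only add the GEN9_FREQ_SCALER division
 * for the finer 50/3 MHz granularity.
 */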

struct request_boost {
        struct work_struct work;
        struct drm_i915_gem_request *req;
};

static void __intel_rps_boost_work(struct work_struct *work)
{
        struct request_boost *boost = container_of(work, struct request_boost, work);
        struct drm_i915_gem_request *req = boost->req;

        if (!i915_gem_request_completed(req, true))
                gen6_rps_boost(to_i915(req->engine->dev), NULL,
                               req->emitted_jiffies);

        i915_gem_request_unreference__unlocked(req);
        kfree(boost);
}

void intel_queue_rps_boost_for_request(struct drm_device *dev,
                                       struct drm_i915_gem_request *req)
{
        struct request_boost *boost;

        if (req == NULL || INTEL_INFO(dev)->gen < 6)
                return;

        if (i915_gem_request_completed(req, true))
                return;

        boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
        if (boost == NULL)
                return;

        i915_gem_request_reference(req);
        boost->req = req;

        INIT_WORK(&boost->work, __intel_rps_boost_work);
        queue_work(to_i915(dev)->wq, &boost->work);
}
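
/*
 * The GFP_ATOMIC allocation and the bounce through the driver workqueue
 * above are presumably deliberate: callers may be in atomic context, so
 * the allocation must not sleep, and the boost itself (which takes rps
 * locks) is deferred to process context.
 */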

void intel_pm_setup(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        mutex_init(&dev_priv->rps.hw_lock);
        spin_lock_init(&dev_priv->rps.client_lock);

        INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
                          intel_gen6_powersave_work);
        INIT_LIST_HEAD(&dev_priv->rps.clients);
        INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
        INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);

        dev_priv->pm.suspended = false;
        atomic_set(&dev_priv->pm.wakeref_count, 0);
        atomic_set(&dev_priv->pm.atomic_seq, 0);
}