/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6, and in the voltage consumed
 * by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep
 * RC6, and RC6pp is the deepest RC6. Their support by hardware varies
 * according to the GPU, BIOS, chipset and platform. RC6 is usually the
 * safest one and the one which brings the most power savings; deeper
 * states save more power, but require higher latency to switch to and
 * wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
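/*
 * For example (illustrative, not tied to any particular SKU): a platform
 * that can safely use RC6 and deep RC6 but not the deepest state would be
 * described by the mask (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); which
 * combinations are actually safe depends on the platform, as noted above.
 */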
static void bxt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   PWM1_GATING_DIS | PWM2_GATING_DIS);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
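/*
 * For example, FW_WM(47, SR) expands to
 * ((47 << DSPFW_SR_SHIFT) & DSPFW_SR_MASK), placing a watermark value of
 * 47 in the self-refresh field of a DSPFW register.
 */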
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
		dev_priv->wm.vlv.cxsr = enable;
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
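/*
 * Illustrative numbers (not taken from any particular platform): a
 * 1920-pixel-wide plane at 4 bytes per pixel on a 148500 kHz pixel clock
 * drains roughly 148 * 4 = 592 bytes per microsecond, so a 5000 ns latency
 * window corresponds to 148 * 4 * 5 = 2960 bytes, i.e. about 47 64-byte
 * cachelines that must still be queued in the FIFO when a fetch starts.
 */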
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
static int vlv_get_fifo_size(struct drm_device *dev,
			     enum pipe pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite0_start, sprite1_start, size;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return 0;
	}

	switch (plane) {
	case 0:
		size = sprite0_start;
		break;
	case 1:
		size = sprite1_start - sprite0_start;
		break;
	case 2:
		size = 512 - 1 - sprite1_start;
		break;
	default:
		return 0;
	}

	DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
		      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
		      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
		      size);

	return size;
}
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	.fifo_size = VALLEYVIEW_FIFO_SIZE,
	.max_wm = VALLEYVIEW_MAX_WM,
	.default_wm = VALLEYVIEW_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past the
 * watermark point.  If the FIFO drains completely, a FIFO underrun will
 * occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand.
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
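/*
 * Worked example with made-up inputs: clock_in_khz = 148500, pixel_size = 4
 * and latency_ns = 5000 give entries_required = (148500 / 1000) * 4 * 5000 /
 * 1000 = 2960 bytes, i.e. DIV_ROUND_UP(2960, 64) = 47 entries with a 64-byte
 * cacheline. With an (illustrative) fifo_size of 96 and guard_size of 2, the
 * returned watermark would be 96 - (47 + 2) = 47, subject to the clamps above.
 */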
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size * display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
	tlb_miss = cursor->fifo_size * cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
static void vlv_write_wm_values(struct intel_crtc *crtc,
				const struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(VLV_DDL(pipe),
		   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
		   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
		   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
		   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	}

	/* zero (unused) WM1 watermarks */
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);
	I915_WRITE(DSPHOWM1, 0);

	POSTING_READ(DSPFW1);
}
enum vlv_wm_level {
	VLV_WM_LEVEL_PM2,
	VLV_WM_LEVEL_PM5,
	VLV_WM_LEVEL_DDR_DVFS,
};
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int bytes_per_pixel,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
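/*
 * Worked example with illustrative inputs: latency = 330 (33 us in 0.1 us
 * units), pixel_rate = 148500 kHz and pipe_htotal = 2200 give
 * (330 * 148500) / (2200 * 10000) = 2 whole lines, so a 1920-wide 4 bpp
 * plane needs (2 + 1) * 1920 * 4 = 23040 bytes, i.e. 23040 / 64 = 360
 * 64-byte blocks.
 */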
static void vlv_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}
static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
				     struct intel_crtc *crtc,
				     const struct intel_plane_state *state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int clock, htotal, pixel_size, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!state->visible)
		return 0;

	pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
	clock = crtc->config->base.adjusted_mode.crtc_clock;
	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
	width = crtc->config->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, pixel_size,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}
static void vlv_compute_fifo(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	unsigned int total_rate = 0;
	const int fifo_size = 512 - 1;
	int fifo_extra, fifo_left = fifo_size;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (state->visible) {
			wm_state->num_active_planes++;
			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);
		unsigned int rate;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			plane->wm.fifo_size = 63;
			continue;
		}

		if (!state->visible) {
			plane->wm.fifo_size = 0;
			continue;
		}

		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		plane->wm.fifo_size = fifo_size * rate / total_rate;
		fifo_left -= plane->wm.fifo_size;
	}

	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* give it all to the first plane if none are active */
		if (plane->wm.fifo_size == 0 &&
		    wm_state->num_active_planes)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		plane->wm.fifo_size += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(fifo_left != 0);
}
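/*
 * Illustrative split: two visible 4 bpp planes sharing the 511-entry FIFO
 * each initially get 511 * 4 / 8 = 255 entries, leaving fifo_left = 1;
 * fifo_extra = DIV_ROUND_UP(1, 2) = 1, and the first plane visited in the
 * remainder loop absorbs it, for a final 256 / 255 split.
 */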
static void vlv_invert_wms(struct intel_crtc *crtc)
{
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	int level;

	for (level = 0; level < wm_state->num_levels; level++) {
		struct drm_device *dev = crtc->base.dev;
		const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
		struct intel_plane *plane;

		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;

		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = plane->wm.fifo_size -
					wm_state->wm[level].cursor;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = plane->wm.fifo_size -
					wm_state->wm[level].primary;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
					wm_state->wm[level].sprite[sprite];
				break;
			}
		}
	}
}
static void vlv_compute_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
	int level;

	memset(wm_state, 0, sizeof(*wm_state));

	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
	wm_state->num_levels = to_i915(dev)->wm.max_level + 1;

	wm_state->num_active_planes = 0;

	vlv_compute_fifo(crtc);

	if (wm_state->num_active_planes != 1)
		wm_state->cxsr = false;

	if (wm_state->cxsr) {
		for (level = 0; level < wm_state->num_levels; level++) {
			wm_state->sr[level].plane = sr_fifo_size;
			wm_state->sr[level].cursor = 63;
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (!state->visible)
			continue;

		/* normal watermarks */
		for (level = 0; level < wm_state->num_levels; level++) {
			int wm = vlv_compute_wm_level(plane, crtc, state, level);
			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;

			/* hack */
			if (WARN_ON(level == 0 && wm > max_wm))
				wm = max_wm;

			if (wm > plane->wm.fifo_size)
				break;

			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = wm;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = wm;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = wm;
				break;
			}
		}

		wm_state->num_levels = level;

		if (!wm_state->cxsr)
			continue;

		/* maxfifo watermarks */
		switch (plane->base.type) {
			int sprite, level;
		case DRM_PLANE_TYPE_CURSOR:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].cursor =
					wm_state->wm[level].cursor;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].primary);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].sprite[sprite]);
			break;
		}
	}

	/* clear any (partially) filled invalid levels */
	for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
	}

	vlv_invert_wms(crtc);
}
#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane;
	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			WARN_ON(plane->wm.fifo_size != 63);
			continue;
		}

		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			sprite0_start = plane->wm.fifo_size;
		else if (plane->plane == 0)
			sprite1_start = sprite0_start + plane->wm.fifo_size;
		else
			fifo_size = sprite1_start + plane->wm.fifo_size;
	}

	WARN_ON(fifo_size != 512 - 1);

	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
		      pipe_name(crtc->pipe), sprite0_start,
		      sprite1_start, fifo_size);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ(DSPARB3);
		dsparb2 = I915_READ(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB3, dsparb3);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	default:
		break;
	}
}
static void vlv_merge_wm(struct drm_device *dev,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = to_i915(dev)->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(dev, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm_state;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(dev, crtc) {
		struct vlv_wm_state *wm_state = &crtc->wm_state;
		enum pipe pipe = crtc->pipe;

		if (!crtc->active)
			continue;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
	}
}
static void vlv_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct vlv_wm_values wm = {};

	vlv_compute_wm(intel_crtc);
	vlv_merge_wm(dev, &wm);

	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
		/* FIXME should be part of crtc atomic commit */
		vlv_pipe_set_fifo_size(intel_crtc);
		return;
	}

	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, false);

	if (wm.level < VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, false);

	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, false);

	/* FIXME should be part of crtc atomic commit */
	vlv_pipe_set_fifo_size(intel_crtc);

	vlv_write_wm_values(intel_crtc, &wm);

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);

	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, true);

	dev_priv->wm.vlv = wm;
}
#define single_plane_enabled(mask) is_power_of_2(mask)
static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
		int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * crtc->cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
		int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	uint32_t pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (pipe_config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;
	if (WARN_ON(!pipe_htotal))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}
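/*
 * Illustrative comparison of the two methods: with pixel_rate = 148500 kHz,
 * bytes_per_pixel = 4 and latency = 50 (5 us), method1 yields
 * DIV_ROUND_UP_ULL(148500 * 4 * 50, 64 * 10000) + 2 = 47 + 2 = 49, while
 * method2 with pipe_htotal = 2200 and horiz_pixels = 1920 yields
 * ((50 * 148500) / (2200 * 10000) + 1) * 1920 * 4 / 64 + 2 = 120 + 2 = 122;
 * callers that may use either pick the smaller of the two results.
 */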
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t bytes_per_pixel)
{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible.  But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
	if (WARN_ON(!bytes_per_pixel))
		return 0;
	if (WARN_ON(!horiz_pixels))
		return 0;

	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
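/*
 * Example continuing the numbers above: pri_val = 49 on a 1920-wide 4 bpp
 * plane gives DIV_ROUND_UP(49 * 64, 1920 * 4) + 2 = 1 + 2 = 3 FBC lines.
 */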
struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value,
				   bool is_lp)
{
	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->dst),
				 bpp,
				 mem_value);

	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->dst),
				 bpp,
				 mem_value);
	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
			      cstate->base.adjusted_mode.crtc_htotal,
			      drm_rect_width(&pstate->dst),
			      bpp,
			      mem_value);
}
/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val)
{
	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), bpp);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}
static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}
static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}
static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}
/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev, level, false);
	max->spr = ilk_plane_wm_reg_max(dev, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev, level);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *intel_crtc,
				 int level,
				 struct intel_crtc_state *cstate,
				 struct intel_plane_state *pristate,
				 struct intel_plane_state *sprstate,
				 struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	result->pri_val = ilk_compute_pri_wm(cstate, pristate,
					     pri_latency, level);
	result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
	result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
	result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
	result->enable = true;
}
static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev,
			struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *adjusted_mode =
		&cstate->base.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!cstate->base.active)
		return 0;
	if (WARN_ON(adjusted_mode->crtc_clock == 0))
		return 0;
	if (WARN_ON(dev_priv->cdclk_freq == 0))
		return 0;

	/* The watermarks are computed based on how long it takes to fill a
	 * single row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
				     adjusted_mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
					 dev_priv->cdclk_freq);

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}
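/*
 * Illustrative linetime: crtc_htotal = 2200 at crtc_clock = 148500 kHz gives
 * DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119 in eighths of a
 * microsecond, i.e. each scanline takes roughly 14.8 us to fill.
 */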
2046 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
2048 struct drm_i915_private *dev_priv = dev->dev_private;
2053 int level, max_level = ilk_wm_max_level(dev);
2055 /* read the first set of memory latencies[0:3] */
2056 val = 0; /* data0 to be programmed to 0 for first set */
2057 mutex_lock(&dev_priv->rps.hw_lock);
2058 ret = sandybridge_pcode_read(dev_priv,
2059 GEN9_PCODE_READ_MEM_LATENCY,
2061 mutex_unlock(&dev_priv->rps.hw_lock);
2064 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2068 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2069 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2070 GEN9_MEM_LATENCY_LEVEL_MASK;
2071 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2072 GEN9_MEM_LATENCY_LEVEL_MASK;
2073 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2074 GEN9_MEM_LATENCY_LEVEL_MASK;
2076 /* read the second set of memory latencies[4:7] */
2077 val = 1; /* data0 to be programmed to 1 for second set */
2078 mutex_lock(&dev_priv->rps.hw_lock);
2079 ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
2082 mutex_unlock(&dev_priv->rps.hw_lock);
		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}
2088 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2089 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2090 GEN9_MEM_LATENCY_LEVEL_MASK;
2091 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2092 GEN9_MEM_LATENCY_LEVEL_MASK;
2093 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2094 GEN9_MEM_LATENCY_LEVEL_MASK;
		/*
		 * WaWmMemoryReadLatency:skl
		 *
2099 * punit doesn't take into account the read latency so we need
		 * to add 2us to the various latency levels we retrieve from
		 * the punit.
2102 * - W0 is a bit special in that it's the only level that
2103 * can't be disabled if we want to have display working, so
2104 * we always add 2us there.
2105 * - For levels >=1, punit returns 0us latency when they are
2106 * disabled, so we respect that and don't add 2us then
2108 * Additionally, if a level n (n > 1) has a 0us latency, all
2109 * levels m (m >= n) need to be disabled. We make sure to
		 * sanitize the values out of the punit to satisfy this
		 * requirement.
		 */
		wm[0] += 2;
		for (level = 1; level <= max_level; level++)
			if (wm[level] != 0)
				wm[level] += 2;
			else {
				for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;
				break;
			}
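		/*
		 * Worked example (illustrative, assumed punit values): raw
		 * latencies {2, 4, 6, 0, 8, ...} become {4, 6, 8, 0, 0, ...}:
		 * WM0..WM2 each gain the 2us penalty, while WM4+ are zeroed
		 * because WM3 came back disabled.
		 */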
2123 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2124 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2126 wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
2129 wm[1] = (sskpd >> 4) & 0xFF;
2130 wm[2] = (sskpd >> 12) & 0xFF;
2131 wm[3] = (sskpd >> 20) & 0x1FF;
2132 wm[4] = (sskpd >> 32) & 0x1FF;
2133 } else if (INTEL_INFO(dev)->gen >= 6) {
2134 uint32_t sskpd = I915_READ(MCH_SSKPD);
2136 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2137 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2138 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2139 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2140 } else if (INTEL_INFO(dev)->gen >= 5) {
2141 uint32_t mltr = I915_READ(MLTR_ILK);
		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
2145 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2146 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2150 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2152 /* ILK sprite LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;
2157 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2159 /* ILK cursor LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;
2163 /* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev))
		wm[3] *= 2;
2168 int ilk_wm_max_level(const struct drm_device *dev)
2170 /* how many WM levels are we expecting */
	if (INTEL_INFO(dev)->gen >= 9)
		return 7;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 4;
	else if (INTEL_INFO(dev)->gen >= 6)
		return 3;
	else
		return 2;
2181 static void intel_print_wm_latency(struct drm_device *dev,
				   const char *name,
				   const uint16_t wm[8])
2185 int level, max_level = ilk_wm_max_level(dev);
2187 for (level = 0; level <= max_level; level++) {
2188 unsigned int latency = wm[level];
		if (latency == 0) {
			DRM_ERROR("%s WM%d latency not provided\n",
				  name, level);
			continue;
		}
		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (INTEL_INFO(dev)->gen >= 9)
			latency *= 10;
		else if (level > 0)
			latency *= 5;
2205 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2206 name, level, wm[level],
2207 latency / 10, latency % 10);
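/*
 * Example output (illustrative): with the scaling above, a raw WM1 value
 * of 4 on a pre-gen9 part (0.5us units) is printed as
 * "WM1 latency 4 (2.0 usec)" - the stored value first, then the decoded
 * microseconds.
 */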
2211 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2212 uint16_t wm[5], uint16_t min)
2214 int level, max_level = ilk_wm_max_level(dev_priv->dev);
	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
2220 for (level = 1; level <= max_level; level++)
		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}
2226 static void snb_wm_latency_quirk(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool changed;

	/*
2232 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
2235 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2236 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2237 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
	if (!changed)
		return;

	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2243 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2244 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2245 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
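/*
 * Note on the magic 12 above (worked through, illustrative): WM0 latencies
 * are kept in 0.1us units, so the quirk enforces a 1.2us floor there, while
 * WM1+ values are in 0.5us units and get a floor of
 * DIV_ROUND_UP(12, 5) = 3, i.e. 1.5us.
 */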
2248 static void ilk_setup_wm_latency(struct drm_device *dev)
2250 struct drm_i915_private *dev_priv = dev->dev_private;
2252 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2254 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2255 sizeof(dev_priv->wm.pri_latency));
2256 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2257 sizeof(dev_priv->wm.pri_latency));
2259 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2260 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2262 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2263 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2264 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
	if (IS_GEN6(dev))
		snb_wm_latency_quirk(dev);
2270 static void skl_setup_wm_latency(struct drm_device *dev)
2272 struct drm_i915_private *dev_priv = dev->dev_private;
2274 intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
2275 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
2278 /* Compute new watermarks for the pipe */
2279 static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
2280 struct drm_atomic_state *state)
2282 struct intel_pipe_wm *pipe_wm;
2283 struct drm_device *dev = intel_crtc->base.dev;
2284 const struct drm_i915_private *dev_priv = dev->dev_private;
2285 struct intel_crtc_state *cstate = NULL;
2286 struct intel_plane *intel_plane;
2287 struct drm_plane_state *ps;
2288 struct intel_plane_state *pristate = NULL;
2289 struct intel_plane_state *sprstate = NULL;
2290 struct intel_plane_state *curstate = NULL;
2291 int level, max_level = ilk_wm_max_level(dev);
2292 /* LP0 watermark maximums depend on this pipe alone */
2293 struct intel_wm_config config = {
		.num_pipes_active = 1,
	};
2296 struct ilk_wm_maximums max;
2298 cstate = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(cstate))
		return PTR_ERR(cstate);
2302 pipe_wm = &cstate->wm.optimal.ilk;
2303 memset(pipe_wm, 0, sizeof(*pipe_wm));
2305 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2306 ps = drm_atomic_get_plane_state(state,
						&intel_plane->base);
		if (IS_ERR(ps))
			return PTR_ERR(ps);
2311 if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
2312 pristate = to_intel_plane_state(ps);
2313 else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
2314 sprstate = to_intel_plane_state(ps);
2315 else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
2316 curstate = to_intel_plane_state(ps);
2319 config.sprites_enabled = sprstate->visible;
2320 config.sprites_scaled = sprstate->visible &&
2321 (drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
2322 drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
2324 pipe_wm->pipe_enabled = cstate->base.active;
2325 pipe_wm->sprites_enabled = config.sprites_enabled;
2326 pipe_wm->sprites_scaled = config.sprites_scaled;
2328 /* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_INFO(dev)->gen <= 6 && sprstate->visible)
		max_level = 1;
2332 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (config.sprites_scaled)
		max_level = 0;
2336 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
2337 pristate, sprstate, curstate, &pipe_wm->wm[0]);
2339 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2340 pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);
2342 /* LP0 watermarks always use 1/2 DDB partitioning */
2343 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2345 /* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
		return -EINVAL;
2349 ilk_compute_wm_reg_maximums(dev, 1, &max);
2351 for (level = 1; level <= max_level; level++) {
2352 struct intel_wm_level wm = {};
2354 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
2355 pristate, sprstate, curstate, &wm);
		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (!ilk_validate_wm_level(level, &max, &wm))
			break;

		pipe_wm->wm[level] = wm;
	}

	return 0;
}
2372 * Merge the watermarks from all active pipes for a specific level.
2374 static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
			       struct intel_wm_level *ret_wm)
2378 const struct intel_crtc *intel_crtc;
2380 ret_wm->enable = true;
2382 for_each_intel_crtc(dev, intel_crtc) {
2383 const struct intel_crtc_state *cstate =
2384 to_intel_crtc_state(intel_crtc->base.state);
2385 const struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
2386 const struct intel_wm_level *wm = &active->wm[level];
		if (!active->pipe_enabled)
			continue;
		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;
2399 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2400 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2401 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2402 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2407 * Merge all low power watermarks for all active pipes.
2409 static void ilk_wm_merge(struct drm_device *dev,
2410 const struct intel_wm_config *config,
2411 const struct ilk_wm_maximums *max,
2412 struct intel_pipe_wm *merged)
2414 struct drm_i915_private *dev_priv = dev->dev_private;
2415 int level, max_level = ilk_wm_max_level(dev);
2416 int last_enabled_level = max_level;
2418 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2419 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
	    config->num_pipes_active > 1)
		last_enabled_level = 0;
2423 /* ILK: FBC WM must be disabled always */
2424 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2426 /* merge each WM1+ level */
2427 for (level = 1; level <= max_level; level++) {
2428 struct intel_wm_level *wm = &merged->wm[level];
2430 ilk_merge_wm_level(dev, level, wm);
		if (level > last_enabled_level)
			wm->enable = false;
2434 else if (!ilk_validate_wm_level(level, max, wm))
2435 /* make sure all following levels get disabled */
2436 last_enabled_level = level - 1;
2439 * The spec says it is preferred to disable
2440 * FBC WMs instead of disabling a WM level.
2442 if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}
2449 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2451 * FIXME this is racy. FBC might get enabled later.
2452 * What we should check here is whether FBC can be
2453 * enabled sometime later.
2455 if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
2456 intel_fbc_is_active(dev_priv)) {
2457 for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}
2465 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2467 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2468 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
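/*
 * Mapping examples (worked through): with wm[4] disabled, LP1/LP2/LP3 map
 * to levels 1/2/3 (the boolean adds 0); with wm[4] enabled they map to
 * 1/3/4, since LP2 and LP3 each shift up by one.
 */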
2471 /* The value we need to program into the WM_LPx latency field */
2472 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2474 struct drm_i915_private *dev_priv = dev->dev_private;
2476 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 2 * level;
	else
		return dev_priv->wm.pri_latency[level];
2482 static void ilk_compute_wm_results(struct drm_device *dev,
2483 const struct intel_pipe_wm *merged,
2484 enum intel_ddb_partitioning partitioning,
2485 struct ilk_wm_values *results)
	struct intel_crtc *intel_crtc;
	int level, wm_lp;
2490 results->enable_fbc_wm = merged->fbc_wm_enabled;
2491 results->partitioning = partitioning;
2493 /* LP1+ register values */
2494 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2495 const struct intel_wm_level *r;
2497 level = ilk_wm_lp_to_level(wm_lp, merged);
2499 r = &merged->wm[level];
2502 * Maintain the watermark values even if the level is
2503 * disabled. Doing otherwise could cause underruns.
2505 results->wm_lp[wm_lp - 1] =
2506 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2513 if (INTEL_INFO(dev)->gen >= 8)
2514 results->wm_lp[wm_lp - 1] |=
2515 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;
2521 * Always set WM1S_LP_EN when spr_val != 0, even if the
2522 * level is disabled. Doing otherwise could cause underruns.
2524 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2525 WARN_ON(wm_lp != 1);
2526 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2531 /* LP0 register values */
2532 for_each_intel_crtc(dev, intel_crtc) {
2533 const struct intel_crtc_state *cstate =
2534 to_intel_crtc_state(intel_crtc->base.state);
2535 enum pipe pipe = intel_crtc->pipe;
2536 const struct intel_wm_level *r = &cstate->wm.optimal.ilk.wm[0];
		if (WARN_ON(!r->enable))
			continue;
2541 results->wm_linetime[pipe] = cstate->wm.optimal.ilk.linetime;
2543 results->wm_pipe[pipe] =
2544 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
	}
}
2550 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2551 * case both are at the same level. Prefer r1 in case they're the same. */
2552 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2553 struct intel_pipe_wm *r1,
2554 struct intel_pipe_wm *r2)
2556 int level, max_level = ilk_wm_max_level(dev);
2557 int level1 = 0, level2 = 0;
2559 for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}
2566 if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}
2578 /* dirty bits used to track which watermarks need changes */
2579 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2580 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2581 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2582 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2583 #define WM_DIRTY_FBC (1 << 24)
2584 #define WM_DIRTY_DDB (1 << 25)
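/*
 * An illustrative (not compiled) sketch of how these dirty bits combine;
 * the values are hypothetical:
 */
#if 0
	unsigned int dirty = WM_DIRTY_PIPE(PIPE_A) | WM_DIRTY_LINETIME(PIPE_A);

	/* bit 0 (pipe A) and bit 8 (pipe A linetime) are set, no LP bits yet */
	WARN_ON(dirty & WM_DIRTY_LP_ALL);

	dirty |= WM_DIRTY_LP(1) | WM_DIRTY_LP(2);	/* bits 16 and 17 */
#endif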
2586 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
2587 const struct ilk_wm_values *old,
2588 const struct ilk_wm_values *new)
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;
2594 for_each_pipe(dev_priv, pipe) {
2595 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2596 dirty |= WM_DIRTY_LINETIME(pipe);
2597 /* Must disable LP1+ watermarks too */
2598 dirty |= WM_DIRTY_LP_ALL;
2601 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2602 dirty |= WM_DIRTY_PIPE(pipe);
2603 /* Must disable LP1+ watermarks too */
2604 dirty |= WM_DIRTY_LP_ALL;
2608 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2609 dirty |= WM_DIRTY_FBC;
2610 /* Must disable LP1+ watermarks too */
2611 dirty |= WM_DIRTY_LP_ALL;
2614 if (old->partitioning != new->partitioning) {
2615 dirty |= WM_DIRTY_DDB;
2616 /* Must disable LP1+ watermarks too */
2617 dirty |= WM_DIRTY_LP_ALL;
2620 /* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;
2624 /* Find the lowest numbered LP1+ watermark in need of an update... */
2625 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2626 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}
2631 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2632 for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}
static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
2641 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2642 bool changed = false;
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}
	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
	return changed;
}
2669 * The spec says we shouldn't write when we don't need, because every write
2670 * causes WMs to be re-evaluated, expending some power.
2672 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2673 struct ilk_wm_values *results)
2675 struct drm_device *dev = dev_priv->dev;
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);
2686 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2687 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2688 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2689 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2690 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2691 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2693 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2694 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2695 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2696 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2697 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2698 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2700 if (dirty & WM_DIRTY_DDB) {
2701 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2702 val = I915_READ(WM_MISC);
2703 if (results->partitioning == INTEL_DDB_PART_1_2)
2704 val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
2707 I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
2710 if (results->partitioning == INTEL_DDB_PART_1_2)
2711 val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
2714 I915_WRITE(DISP_ARB_CTL2, val);
2718 if (dirty & WM_DIRTY_FBC) {
2719 val = I915_READ(DISP_ARB_CTL);
2720 if (results->enable_fbc_wm)
2721 val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
2724 I915_WRITE(DISP_ARB_CTL, val);
2727 if (dirty & WM_DIRTY_LP(1) &&
2728 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2729 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2731 if (INTEL_INFO(dev)->gen >= 7) {
2732 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2733 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2734 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2735 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2738 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2739 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2740 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2741 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2742 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2743 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2745 dev_priv->wm.hw = *results;
2748 static bool ilk_disable_lp_wm(struct drm_device *dev)
2750 struct drm_i915_private *dev_priv = dev->dev_private;
2752 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2756 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
2757 * different active planes.
2760 #define SKL_DDB_SIZE 896 /* in blocks */
2761 #define BXT_DDB_SIZE 512
2764 * Return the index of a plane in the SKL DDB and wm result arrays. Primary
2765 * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
2766 * other universal planes are in indices 1..n. Note that this may leave unused
2767 * indices between the top "sprite" plane and the cursor.
static int
skl_wm_plane_id(const struct intel_plane *plane)
2772 switch (plane->base.type) {
	case DRM_PLANE_TYPE_PRIMARY:
		return 0;
2775 case DRM_PLANE_TYPE_CURSOR:
2776 return PLANE_CURSOR;
2777 case DRM_PLANE_TYPE_OVERLAY:
2778 return plane->plane + 1;
	default:
		MISSING_CASE(plane->base.type);
2781 return plane->plane;
2786 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2787 const struct intel_crtc_state *cstate,
2788 const struct intel_wm_config *config,
2789 struct skl_ddb_entry *alloc /* out */)
2791 struct drm_crtc *for_crtc = cstate->base.crtc;
2792 struct drm_crtc *crtc;
2793 unsigned int pipe_size, ddb_size;
2794 int nth_active_pipe;
	if (!cstate->base.active) {
		alloc->start = 0;
		alloc->end = 0;
		return;
	}
2802 if (IS_BROXTON(dev))
2803 ddb_size = BXT_DDB_SIZE;
	else
		ddb_size = SKL_DDB_SIZE;
2807 ddb_size -= 4; /* 4 blocks for bypass path allocation */
2809 nth_active_pipe = 0;
2810 for_each_crtc(dev, crtc) {
		if (!to_intel_crtc(crtc)->active)
			continue;

		if (crtc == for_crtc)
			break;

		nth_active_pipe++;
	}
2820 pipe_size = ddb_size / config->num_pipes_active;
2821 alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
2822 alloc->end = alloc->start + pipe_size;
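/*
 * Worked split (illustrative, assuming a SKL with pipes A and B active):
 * ddb_size = 896 - 4 = 892 blocks, pipe_size = 892 / 2 = 446, so pipe A
 * gets [0, 446) and pipe B, as the second active pipe, gets [446, 892).
 */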
2825 static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
	if (config->num_pipes_active == 1)
		return 32;

	return 8;
}
2833 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
2835 entry->start = reg & 0x3ff;
	entry->end = (reg >> 16) & 0x3ff;

	if (entry->end)
		entry->end += 1;
}
2841 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2842 struct skl_ddb_allocation *ddb /* out */)
2848 memset(ddb, 0, sizeof(*ddb));
2850 for_each_pipe(dev_priv, pipe) {
		if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe)))
			continue;
2854 for_each_plane(dev_priv, pipe, plane) {
2855 val = I915_READ(PLANE_BUF_CFG(pipe, plane));
			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
						   val);
		}
2860 val = I915_READ(CUR_BUF_CFG(pipe));
		skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
					   val);
	}
}
static unsigned int
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
			     const struct drm_plane_state *pstate,
			     int y)
2871 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
2872 struct drm_framebuffer *fb = pstate->fb;
2874 /* for planar format */
2875 if (fb->pixel_format == DRM_FORMAT_NV12) {
2876 if (y) /* y-plane data rate */
2877 return intel_crtc->config->pipe_src_w *
2878 intel_crtc->config->pipe_src_h *
2879 drm_format_plane_cpp(fb->pixel_format, 0);
2880 else /* uv-plane data rate */
2881 return (intel_crtc->config->pipe_src_w/2) *
2882 (intel_crtc->config->pipe_src_h/2) *
2883 drm_format_plane_cpp(fb->pixel_format, 1);
2886 /* for packed formats */
2887 return intel_crtc->config->pipe_src_w *
2888 intel_crtc->config->pipe_src_h *
2889 drm_format_plane_cpp(fb->pixel_format, 0);
2893 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
 * an 8192x4096@32bpp framebuffer:
2895 * 3 * 4096 * 8192 * 4 < 2^32
static unsigned int
skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate)
2900 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
2901 struct drm_device *dev = intel_crtc->base.dev;
2902 const struct intel_plane *intel_plane;
2903 unsigned int total_data_rate = 0;
2905 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2906 const struct drm_plane_state *pstate = intel_plane->base.state;
		if (pstate->fb == NULL)
			continue;

		if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;
		total_data_rate += skl_plane_relative_data_rate(cstate,
								pstate,
								0);

		if (pstate->fb->pixel_format == DRM_FORMAT_NV12)
			total_data_rate += skl_plane_relative_data_rate(cstate,
									pstate,
									1);
	}
2926 return total_data_rate;
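/*
 * Example rates (illustrative, hypothetical 1920x1080 source): a packed
 * XRGB8888 plane contributes 1920 * 1080 * 4 = 8294400, while an NV12 plane
 * is counted as two rates: y = 1920 * 1080 * 1 = 2073600 and
 * uv = 960 * 540 * 2 = 1036800.
 */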
static void
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
2931 struct skl_ddb_allocation *ddb /* out */)
2933 struct drm_crtc *crtc = cstate->base.crtc;
2934 struct drm_device *dev = crtc->dev;
2935 struct drm_i915_private *dev_priv = to_i915(dev);
2936 struct intel_wm_config *config = &dev_priv->wm.config;
2937 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2938 struct intel_plane *intel_plane;
2939 enum pipe pipe = intel_crtc->pipe;
2940 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
2941 uint16_t alloc_size, start, cursor_blocks;
2942 uint16_t minimum[I915_MAX_PLANES];
2943 uint16_t y_minimum[I915_MAX_PLANES];
2944 unsigned int total_data_rate;
2946 skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc);
2947 alloc_size = skl_ddb_entry_size(alloc);
2948 if (alloc_size == 0) {
2949 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
2950 memset(&ddb->plane[pipe][PLANE_CURSOR], 0,
		       sizeof(ddb->plane[pipe][PLANE_CURSOR]));
		return;
	}
2955 cursor_blocks = skl_cursor_allocation(config);
2956 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
2957 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
2959 alloc_size -= cursor_blocks;
2960 alloc->end -= cursor_blocks;
2962 /* 1. Allocate the mininum required blocks for each active plane */
2963 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2964 struct drm_plane *plane = &intel_plane->base;
2965 struct drm_framebuffer *fb = plane->state->fb;
2966 int id = skl_wm_plane_id(intel_plane);
		if (fb == NULL)
			continue;
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		minimum[id] = 8;
		alloc_size -= minimum[id];
2975 y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0;
2976 alloc_size -= y_minimum[id];
2980 * 2. Distribute the remaining space in proportion to the amount of
2981 * data each plane needs to fetch from memory.
2983 * FIXME: we may not allocate every single block here.
2985 total_data_rate = skl_get_total_relative_data_rate(cstate);
2987 start = alloc->start;
2988 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2989 struct drm_plane *plane = &intel_plane->base;
2990 struct drm_plane_state *pstate = intel_plane->base.state;
2991 unsigned int data_rate, y_data_rate;
2992 uint16_t plane_blocks, y_plane_blocks = 0;
2993 int id = skl_wm_plane_id(intel_plane);
		if (pstate->fb == NULL)
			continue;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;
3000 data_rate = skl_plane_relative_data_rate(cstate, pstate, 0);
		/*
		 * allocation for (packed formats) or (uv-plane part of planar format):
		 * promote the expression to 64 bits to avoid overflowing, the
		 * result is < available as data_rate / total_data_rate < 1
		 */
3007 plane_blocks = minimum[id];
		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
					total_data_rate);
3011 ddb->plane[pipe][id].start = start;
3012 ddb->plane[pipe][id].end = start + plane_blocks;
3014 start += plane_blocks;
		/*
		 * allocation for y_plane part of planar format:
		 */
3019 if (pstate->fb->pixel_format == DRM_FORMAT_NV12) {
			y_data_rate = skl_plane_relative_data_rate(cstate,
								   pstate,
								   1);
3023 y_plane_blocks = y_minimum[id];
			y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
						  total_data_rate);
3027 ddb->y_plane[pipe][id].start = start;
3028 ddb->y_plane[pipe][id].end = start + y_plane_blocks;
3030 start += y_plane_blocks;
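		/*
		 * Worked distribution (illustrative, assumed numbers): with
		 * alloc_size = 400 blocks left after the minimums and two
		 * planes fetching 75% and 25% of the data, the div_u64()
		 * above hands them minimum[id] + 300 and minimum[id] + 100
		 * blocks respectively.
		 */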
3037 static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
3039 /* TODO: Take into account the scalers once we support them */
3040 return config->base.adjusted_mode.crtc_clock;
3044 * The max latency should be 257 (max the punit can code is 255 and we add 2us
3045 * for the read latency) and bytes_per_pixel should always be <= 8, so that
3046 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
3047 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t wm_intermediate_val, ret;

	if (latency == 0)
		return UINT_MAX;
3057 wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
	ret = DIV_ROUND_UP(wm_intermediate_val, 1000);

	return ret;
}
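/*
 * Worked example (illustrative, assumed inputs): pixel_rate = 148500 (kHz),
 * 4 bytes per pixel and latency = 15us gives
 * 15 * 148500 * 4 / 512 = 17402, and DIV_ROUND_UP(17402, 1000) = 18 blocks.
 */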
3063 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
3064 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
3065 uint64_t tiling, uint32_t latency)
{
	uint32_t ret;
	uint32_t plane_bytes_per_line, plane_blocks_per_line;
	uint32_t wm_intermediate_val;

	if (latency == 0)
		return UINT_MAX;

	plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
3076 if (tiling == I915_FORMAT_MOD_Y_TILED ||
3077 tiling == I915_FORMAT_MOD_Yf_TILED) {
3078 plane_bytes_per_line *= 4;
3079 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3080 plane_blocks_per_line /= 4;
	} else {
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
	}
3085 wm_intermediate_val = latency * pixel_rate;
3086 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
	      plane_blocks_per_line;

	return ret;
}
3092 static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
3093 const struct intel_crtc *intel_crtc)
3095 struct drm_device *dev = intel_crtc->base.dev;
3096 struct drm_i915_private *dev_priv = dev->dev_private;
3097 const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
	/*
	 * If ddb allocation of pipes changed, it may require recalculation of
	 * watermarks
	 */
	if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe)))
		return true;

	return false;
}
3109 static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3110 struct intel_crtc_state *cstate,
3111 struct intel_plane *intel_plane,
3112 uint16_t ddb_allocation,
				 int level,
				 uint16_t *out_blocks, /* out */
3115 uint8_t *out_lines /* out */)
3117 struct drm_plane *plane = &intel_plane->base;
3118 struct drm_framebuffer *fb = plane->state->fb;
3119 uint32_t latency = dev_priv->wm.skl_latency[level];
3120 uint32_t method1, method2;
3121 uint32_t plane_bytes_per_line, plane_blocks_per_line;
3122 uint32_t res_blocks, res_lines;
3123 uint32_t selected_result;
3124 uint8_t bytes_per_pixel;
	if (latency == 0 || !cstate->base.active || !fb)
		return false;
3129 bytes_per_pixel = drm_format_plane_cpp(fb->pixel_format, 0);
	method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
				 bytes_per_pixel,
				 latency);
	method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 cstate->pipe_src_w,
				 bytes_per_pixel,
				 fb->modifier[0],
				 latency);
3140 plane_bytes_per_line = cstate->pipe_src_w * bytes_per_pixel;
3141 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3143 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3144 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
3145 uint32_t min_scanlines = 4;
3146 uint32_t y_tile_minimum;
3147 if (intel_rotation_90_or_270(plane->state->rotation)) {
3148 int bpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
3149 drm_format_plane_cpp(fb->pixel_format, 1) :
3150 drm_format_plane_cpp(fb->pixel_format, 0);
			switch (bpp) {
			case 1:
				min_scanlines = 16;
				break;
			case 2:
				min_scanlines = 8;
				break;
			case 8:
				WARN(1, "Unsupported pixel depth for rotation");
			}
		}
3163 y_tile_minimum = plane_blocks_per_line * min_scanlines;
3164 selected_result = max(method2, y_tile_minimum);
	} else {
		if ((ddb_allocation / plane_blocks_per_line) >= 1)
			selected_result = min(method1, method2);
		else
			selected_result = method1;
	}
3172 res_blocks = selected_result + 1;
3173 res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
3175 if (level >= 1 && level <= 7) {
3176 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
		    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
			res_lines += 4;
		else
			res_blocks++;
	}

	if (res_blocks >= ddb_allocation || res_lines > 31)
		return false;
3186 *out_blocks = res_blocks;
	*out_lines = res_lines;

	return true;
}
3192 static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3193 struct skl_ddb_allocation *ddb,
3194 struct intel_crtc_state *cstate,
				 int level,
				 struct skl_wm_level *result)
3198 struct drm_device *dev = dev_priv->dev;
3199 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3200 struct intel_plane *intel_plane;
3201 uint16_t ddb_blocks;
3202 enum pipe pipe = intel_crtc->pipe;
3204 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3205 int i = skl_wm_plane_id(intel_plane);
3207 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
		result->plane_en[i] = skl_compute_plane_wm(dev_priv,
						cstate,
						intel_plane,
						ddb_blocks,
						level,
						&result->plane_res_b[i],
						&result->plane_res_l[i]);
	}
}
static uint32_t
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
	if (!cstate->base.active)
		return 0;

	if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
		return 0;
3228 return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
3229 skl_pipe_pixel_rate(cstate));
3232 static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
3233 struct skl_wm_level *trans_wm /* out */)
3235 struct drm_crtc *crtc = cstate->base.crtc;
3236 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3237 struct intel_plane *intel_plane;
	if (!cstate->base.active)
		return;
3242 /* Until we know more, just disable transition WMs */
3243 for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
3244 int i = skl_wm_plane_id(intel_plane);
3246 trans_wm->plane_en[i] = false;
3250 static void skl_compute_pipe_wm(struct intel_crtc_state *cstate,
3251 struct skl_ddb_allocation *ddb,
3252 struct skl_pipe_wm *pipe_wm)
3254 struct drm_device *dev = cstate->base.crtc->dev;
3255 const struct drm_i915_private *dev_priv = dev->dev_private;
3256 int level, max_level = ilk_wm_max_level(dev);
3258 for (level = 0; level <= max_level; level++) {
3259 skl_compute_wm_level(dev_priv, ddb, cstate,
3260 level, &pipe_wm->wm[level]);
3262 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
3264 skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
3267 static void skl_compute_wm_results(struct drm_device *dev,
3268 struct skl_pipe_wm *p_wm,
3269 struct skl_wm_values *r,
3270 struct intel_crtc *intel_crtc)
3272 int level, max_level = ilk_wm_max_level(dev);
	enum pipe pipe = intel_crtc->pipe;
	uint32_t temp;
	int i;
3277 for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
			temp = 0;
3281 temp |= p_wm->wm[level].plane_res_l[i] <<
3282 PLANE_WM_LINES_SHIFT;
3283 temp |= p_wm->wm[level].plane_res_b[i];
3284 if (p_wm->wm[level].plane_en[i])
3285 temp |= PLANE_WM_EN;
3287 r->plane[pipe][i][level] = temp;
		}

		temp = 0;
		temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
3293 temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];
3295 if (p_wm->wm[level].plane_en[PLANE_CURSOR])
3296 temp |= PLANE_WM_EN;
3298 r->plane[pipe][PLANE_CURSOR][level] = temp;
3302 /* transition WMs */
3303 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
		temp = 0;
		temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
3306 temp |= p_wm->trans_wm.plane_res_b[i];
3307 if (p_wm->trans_wm.plane_en[i])
3308 temp |= PLANE_WM_EN;
3310 r->plane_trans[pipe][i] = temp;
	}

	temp = 0;
	temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
3315 temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
3316 if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
3317 temp |= PLANE_WM_EN;
3319 r->plane_trans[pipe][PLANE_CURSOR] = temp;
3321 r->wm_linetime[pipe] = p_wm->linetime;
3324 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
				i915_reg_t reg,
				const struct skl_ddb_entry *entry)
{
	if (entry->end)
		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
	else
		I915_WRITE(reg, 0);
}
3334 static void skl_write_wm_values(struct drm_i915_private *dev_priv,
3335 const struct skl_wm_values *new)
3337 struct drm_device *dev = dev_priv->dev;
3338 struct intel_crtc *crtc;
3340 for_each_intel_crtc(dev, crtc) {
3341 int i, level, max_level = ilk_wm_max_level(dev);
3342 enum pipe pipe = crtc->pipe;
		if (!new->dirty[pipe])
			continue;
3347 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
3349 for (level = 0; level <= max_level; level++) {
3350 for (i = 0; i < intel_num_planes(crtc); i++)
3351 I915_WRITE(PLANE_WM(pipe, i, level),
3352 new->plane[pipe][i][level]);
3353 I915_WRITE(CUR_WM(pipe, level),
3354 new->plane[pipe][PLANE_CURSOR][level]);
3356 for (i = 0; i < intel_num_planes(crtc); i++)
3357 I915_WRITE(PLANE_WM_TRANS(pipe, i),
3358 new->plane_trans[pipe][i]);
3359 I915_WRITE(CUR_WM_TRANS(pipe),
3360 new->plane_trans[pipe][PLANE_CURSOR]);
3362 for (i = 0; i < intel_num_planes(crtc); i++) {
3363 skl_ddb_entry_write(dev_priv,
3364 PLANE_BUF_CFG(pipe, i),
3365 &new->ddb.plane[pipe][i]);
3366 skl_ddb_entry_write(dev_priv,
3367 PLANE_NV12_BUF_CFG(pipe, i),
3368 &new->ddb.y_plane[pipe][i]);
3371 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
3372 &new->ddb.plane[pipe][PLANE_CURSOR]);
3377 * When setting up a new DDB allocation arrangement, we need to correctly
3378 * sequence the times at which the new allocations for the pipes are taken into
3379 * account or we'll have pipes fetching from space previously allocated to
3382 * Roughly the sequence looks like:
3383 * 1. re-allocate the pipe(s) with the allocation being reduced and not
3384 * overlapping with a previous light-up pipe (another way to put it is:
 * pipes with their new allocation strictly included into their old ones).
3386 * 2. re-allocate the other pipes that get their allocation reduced
3387 * 3. allocate the pipes having their allocation increased
3389 * Steps 1. and 2. are here to take care of the following case:
3390 * - Initially DDB looks like this:
3393 * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
3397 * We need to sequence the re-allocation: C, B, A (and not B, C, A).
static void
skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
{
	int plane;

	DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
3407 for_each_plane(dev_priv, pipe, plane) {
3408 I915_WRITE(PLANE_SURF(pipe, plane),
3409 I915_READ(PLANE_SURF(pipe, plane)));
3411 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
}

static bool
skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
3416 const struct skl_ddb_allocation *new,
3419 uint16_t old_size, new_size;
3421 old_size = skl_ddb_entry_size(&old->pipe[pipe]);
3422 new_size = skl_ddb_entry_size(&new->pipe[pipe]);
3424 return old_size != new_size &&
3425 new->pipe[pipe].start >= old->pipe[pipe].start &&
3426 new->pipe[pipe].end <= old->pipe[pipe].end;
3429 static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
3430 struct skl_wm_values *new_values)
3432 struct drm_device *dev = dev_priv->dev;
3433 struct skl_ddb_allocation *cur_ddb, *new_ddb;
3434 bool reallocated[I915_MAX_PIPES] = {};
	struct intel_crtc *crtc;
	enum pipe pipe;
3438 new_ddb = &new_values->ddb;
3439 cur_ddb = &dev_priv->wm.skl_hw.ddb;
3442 * First pass: flush the pipes with the new allocation contained into
3445 * We'll wait for the vblank on those pipes to ensure we can safely
3446 * re-allocate the freed space without this pipe fetching from it.
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
			continue;
3457 skl_wm_flush_pipe(dev_priv, pipe, 1);
3458 intel_wait_for_vblank(dev, pipe);
3460 reallocated[pipe] = true;
3465 * Second pass: flush the pipes that are having their allocation
3466 * reduced, but overlapping with a previous allocation.
3468 * Here as well we need to wait for the vblank to make sure the freed
3469 * space is not used anymore.
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		if (reallocated[pipe])
			continue;
3480 if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
3481 skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
3482 skl_wm_flush_pipe(dev_priv, pipe, 2);
3483 intel_wait_for_vblank(dev, pipe);
3484 reallocated[pipe] = true;
3489 * Third pass: flush the pipes that got more space allocated.
3491 * We don't need to actively wait for the update here, next vblank
3492 * will just get more DDB space with the correct WM values.
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		/*
		 * At this point, only the pipes getting more space than before are
3502 * left to re-allocate.
		 */
		if (reallocated[pipe])
			continue;
3507 skl_wm_flush_pipe(dev_priv, pipe, 3);
3511 static bool skl_update_pipe_wm(struct drm_crtc *crtc,
3512 struct skl_ddb_allocation *ddb, /* out */
3513 struct skl_pipe_wm *pipe_wm /* out */)
3515 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3516 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3518 skl_allocate_pipe_ddb(cstate, ddb);
3519 skl_compute_pipe_wm(cstate, ddb, pipe_wm);
	if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
		return false;
	intel_crtc->wm.active.skl = *pipe_wm;

	return true;
}
3529 static void skl_update_other_pipe_wm(struct drm_device *dev,
3530 struct drm_crtc *crtc,
3531 struct skl_wm_values *r)
3533 struct intel_crtc *intel_crtc;
3534 struct intel_crtc *this_crtc = to_intel_crtc(crtc);
	/*
	 * If the WM update hasn't changed the allocation for this_crtc (the
3538 * crtc we are currently computing the new WM values for), other
3539 * enabled crtcs will keep the same allocation and we don't need to
3540 * recompute anything for them.
	 */
	if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
		return;

	/*
3546 * Otherwise, because of this_crtc being freshly enabled/disabled, the
3547 * other active pipes need new DDB allocation and WM values.
	 */
	for_each_intel_crtc(dev, intel_crtc) {
		struct skl_pipe_wm pipe_wm = {};
		bool wm_changed;

		if (this_crtc->pipe == intel_crtc->pipe)
			continue;

		if (!intel_crtc->active)
			continue;

		wm_changed = skl_update_pipe_wm(&intel_crtc->base,
						&r->ddb, &pipe_wm);

		/*
3563 * If we end up re-computing the other pipe WM values, it's
3564 * because it was really needed, so we expect the WM values to
		 * be the same.
		 */
		WARN_ON(!wm_changed);
3569 skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc);
3570 r->dirty[intel_crtc->pipe] = true;
3574 static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe)
3576 watermarks->wm_linetime[pipe] = 0;
3577 memset(watermarks->plane[pipe], 0,
3578 sizeof(uint32_t) * 8 * I915_MAX_PLANES);
3579 memset(watermarks->plane_trans[pipe],
3580 0, sizeof(uint32_t) * I915_MAX_PLANES);
3581 watermarks->plane_trans[pipe][PLANE_CURSOR] = 0;
3583 /* Clear ddb entries for pipe */
3584 memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry));
3585 memset(&watermarks->ddb.plane[pipe], 0,
3586 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
3587 memset(&watermarks->ddb.y_plane[pipe], 0,
3588 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
3589 memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0,
3590 sizeof(struct skl_ddb_entry));
3594 static void skl_update_wm(struct drm_crtc *crtc)
3596 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3597 struct drm_device *dev = crtc->dev;
3598 struct drm_i915_private *dev_priv = dev->dev_private;
3599 struct skl_wm_values *results = &dev_priv->wm.skl_results;
3600 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3601 struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl;
3604 /* Clear all dirty flags */
3605 memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);
3607 skl_clear_wm(results, intel_crtc->pipe);
	if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm))
		return;
3612 skl_compute_wm_results(dev, pipe_wm, results, intel_crtc);
3613 results->dirty[intel_crtc->pipe] = true;
3615 skl_update_other_pipe_wm(dev, crtc, results);
3616 skl_write_wm_values(dev_priv, results);
3617 skl_flush_wm_values(dev_priv, results);
3619 /* store the new configuration */
3620 dev_priv->wm.skl_hw = *results;
3623 static void ilk_compute_wm_config(struct drm_device *dev,
3624 struct intel_wm_config *config)
3626 struct intel_crtc *crtc;
3628 /* Compute the currently _active_ config */
3629 for_each_intel_crtc(dev, crtc) {
3630 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
		if (!wm->pipe_enabled)
			continue;
3635 config->sprites_enabled |= wm->sprites_enabled;
3636 config->sprites_scaled |= wm->sprites_scaled;
3637 config->num_pipes_active++;
3641 static void ilk_program_watermarks(struct intel_crtc_state *cstate)
3643 struct drm_crtc *crtc = cstate->base.crtc;
3644 struct drm_device *dev = crtc->dev;
3645 struct drm_i915_private *dev_priv = to_i915(dev);
3646 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
3647 struct ilk_wm_maximums max;
3648 struct intel_wm_config config = {};
3649 struct ilk_wm_values results = {};
3650 enum intel_ddb_partitioning partitioning;
3652 ilk_compute_wm_config(dev, &config);
3654 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
3655 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
3657 /* 5/6 split only in single pipe config on IVB+ */
3658 if (INTEL_INFO(dev)->gen >= 7 &&
3659 config.num_pipes_active == 1 && config.sprites_enabled) {
3660 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
3661 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
3663 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
3665 best_lp_wm = &lp_wm_1_2;
3668 partitioning = (best_lp_wm == &lp_wm_1_2) ?
3669 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
3671 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
3673 ilk_write_wm_values(dev_priv, &results);
3676 static void ilk_update_wm(struct drm_crtc *crtc)
3678 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3679 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3681 WARN_ON(cstate->base.active != intel_crtc->active);
3684 * IVB workaround: must disable low power watermarks for at least
3685 * one frame before enabling scaling. LP watermarks can be re-enabled
3686 * when scaling is disabled.
3688 * WaCxSRDisabledForSpriteScaling:ivb
3690 if (cstate->disable_lp_wm) {
3691 ilk_disable_lp_wm(crtc->dev);
3692 intel_wait_for_vblank(crtc->dev, intel_crtc->pipe);
3695 intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
3697 ilk_program_watermarks(cstate);
3700 static void skl_pipe_wm_active_state(uint32_t val,
				     struct skl_pipe_wm *active,
				     bool is_transwm,
				     bool is_cursor,
				     int i,
				     int level)
{
3707 bool is_enabled = (val & PLANE_WM_EN) != 0;
	if (!is_transwm) {
		if (!is_cursor) {
			active->wm[level].plane_en[i] = is_enabled;
3712 active->wm[level].plane_res_b[i] =
3713 val & PLANE_WM_BLOCKS_MASK;
3714 active->wm[level].plane_res_l[i] =
3715 (val >> PLANE_WM_LINES_SHIFT) &
3716 PLANE_WM_LINES_MASK;
		} else {
			active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
3719 active->wm[level].plane_res_b[PLANE_CURSOR] =
3720 val & PLANE_WM_BLOCKS_MASK;
3721 active->wm[level].plane_res_l[PLANE_CURSOR] =
3722 (val >> PLANE_WM_LINES_SHIFT) &
3723 PLANE_WM_LINES_MASK;
		}
	} else {
		if (!is_cursor) {
			active->trans_wm.plane_en[i] = is_enabled;
3728 active->trans_wm.plane_res_b[i] =
3729 val & PLANE_WM_BLOCKS_MASK;
3730 active->trans_wm.plane_res_l[i] =
3731 (val >> PLANE_WM_LINES_SHIFT) &
3732 PLANE_WM_LINES_MASK;
		} else {
			active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
3735 active->trans_wm.plane_res_b[PLANE_CURSOR] =
3736 val & PLANE_WM_BLOCKS_MASK;
3737 active->trans_wm.plane_res_l[PLANE_CURSOR] =
3738 (val >> PLANE_WM_LINES_SHIFT) &
3739 PLANE_WM_LINES_MASK;
3744 static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3746 struct drm_device *dev = crtc->dev;
3747 struct drm_i915_private *dev_priv = dev->dev_private;
3748 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
3749 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3750 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3751 struct skl_pipe_wm *active = &cstate->wm.optimal.skl;
3752 enum pipe pipe = intel_crtc->pipe;
	int level, i, max_level;
	uint32_t temp;
3756 max_level = ilk_wm_max_level(dev);
3758 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3760 for (level = 0; level <= max_level; level++) {
3761 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3762 hw->plane[pipe][i][level] =
3763 I915_READ(PLANE_WM(pipe, i, level));
3764 hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
3767 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3768 hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
3769 hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));
	if (!intel_crtc->active)
		return;
3774 hw->dirty[pipe] = true;
3776 active->linetime = hw->wm_linetime[pipe];
3778 for (level = 0; level <= max_level; level++) {
3779 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3780 temp = hw->plane[pipe][i][level];
			skl_pipe_wm_active_state(temp, active, false,
						 false, i, level);
		}
3784 temp = hw->plane[pipe][PLANE_CURSOR][level];
3785 skl_pipe_wm_active_state(temp, active, false, true, i, level);
3788 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3789 temp = hw->plane_trans[pipe][i];
3790 skl_pipe_wm_active_state(temp, active, true, false, i, 0);
3793 temp = hw->plane_trans[pipe][PLANE_CURSOR];
3794 skl_pipe_wm_active_state(temp, active, true, true, i, 0);
3796 intel_crtc->wm.active.skl = *active;
3799 void skl_wm_get_hw_state(struct drm_device *dev)
3801 struct drm_i915_private *dev_priv = dev->dev_private;
3802 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
3803 struct drm_crtc *crtc;
3805 skl_ddb_get_hw_state(dev_priv, ddb);
3806 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3807 skl_pipe_wm_get_hw_state(crtc);
3810 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3812 struct drm_device *dev = crtc->dev;
3813 struct drm_i915_private *dev_priv = dev->dev_private;
3814 struct ilk_wm_values *hw = &dev_priv->wm.hw;
3815 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3816 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3817 struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
3818 enum pipe pipe = intel_crtc->pipe;
3819 static const i915_reg_t wm0_pipe_reg[] = {
3820 [PIPE_A] = WM0_PIPEA_ILK,
3821 [PIPE_B] = WM0_PIPEB_ILK,
3822 [PIPE_C] = WM0_PIPEC_IVB,
3825 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
3826 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3827 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3829 active->pipe_enabled = intel_crtc->active;
3831 if (active->pipe_enabled) {
3832 u32 tmp = hw->wm_pipe[pipe];
3835 * For active pipes LP0 watermark is marked as
3836 * enabled, and LP1+ watermaks as disabled since
3837 * we can't really reverse compute them in case
3838 * multiple pipes are active.
3840 active->wm[0].enable = true;
3841 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
3842 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
3843 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
3844 active->linetime = hw->wm_linetime[pipe];
3846 int level, max_level = ilk_wm_max_level(dev);
3849 * For inactive pipes, all watermark levels
3850 * should be marked as enabled but zeroed,
3851 * which is what we'd compute them to.
3853 for (level = 0; level <= max_level; level++)
3854 active->wm[level].enable = true;
3857 intel_crtc->wm.active.ilk = *active;
3860 #define _FW_WM(value, plane) \
3861 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
3862 #define _FW_WM_VLV(value, plane) \
3863 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
3865 static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
3866 struct vlv_wm_values *wm)
3871 for_each_pipe(dev_priv, pipe) {
3872 tmp = I915_READ(VLV_DDL(pipe));
3874 wm->ddl[pipe].primary =
3875 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3876 wm->ddl[pipe].cursor =
3877 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3878 wm->ddl[pipe].sprite[0] =
3879 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3880 wm->ddl[pipe].sprite[1] =
3881 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3884 tmp = I915_READ(DSPFW1);
3885 wm->sr.plane = _FW_WM(tmp, SR);
3886 wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
3887 wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
3888 wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
3890 tmp = I915_READ(DSPFW2);
3891 wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
3892 wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
3893 wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
3895 tmp = I915_READ(DSPFW3);
3896 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
3898 if (IS_CHERRYVIEW(dev_priv)) {
3899 tmp = I915_READ(DSPFW7_CHV);
3900 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
3901 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
3903 tmp = I915_READ(DSPFW8_CHV);
3904 wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
3905 wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
3907 tmp = I915_READ(DSPFW9_CHV);
3908 wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
3909 wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
3911 tmp = I915_READ(DSPHOWM);
3912 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
3913 wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
3914 wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
3915 wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
3916 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
3917 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
3918 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
3919 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
3920 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
3921 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
3923 tmp = I915_READ(DSPFW7);
3924 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
3925 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
3927 tmp = I915_READ(DSPHOWM);
3928 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
3929 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
3930 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
3931 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
3932 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
3933 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
3934 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
3941 void vlv_wm_get_hw_state(struct drm_device *dev)
3943 struct drm_i915_private *dev_priv = to_i915(dev);
3944 struct vlv_wm_values *wm = &dev_priv->wm.vlv;
	struct intel_plane *plane;
	enum pipe pipe;
	u32 val;
3949 vlv_read_wm_values(dev_priv, wm);
3951 for_each_intel_plane(dev, plane) {
		switch (plane->base.type) {
			int sprite;
		case DRM_PLANE_TYPE_CURSOR:
			plane->wm.fifo_size = 63;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
			break;
		}
	}
3967 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
3968 wm->level = VLV_WM_LEVEL_PM2;
3970 if (IS_CHERRYVIEW(dev_priv)) {
3971 mutex_lock(&dev_priv->rps.hw_lock);
3973 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
3974 if (val & DSP_MAXFIFO_PM5_ENABLE)
3975 wm->level = VLV_WM_LEVEL_PM5;
3978 * If DDR DVFS is disabled in the BIOS, Punit
3979 * will never ack the request. So if that happens
3980 * assume we don't have to enable/disable DDR DVFS
3981 * dynamically. To test that just set the REQ_ACK
3982 * bit to poke the Punit, but don't change the
3983 * HIGH/LOW bits so that we don't actually change
3984 * the current state.
3986 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
3987 val |= FORCE_DDR_FREQ_REQ_ACK;
3988 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
3990 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
3991 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
3992 DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
3993 "assuming DDR DVFS is disabled\n");
3994 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
3996 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
3997 if ((val & FORCE_DDR_HIGH_FREQ) == 0)
3998 wm->level = VLV_WM_LEVEL_DDR_DVFS;
4001 mutex_unlock(&dev_priv->rps.hw_lock);
4004 for_each_pipe(dev_priv, pipe)
4005 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
4006 pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
4007 wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
4009 DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
4010 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
4013 void ilk_wm_get_hw_state(struct drm_device *dev)
4015 struct drm_i915_private *dev_priv = dev->dev_private;
4016 struct ilk_wm_values *hw = &dev_priv->wm.hw;
4017 struct drm_crtc *crtc;
4019 for_each_crtc(dev, crtc)
4020 ilk_pipe_wm_get_hw_state(crtc);
4022 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
4023 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
4024 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
4026 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
4027 if (INTEL_INFO(dev)->gen >= 7) {
4028 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
4029 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
4032 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4033 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
4034 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4035 else if (IS_IVYBRIDGE(dev))
4036 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
4037 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}

/**
4044 * intel_update_watermarks - update FIFO watermark values based on current modes
4046 * Calculate watermark values for the various WM regs based on current mode
4047 * and plane configuration.
4049 * There are several cases to deal with here:
4050 * - normal (i.e. non-self-refresh)
4051 * - self-refresh (SR) mode
4052 * - lines are large relative to FIFO size (buffer can hold up to 2)
4053 * - lines are small relative to FIFO size (buffer can hold more than 2
4054 * lines), so need to account for TLB latency
4056 * The normal calculation is:
4057 * watermark = dotclock * bytes per pixel * latency
 * where latency is platform & configuration dependent (we assume pessimal
 * values).
4061 * The SR calculation is:
 * watermark = (trunc(latency/line time)+1) * surface width *
 *   bytes per pixel
 * where
 * line time = htotal / dotclock
4066 * surface width = hdisplay for normal plane and 64 for cursor
4067 * and latency is assumed to be high, as above.
4069 * The final value programmed to the register should always be rounded up,
4070 * and include an extra 2 entries to account for clock crossings.
4072 * We don't use the sprite, so we can ignore that. And on Crestline we have
4073 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_crtc *crtc)
4077 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
4079 if (dev_priv->display.update_wm)
4080 dev_priv->display.update_wm(crtc);
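/*
 * The "normal" formula above worked through with assumed numbers: a
 * 148.5 MHz dotclock at 4 bytes per pixel with 2us of latency must ride out
 * 148500000 * 4 * 0.000002 = ~1188 bytes, i.e. 19 64-byte FIFO entries once
 * rounded up, plus the 2 extra clock-crossing entries.
 */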
/*
 * Lock protecting IPS related data structures
 */
4086 DEFINE_SPINLOCK(mchdev_lock);
/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
4090 static struct drm_i915_private *i915_mch_dev;
4092 bool ironlake_set_drps(struct drm_device *dev, u8 val)
4094 struct drm_i915_private *dev_priv = dev->dev_private;
4097 assert_spin_locked(&mchdev_lock);
4099 rgvswctl = I915_READ16(MEMSWCTL);
4100 if (rgvswctl & MEMCTL_CMD_STS) {
4101 DRM_DEBUG("gpu busy, RCS change rejected\n");
4102 return false; /* still busy with another command */
4105 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4106 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4107 I915_WRITE16(MEMSWCTL, rgvswctl);
4108 POSTING_READ16(MEMSWCTL);
4110 rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
4116 static void ironlake_enable_drps(struct drm_device *dev)
4118 struct drm_i915_private *dev_priv = dev->dev_private;
4119 u32 rgvmodectl = I915_READ(MEMMODECTL);
4120 u8 fmax, fmin, fstart, vstart;
4122 spin_lock_irq(&mchdev_lock);
4124 /* Enable temp reporting */
4125 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
4126 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
4128 /* 100ms RC evaluation intervals */
4129 I915_WRITE(RCUPEI, 100000);
4130 I915_WRITE(RCDNEI, 100000);
4132 /* Set max/min thresholds to 90ms and 80ms respectively */
4133 I915_WRITE(RCBMAXAVG, 90000);
4134 I915_WRITE(RCBMINAVG, 80000);
4136 I915_WRITE(MEMIHYST, 1);
4138 /* Set up min, max, and cur for interrupt handling */
4139 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
4140 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
4141 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
4142 MEMMODE_FSTART_SHIFT;
	vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;
4147 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
4148 dev_priv->ips.fstart = fstart;
4150 dev_priv->ips.max_delay = fstart;
4151 dev_priv->ips.min_delay = fmin;
4152 dev_priv->ips.cur_delay = fstart;
4154 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
4155 fmax, fmin, fstart);
4157 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */
4163 I915_WRITE(VIDSTART, vstart);
4164 POSTING_READ(VIDSTART);
4166 rgvmodectl |= MEMMODE_SWMODE_EN;
4167 I915_WRITE(MEMMODECTL, rgvmodectl);
4169 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
4170 DRM_ERROR("stuck trying to change perf mode\n");
4173 ironlake_set_drps(dev, fstart);
4175 dev_priv->ips.last_count1 = I915_READ(DMIEC) +
4176 I915_READ(DDREC) + I915_READ(CSIEC);
4177 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
4178 dev_priv->ips.last_count2 = I915_READ(GFXEC);
4179 dev_priv->ips.last_time2 = ktime_get_raw_ns();
4181 spin_unlock_irq(&mchdev_lock);
4184 static void ironlake_disable_drps(struct drm_device *dev)
4186 struct drm_i915_private *dev_priv = dev->dev_private;
4189 spin_lock_irq(&mchdev_lock);
4191 rgvswctl = I915_READ16(MEMSWCTL);
4193 /* Ack interrupts, disable EFC interrupt */
4194 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
4195 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
4196 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
4197 I915_WRITE(DEIIR, DE_PCU_EVENT);
4198 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
4200 /* Go back to the starting frequency */
4201 ironlake_set_drps(dev, dev_priv->ips.fstart);
4203 rgvswctl |= MEMCTL_CMD_STS;
4204 I915_WRITE(MEMSWCTL, rgvswctl);
4207 spin_unlock_irq(&mchdev_lock);
4210 /* There's a funny hw issue where the hw returns all 0 when reading from
4211 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
4212 * ourselves, instead of doing a rmw cycle (which might result in us clearing
4213 * all limits and the gpu stuck at whatever frequency it is at atm).
4215 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
4219 /* Only set the down limit when we've reached the lowest level to avoid
4220 * getting more interrupts, otherwise leave this clear. This prevents a
4221 * race in the hw when coming out of rc6: There's a tiny window where
4222 * the hw runs at the minimal clock before selecting the desired
4223 * frequency, if the down threshold expires in that window we will not
4224 * receive a down interrupt. */
4225 if (IS_GEN9(dev_priv->dev)) {
4226 limits = (dev_priv->rps.max_freq_softlimit) << 23;
4227 if (val <= dev_priv->rps.min_freq_softlimit)
4228 limits |= (dev_priv->rps.min_freq_softlimit) << 14;
	} else {
		limits = dev_priv->rps.max_freq_softlimit << 24;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= dev_priv->rps.min_freq_softlimit << 16;
	}

	return limits;
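/*
 * Illustrative packing (hypothetical softlimits): on a pre-gen9 part with
 * max_freq_softlimit = 0x20 and min_freq_softlimit = 0x0a, a request at or
 * below the floor yields (0x20 << 24) | (0x0a << 16) = 0x200a0000; any
 * higher request sets only 0x20 << 24, so no down-limit interrupt fires.
 */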
4238 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
4241 u32 threshold_up = 0, threshold_down = 0; /* in % */
4242 u32 ei_up = 0, ei_down = 0;
	new_power = dev_priv->rps.power;
	switch (dev_priv->rps.power) {
	case LOW_POWER:
		if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;
	case BETWEEN:
		if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
			new_power = LOW_POWER;
		else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
			new_power = HIGH_POWER;
		break;
	case HIGH_POWER:
		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;
	}
4263 /* Max/min bins are special */
4264 if (val <= dev_priv->rps.min_freq_softlimit)
4265 new_power = LOW_POWER;
4266 if (val >= dev_priv->rps.max_freq_softlimit)
4267 new_power = HIGH_POWER;
	if (new_power == dev_priv->rps.power)
		return;
4271 /* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		ei_up = 16000;
		threshold_up = 95;

		/* Downclock if less than 85% busy over 32ms */
		ei_down = 32000;
		threshold_down = 85;
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		ei_up = 13000;
		threshold_up = 90;

		/* Downclock if less than 75% busy over 32ms */
		ei_down = 32000;
		threshold_down = 75;
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		ei_up = 10000;
		threshold_up = 85;

		/* Downclock if less than 60% busy over 32ms */
		ei_down = 32000;
		threshold_down = 60;
		break;
	}
4304 I915_WRITE(GEN6_RP_UP_EI,
4305 GT_INTERVAL_FROM_US(dev_priv, ei_up));
4306 I915_WRITE(GEN6_RP_UP_THRESHOLD,
4307 GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
4309 I915_WRITE(GEN6_RP_DOWN_EI,
4310 GT_INTERVAL_FROM_US(dev_priv, ei_down));
4311 I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
4312 GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
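	/*
	 * For example, in the HIGH_POWER bin above ei_up = 10000 us with
	 * threshold_up = 85, so GEN6_RP_UP_THRESHOLD gets 10000 * 85 / 100
	 * = 8500 us worth of interval, which GT_INTERVAL_FROM_US() then
	 * scales into the 1.28 us hardware units noted earlier.
	 */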
4314 I915_WRITE(GEN6_RP_CONTROL,
4315 GEN6_RP_MEDIA_TURBO |
4316 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4317 GEN6_RP_MEDIA_IS_GFX |
4319 GEN6_RP_UP_BUSY_AVG |
4320 GEN6_RP_DOWN_IDLE_AVG);
4322 dev_priv->rps.power = new_power;
4323 dev_priv->rps.up_threshold = threshold_up;
4324 dev_priv->rps.down_threshold = threshold_down;
4325 dev_priv->rps.last_adj = 0;
4328 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
4332 if (val > dev_priv->rps.min_freq_softlimit)
4333 mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
4334 if (val < dev_priv->rps.max_freq_softlimit)
4335 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
4337 mask &= dev_priv->pm_rps_events;
4339 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
4342 /* gen6_set_rps is called to update the frequency request, but should also be
4343 * called when the range (min_delay and max_delay) is modified so that we can
4344 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
4345 static void gen6_set_rps(struct drm_device *dev, u8 val)
4347 struct drm_i915_private *dev_priv = dev->dev_private;
4349 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		return;
4353 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4354 WARN_ON(val > dev_priv->rps.max_freq);
4355 WARN_ON(val < dev_priv->rps.min_freq);
4357 /* min/max delay may still have been modified so be sure to
4358 * write the limits value.
4360 if (val != dev_priv->rps.cur_freq) {
4361 gen6_set_rps_thresholds(dev_priv, val);
		if (IS_GEN9(dev))
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN9_FREQUENCY(val));
		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			I915_WRITE(GEN6_RPNSWREQ,
				   HSW_FREQUENCY(val));
		else
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN6_FREQUENCY(val) |
				   GEN6_OFFSET(0) |
				   GEN6_AGGRESSIVE_TURBO);
	}
4376 /* Make sure we continue to get interrupts
4377 * until we hit the minimum or maximum frequencies.
4379 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
4380 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4382 POSTING_READ(GEN6_RPNSWREQ);
4384 dev_priv->rps.cur_freq = val;
4385 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
4388 static void valleyview_set_rps(struct drm_device *dev, u8 val)
4390 struct drm_i915_private *dev_priv = dev->dev_private;
4392 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4393 WARN_ON(val > dev_priv->rps.max_freq);
4394 WARN_ON(val < dev_priv->rps.min_freq);
4396 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
4397 "Odd GPU freq value\n"))
4400 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4402 if (val != dev_priv->rps.cur_freq) {
4403 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
4404 if (!IS_CHERRYVIEW(dev_priv))
4405 gen6_set_rps_thresholds(dev_priv, val);
4408 dev_priv->rps.cur_freq = val;
4409 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
4412 /* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
4414 * * If Gfx is Idle, then
4415 * 1. Forcewake Media well.
4416 * 2. Request idle freq.
4417 * 3. Release Forcewake of Media well.
4419 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
4421 u32 val = dev_priv->rps.idle_freq;
	if (dev_priv->rps.cur_freq <= val)
		return;
4426 /* Wake up the media well, as that takes a lot less
4427 * power than the Render well. */
4428 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
4429 valleyview_set_rps(dev_priv->dev, val);
4430 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
4433 void gen6_rps_busy(struct drm_i915_private *dev_priv)
4435 mutex_lock(&dev_priv->rps.hw_lock);
4436 if (dev_priv->rps.enabled) {
4437 if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
4438 gen6_rps_reset_ei(dev_priv);
4439 I915_WRITE(GEN6_PMINTRMSK,
4440 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
4442 mutex_unlock(&dev_priv->rps.hw_lock);
4445 void gen6_rps_idle(struct drm_i915_private *dev_priv)
4447 struct drm_device *dev = dev_priv->dev;
4449 mutex_lock(&dev_priv->rps.hw_lock);
4450 if (dev_priv->rps.enabled) {
4451 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
4452 vlv_set_rps_idle(dev_priv);
		else
			gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
4455 dev_priv->rps.last_adj = 0;
4456 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
4458 mutex_unlock(&dev_priv->rps.hw_lock);
4460 spin_lock(&dev_priv->rps.client_lock);
4461 while (!list_empty(&dev_priv->rps.clients))
4462 list_del_init(dev_priv->rps.clients.next);
4463 spin_unlock(&dev_priv->rps.client_lock);
4466 void gen6_rps_boost(struct drm_i915_private *dev_priv,
4467 struct intel_rps_client *rps,
4468 unsigned long submitted)
4470 /* This is intentionally racy! We peek at the state here, then
4471 * validate inside the RPS worker.
4473 if (!(dev_priv->mm.busy &&
4474 dev_priv->rps.enabled &&
	      dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
		return;
4478 /* Force a RPS boost (and don't count it against the client) if
4479 * the GPU is severely congested.
	if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
		rps = NULL;
4484 spin_lock(&dev_priv->rps.client_lock);
4485 if (rps == NULL || list_empty(&rps->link)) {
4486 spin_lock_irq(&dev_priv->irq_lock);
4487 if (dev_priv->rps.interrupts_enabled) {
4488 dev_priv->rps.client_boost = true;
4489 queue_work(dev_priv->wq, &dev_priv->rps.work);
4491 spin_unlock_irq(&dev_priv->irq_lock);
		if (rps != NULL) {
			list_add(&rps->link, &dev_priv->rps.clients);
			rps->boosts++;
		} else
			dev_priv->rps.boosts++;
	}
4499 spin_unlock(&dev_priv->rps.client_lock);
4502 void intel_set_rps(struct drm_device *dev, u8 val)
4504 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
4505 valleyview_set_rps(dev, val);
	else
		gen6_set_rps(dev, val);
4510 static void gen9_disable_rps(struct drm_device *dev)
4512 struct drm_i915_private *dev_priv = dev->dev_private;
4514 I915_WRITE(GEN6_RC_CONTROL, 0);
4515 I915_WRITE(GEN9_PG_ENABLE, 0);
4518 static void gen6_disable_rps(struct drm_device *dev)
4520 struct drm_i915_private *dev_priv = dev->dev_private;
4522 I915_WRITE(GEN6_RC_CONTROL, 0);
4523 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
4526 static void cherryview_disable_rps(struct drm_device *dev)
4528 struct drm_i915_private *dev_priv = dev->dev_private;
4530 I915_WRITE(GEN6_RC_CONTROL, 0);
4533 static void valleyview_disable_rps(struct drm_device *dev)
4535 struct drm_i915_private *dev_priv = dev->dev_private;
	/* We do forcewake before disabling RC6;
	 * this is what the BIOS expects when going into suspend. */
4539 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4541 I915_WRITE(GEN6_RC_CONTROL, 0);
4543 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4546 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
4548 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
4549 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
			mode = GEN6_RC_CTL_RC6_ENABLE;
		else
			mode = 0;
	}
	if (HAS_RC6p(dev))
		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
4556 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
4557 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
4558 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
4561 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
4562 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
4565 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
	/* No RC6 before Ironlake, and the Ironlake RC6 code has been removed. */
	if (INTEL_INFO(dev)->gen < 6)
		return 0;
4571 /* Respect the kernel parameter if it is set */
	if (enable_rc6 >= 0) {
		int mask;

		if (HAS_RC6p(dev))
			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
			       INTEL_RC6pp_ENABLE;
		else
			mask = INTEL_RC6_ENABLE;
4581 if ((enable_rc6 & mask) != enable_rc6)
4582 DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
4583 enable_rc6 & mask, enable_rc6, mask);
4585 return enable_rc6 & mask;
4588 if (IS_IVYBRIDGE(dev))
4589 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
4591 return INTEL_RC6_ENABLE;
4594 int intel_enable_rc6(const struct drm_device *dev)
4596 return i915.enable_rc6;
4599 static void gen6_init_rps_frequencies(struct drm_device *dev)
4601 struct drm_i915_private *dev_priv = dev->dev_private;
4602 uint32_t rp_state_cap;
4603 u32 ddcc_status = 0;
4606 /* All of these values are in units of 50MHz */
4607 dev_priv->rps.cur_freq = 0;
4608 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
4609 if (IS_BROXTON(dev)) {
4610 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
4611 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
4612 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
4613 dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
	} else {
		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
		dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
		dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
	}
4621 /* hw_max = RP0 until we check for overclocking */
4622 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
4624 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
4625 if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
4626 IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
4627 ret = sandybridge_pcode_read(dev_priv,
4628 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
					&ddcc_status);
		if (0 == ret)
			dev_priv->rps.efficient_freq =
				clamp_t(u8,
					((ddcc_status >> 8) & 0xff),
4634 dev_priv->rps.min_freq,
4635 dev_priv->rps.max_freq);
4638 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		/* Store the frequency values in 16.66 MHz units, which is
		   the natural hardware unit for SKL */
4641 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
4642 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
4643 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
4644 dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
4645 dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
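		/*
		 * e.g. (assuming GEN9_FREQ_SCALER is 3) the scaling above
		 * turns an RP0 of 22 in 50 MHz units into 66 in 16.66 MHz
		 * units; both encode the same 1100 MHz.
		 */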
4648 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
4650 /* Preserve min/max settings in case of re-init */
4651 if (dev_priv->rps.max_freq_softlimit == 0)
4652 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4654 if (dev_priv->rps.min_freq_softlimit == 0) {
4655 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4656 dev_priv->rps.min_freq_softlimit =
4657 max_t(int, dev_priv->rps.efficient_freq,
4658 intel_freq_opcode(dev_priv, 450));
		else
			dev_priv->rps.min_freq_softlimit =
4661 dev_priv->rps.min_freq;
4665 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
4666 static void gen9_enable_rps(struct drm_device *dev)
4668 struct drm_i915_private *dev_priv = dev->dev_private;
4670 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4672 gen6_init_rps_frequencies(dev);
4674 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
4675 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		return;
	}
4680 /* Program defaults and thresholds for RPS*/
4681 I915_WRITE(GEN6_RC_VIDEO_FREQ,
4682 GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
4684 /* 1 second timeout*/
4685 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
4686 GT_INTERVAL_FROM_US(dev_priv, 1000000));
4688 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
4690 /* Leaning on the below call to gen6_set_rps to program/setup the
4691 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
4692 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
4693 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4694 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
4696 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4699 static void gen9_enable_rc6(struct drm_device *dev)
4701 struct drm_i915_private *dev_priv = dev->dev_private;
4702 struct intel_engine_cs *ring;
4703 uint32_t rc6_mask = 0;
4706 /* 1a: Software RC state - RC0 */
4707 I915_WRITE(GEN6_RC_STATE, 0);
4709 /* 1b: Get forcewake during program sequence. Although the driver
4710 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
4711 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4713 /* 2a: Disable RC states. */
4714 I915_WRITE(GEN6_RC_CONTROL, 0);
4716 /* 2b: Program RC6 thresholds.*/
4718 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
4719 if (IS_SKYLAKE(dev))
4720 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
	else
		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
4723 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
4724 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4725 for_each_ring(ring, dev_priv, unused)
4726 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4728 if (HAS_GUC_UCODE(dev))
4729 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
4731 I915_WRITE(GEN6_RC_SLEEP, 0);
4733 /* 2c: Program Coarse Power Gating Policies. */
4734 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
4735 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
4737 /* 3a: Enable RC6 */
4738 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4739 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4740 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
4741 /* WaRsUseTimeoutMode */
4742 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
4743 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
4744 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
4745 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN7_RC_CTL_TO_MODE |
			   rc6_mask);
	} else {
		I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
4750 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN6_RC_CTL_EI_MODE(1) |
			   rc6_mask);
	}
4756 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
4757 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
4759 if (NEEDS_WaRsDisableCoarsePowerGating(dev))
4760 I915_WRITE(GEN9_PG_ENABLE, 0);
	else
		I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
4763 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
4765 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4769 static void gen8_enable_rps(struct drm_device *dev)
4771 struct drm_i915_private *dev_priv = dev->dev_private;
4772 struct intel_engine_cs *ring;
4773 uint32_t rc6_mask = 0;
4776 /* 1a: Software RC state - RC0 */
4777 I915_WRITE(GEN6_RC_STATE, 0);
4779 /* 1c & 1d: Get forcewake during program sequence. Although the driver
4780 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
4781 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4783 /* 2a: Disable RC states. */
4784 I915_WRITE(GEN6_RC_CONTROL, 0);
4786 /* Initialize rps frequencies */
4787 gen6_init_rps_frequencies(dev);
4789 /* 2b: Program RC6 thresholds.*/
4790 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
4791 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
4792 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4793 for_each_ring(ring, dev_priv, unused)
4794 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4795 I915_WRITE(GEN6_RC_SLEEP, 0);
4796 if (IS_BROADWELL(dev))
4797 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
4802 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4803 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4804 intel_print_rc6_info(dev, rc6_mask);
4805 if (IS_BROADWELL(dev))
4806 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN7_RC_CTL_TO_MODE |
			   rc6_mask);
	else
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN6_RC_CTL_EI_MODE(1) |
			   rc6_mask);
4814 /* 4 Program defaults and thresholds for RPS*/
4815 I915_WRITE(GEN6_RPNSWREQ,
4816 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
4817 I915_WRITE(GEN6_RC_VIDEO_FREQ,
4818 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
4819 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
4820 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
4822 /* Docs recommend 900MHz, and 300 MHz respectively */
4823 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
4824 dev_priv->rps.max_freq_softlimit << 24 |
4825 dev_priv->rps.min_freq_softlimit << 16);
4827 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
4828 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
4829 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
4830 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
4832 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4835 I915_WRITE(GEN6_RP_CONTROL,
4836 GEN6_RP_MEDIA_TURBO |
4837 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4838 GEN6_RP_MEDIA_IS_GFX |
4840 GEN6_RP_UP_BUSY_AVG |
4841 GEN6_RP_DOWN_IDLE_AVG);
	/* 6: Ring frequency + overclocking (our driver does this later) */
4845 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4846 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
4848 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4851 static void gen6_enable_rps(struct drm_device *dev)
4853 struct drm_i915_private *dev_priv = dev->dev_private;
4854 struct intel_engine_cs *ring;
4855 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
4860 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4862 /* Here begins a magic sequence of register writes to enable
4863 * auto-downclocking.
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
4868 I915_WRITE(GEN6_RC_STATE, 0);
4870 /* Clear the DBG now so we don't confuse earlier errors */
4871 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
4872 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
4873 I915_WRITE(GTFIFODBG, gtfifodbg);
4876 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4878 /* Initialize rps frequencies */
4879 gen6_init_rps_frequencies(dev);
4881 /* disable the counters and set deterministic thresholds */
4882 I915_WRITE(GEN6_RC_CONTROL, 0);
4884 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
4885 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
4886 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
4887 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
4888 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
4890 for_each_ring(ring, dev_priv, i)
4891 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4893 I915_WRITE(GEN6_RC_SLEEP, 0);
4894 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
4895 if (IS_IVYBRIDGE(dev))
4896 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
4899 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
4900 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
4902 /* Check if we are enabling RC6 */
4903 rc6_mode = intel_enable_rc6(dev_priv->dev);
4904 if (rc6_mode & INTEL_RC6_ENABLE)
4905 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
4907 /* We don't use those on Haswell */
4908 if (!IS_HASWELL(dev)) {
4909 if (rc6_mode & INTEL_RC6p_ENABLE)
4910 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
4912 if (rc6_mode & INTEL_RC6pp_ENABLE)
4913 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
4916 intel_print_rc6_info(dev, rc6_mask);
4918 I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
4921 GEN6_RC_CTL_HW_ENABLE);
4923 /* Power down if completely idle for over 50ms */
4924 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
4925 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4927 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
4929 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
4931 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
4932 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
4933 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
4934 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
4935 (pcu_mbox & 0xff) * 50);
4936 dev_priv->rps.max_freq = pcu_mbox & 0xff;
4939 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4940 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
4943 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
4944 if (IS_GEN6(dev) && ret) {
4945 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
4946 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
4947 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
4948 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
4949 rc6vids &= 0xffff00;
4950 rc6vids |= GEN6_ENCODE_RC6_VID(450);
4951 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
4953 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
4956 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4959 static void __gen6_update_ring_freq(struct drm_device *dev)
4961 struct drm_i915_private *dev_priv = dev->dev_private;
	int min_freq = 15;
	unsigned int gpu_freq;
4964 unsigned int max_ia_freq, min_ring_freq;
4965 unsigned int max_gpu_freq, min_gpu_freq;
4966 int scaling_factor = 180;
4967 struct cpufreq_policy *policy;
4969 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	policy = cpufreq_cpu_get(0);
	if (policy) {
		max_ia_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	} else {
		/*
		 * Default to measured freq if none found, PCU will ensure we
		 * don't go over
		 */
		max_ia_freq = tsc_khz;
	}
4983 /* Convert from kHz to MHz */
4984 max_ia_freq /= 1000;
4986 min_ring_freq = I915_READ(DCLK) & 0xf;
4987 /* convert DDR frequency from units of 266.6MHz to bandwidth */
4988 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
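	/*
	 * e.g. a DCLK field of 3 means 3 * 266.6 MHz = 800 MHz DDR, and
	 * mult_frac(3, 8, 3) = 8 expresses that same bandwidth in the ring
	 * frequency units used below.
	 */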
4990 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		/* Convert GT frequency to 50 MHz units */
4992 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
4993 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
4995 min_gpu_freq = dev_priv->rps.min_freq;
4996 max_gpu_freq = dev_priv->rps.max_freq;
5000 * For each potential GPU frequency, load a ring frequency we'd like
5001 * to use for memory access. We do this by specifying the IA frequency
5002 * the PCU should use as a reference to determine the ring frequency.
5004 for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
5005 int diff = max_gpu_freq - gpu_freq;
5006 unsigned int ia_freq = 0, ring_freq = 0;
5008 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
5010 * ring_freq = 2 * GT. ring_freq is in 100MHz units
5011 * No floor required for ring frequency on SKL.
5013 ring_freq = gpu_freq;
5014 } else if (INTEL_INFO(dev)->gen >= 8) {
5015 /* max(2 * GT, DDR). NB: GT is 50MHz units */
5016 ring_freq = max(min_ring_freq, gpu_freq);
5017 } else if (IS_HASWELL(dev)) {
5018 ring_freq = mult_frac(gpu_freq, 5, 4);
5019 ring_freq = max(min_ring_freq, ring_freq);
5020 /* leave ia_freq as the default, chosen by cpufreq */
5022 /* On older processors, there is no separate ring
5023 * clock domain, so in order to boost the bandwidth
5024 * of the ring, we need to upclock the CPU (ia_freq).
5026 * For GPU frequencies less than 750MHz,
5027 * just use the lowest ring freq.
			if (gpu_freq < min_freq)
				ia_freq = 800;
			else
				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
5033 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
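			/*
			 * Hypothetical numbers: max_ia_freq = 3000 MHz and
			 * diff = 4 give 3000 - (4 * 180) / 2 = 2640 MHz,
			 * which DIV_ROUND_CLOSEST() turns into 26 in the
			 * 100 MHz units the PCU expects.
			 */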
5036 sandybridge_pcode_write(dev_priv,
5037 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
5038 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
5039 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
5044 void gen6_update_ring_freq(struct drm_device *dev)
5046 struct drm_i915_private *dev_priv = dev->dev_private;
	if (!HAS_CORE_RING_FREQ(dev))
		return;
5051 mutex_lock(&dev_priv->rps.hw_lock);
5052 __gen6_update_ring_freq(dev);
5053 mutex_unlock(&dev_priv->rps.hw_lock);
5056 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
5058 struct drm_device *dev = dev_priv->dev;
5061 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
	switch (INTEL_INFO(dev)->eu_total) {
	case 8:
		/* (2 * 4) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
		break;
	case 12:
		/* (2 * 6) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Setting (2 * 8) Min RP0 for any other combination */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
		break;
	}
	rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);

	return rp0;
5085 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5089 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;

	return rpe;
5095 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
5099 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
	rp1 = (val & FB_GFX_FREQ_FUSE_MASK);

	return rp1;
5105 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
5109 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
5116 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
5120 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
5122 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
5129 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5133 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
5134 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
5135 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
5141 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
5145 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
5147 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
5148 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
5149 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
5150 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0
5151 * to make sure it matches what Punit accepts.
5153 return max_t(u32, val, 0xc0);
/* Check that the pctx buffer wasn't moved under us. */
5157 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
5159 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5161 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
5162 dev_priv->vlv_pctx->stolen->start);
5166 /* Check that the pcbr address is not empty. */
5167 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
5169 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5171 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
5174 static void cherryview_setup_pctx(struct drm_device *dev)
5176 struct drm_i915_private *dev_priv = dev->dev_private;
5177 unsigned long pctx_paddr, paddr;
5178 struct i915_gtt *gtt = &dev_priv->gtt;
5180 int pctx_size = 32*1024;
5182 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
5184 pcbr = I915_READ(VLV_PCBR);
5185 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
5186 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5187 paddr = (dev_priv->mm.stolen_base +
5188 (gtt->stolen_size - pctx_size));
5190 pctx_paddr = (paddr & (~4095));
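	/* VLV_PCBR holds a 4 KiB-aligned physical address, hence masking
	 * off the low 12 bits above. */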
5191 I915_WRITE(VLV_PCBR, pctx_paddr);
5194 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5197 static void valleyview_setup_pctx(struct drm_device *dev)
5199 struct drm_i915_private *dev_priv = dev->dev_private;
5200 struct drm_i915_gem_object *pctx;
5201 unsigned long pctx_paddr;
5203 int pctx_size = 24*1024;
5205 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	pcbr = I915_READ(VLV_PCBR);
	if (pcbr) {
		/* BIOS set it up already, grab the pre-alloc'd space */
		int pcbr_offset;

		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
								      pcbr_offset,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);
		goto out;
	}

	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5223 * From the Gunit register HAS:
5224 * The Gfx driver is expected to program this register and ensure
5225 * proper allocation within Gfx stolen memory. For example, this
	 * register should be programmed such that the PCBR range does not
5227 * overlap with other ranges, such as the frame buffer, protected
5228 * memory, or any other relevant ranges.
5230 pctx = i915_gem_object_create_stolen(dev, pctx_size);
5232 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
5236 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
5237 I915_WRITE(VLV_PCBR, pctx_paddr);
5240 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5241 dev_priv->vlv_pctx = pctx;
5244 static void valleyview_cleanup_pctx(struct drm_device *dev)
5246 struct drm_i915_private *dev_priv = dev->dev_private;
	if (WARN_ON(!dev_priv->vlv_pctx))
		return;
5251 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
5252 dev_priv->vlv_pctx = NULL;
5255 static void valleyview_init_gt_powersave(struct drm_device *dev)
5257 struct drm_i915_private *dev_priv = dev->dev_private;
5260 valleyview_setup_pctx(dev);
5262 mutex_lock(&dev_priv->rps.hw_lock);
5264 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
5277 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5279 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
5280 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5281 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
5282 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
5283 dev_priv->rps.max_freq);
5285 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
5286 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
5287 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5288 dev_priv->rps.efficient_freq);
5290 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
5291 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
5292 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
5293 dev_priv->rps.rp1_freq);
5295 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
5296 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
5297 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
5298 dev_priv->rps.min_freq);
5300 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
5302 /* Preserve min/max settings in case of re-init */
5303 if (dev_priv->rps.max_freq_softlimit == 0)
5304 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5306 if (dev_priv->rps.min_freq_softlimit == 0)
5307 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
5309 mutex_unlock(&dev_priv->rps.hw_lock);
5312 static void cherryview_init_gt_powersave(struct drm_device *dev)
5314 struct drm_i915_private *dev_priv = dev->dev_private;
5317 cherryview_setup_pctx(dev);
5319 mutex_lock(&dev_priv->rps.hw_lock);
5321 mutex_lock(&dev_priv->sb_lock);
5322 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
5323 mutex_unlock(&dev_priv->sb_lock);
	switch ((val >> 2) & 0x7) {
	case 3:
		dev_priv->mem_freq = 2000;
		break;
	default:
		dev_priv->mem_freq = 1600;
		break;
	}
5333 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5335 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
5336 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5337 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
5338 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
5339 dev_priv->rps.max_freq);
5341 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
5342 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
5343 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5344 dev_priv->rps.efficient_freq);
5346 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
5347 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
5348 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
5349 dev_priv->rps.rp1_freq);
5351 /* PUnit validated range is only [RPe, RP0] */
5352 dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
5353 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
5354 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
5355 dev_priv->rps.min_freq);
5357 WARN_ONCE((dev_priv->rps.max_freq |
5358 dev_priv->rps.efficient_freq |
5359 dev_priv->rps.rp1_freq |
5360 dev_priv->rps.min_freq) & 1,
5361 "Odd GPU freq values\n");
5363 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
5365 /* Preserve min/max settings in case of re-init */
5366 if (dev_priv->rps.max_freq_softlimit == 0)
5367 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5369 if (dev_priv->rps.min_freq_softlimit == 0)
5370 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
5372 mutex_unlock(&dev_priv->rps.hw_lock);
5375 static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
5377 valleyview_cleanup_pctx(dev);
5380 static void cherryview_enable_rps(struct drm_device *dev)
5382 struct drm_i915_private *dev_priv = dev->dev_private;
5383 struct intel_engine_cs *ring;
5384 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
5387 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5389 gtfifodbg = I915_READ(GTFIFODBG);
5391 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5393 I915_WRITE(GTFIFODBG, gtfifodbg);
5396 cherryview_check_pctx(dev_priv);
5398 /* 1a & 1b: Get forcewake during program sequence. Although the driver
5399 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
5400 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5402 /* Disable RC states. */
5403 I915_WRITE(GEN6_RC_CONTROL, 0);
5405 /* 2a: Program RC6 thresholds.*/
5406 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
5407 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5408 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5410 for_each_ring(ring, dev_priv, i)
5411 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
5412 I915_WRITE(GEN6_RC_SLEEP, 0);
5414 /* TO threshold set to 500 us ( 0x186 * 1.28 us) */
5415 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
5417 /* allows RC6 residency counter to work */
5418 I915_WRITE(VLV_COUNTER_CONTROL,
5419 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
5420 VLV_MEDIA_RC6_COUNT_EN |
5421 VLV_RENDER_RC6_COUNT_EN));
5423 /* For now we assume BIOS is allocating and populating the PCBR */
5424 pcbr = I915_READ(VLV_PCBR);
5427 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
5428 (pcbr >> VLV_PCBR_ADDR_SHIFT))
5429 rc6_mode = GEN7_RC_CTL_TO_MODE;
5431 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5433 /* 4 Program defaults and thresholds for RPS*/
5434 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
5435 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5436 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5437 I915_WRITE(GEN6_RP_UP_EI, 66000);
5438 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5440 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5443 I915_WRITE(GEN6_RP_CONTROL,
5444 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5445 GEN6_RP_MEDIA_IS_GFX |
5447 GEN6_RP_UP_BUSY_AVG |
5448 GEN6_RP_DOWN_IDLE_AVG);
5450 /* Setting Fixed Bias */
5451 val = VLV_OVERRIDE_EN |
5453 CHV_BIAS_CPU_50_SOC_50;
5454 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
5456 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5458 /* RPS code assumes GPLL is used */
5459 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5461 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
5462 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5464 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
5465 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
5466 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
5467 dev_priv->rps.cur_freq);
5469 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
5470 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5471 dev_priv->rps.efficient_freq);
5473 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
5475 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5478 static void valleyview_enable_rps(struct drm_device *dev)
5480 struct drm_i915_private *dev_priv = dev->dev_private;
5481 struct intel_engine_cs *ring;
5482 u32 gtfifodbg, val, rc6_mode = 0;
5485 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5487 valleyview_check_pctx(dev_priv);
5489 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
5490 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5492 I915_WRITE(GTFIFODBG, gtfifodbg);
5495 /* If VLV, Forcewake all wells, else re-direct to regular path */
5496 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5498 /* Disable RC states. */
5499 I915_WRITE(GEN6_RC_CONTROL, 0);
5501 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
5502 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5503 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5504 I915_WRITE(GEN6_RP_UP_EI, 66000);
5505 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5507 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5509 I915_WRITE(GEN6_RP_CONTROL,
5510 GEN6_RP_MEDIA_TURBO |
5511 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5512 GEN6_RP_MEDIA_IS_GFX |
5514 GEN6_RP_UP_BUSY_AVG |
5515 GEN6_RP_DOWN_IDLE_CONT);
5517 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
5518 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
5519 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
5521 for_each_ring(ring, dev_priv, i)
5522 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
5524 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
5526 /* allows RC6 residency counter to work */
5527 I915_WRITE(VLV_COUNTER_CONTROL,
5528 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
5529 VLV_RENDER_RC0_COUNT_EN |
5530 VLV_MEDIA_RC6_COUNT_EN |
5531 VLV_RENDER_RC6_COUNT_EN));
5533 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
5534 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
5536 intel_print_rc6_info(dev, rc6_mode);
5538 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5540 /* Setting Fixed Bias */
5541 val = VLV_OVERRIDE_EN |
5543 VLV_BIAS_CPU_125_SOC_875;
5544 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
5546 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5548 /* RPS code assumes GPLL is used */
5549 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5551 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
5552 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5554 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
5555 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
5556 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
5557 dev_priv->rps.cur_freq);
5559 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
5560 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5561 dev_priv->rps.efficient_freq);
5563 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
5565 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5568 static unsigned long intel_pxfreq(u32 vidfreq)
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
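/*
 * Worked example for intel_pxfreq() (hypothetical fuse value): div = 20,
 * post = 1 and pre = 2 give 20 * 133333 / ((1<<1) * 2) = 666665, i.e.
 * roughly 666 MHz in the kHz-scaled units implied by the 133333 constant.
 */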
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
5589 { 1, 1333, 301, 28664 },
5590 { 1, 1066, 294, 24460 },
5591 { 1, 800, 294, 25192 },
5592 { 0, 1333, 276, 27605 },
5593 { 0, 1066, 276, 27605 },
5594 { 0, 800, 231, 23784 },
5597 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
5599 u64 total_count, diff, ret;
5600 u32 count1, count2, count3, m = 0, c = 0;
5601 unsigned long now = jiffies_to_msecs(jiffies), diff1;
5604 assert_spin_locked(&mchdev_lock);
5606 diff1 = now - dev_priv->ips.last_time1;
5608 /* Prevent division-by-zero if we are asking too fast.
5609 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;
5616 count1 = I915_READ(DMIEC);
5617 count2 = I915_READ(DDREC);
5618 count3 = I915_READ(CSIEC);
5620 total_count = count1 + count2 + count3;
5622 /* FIXME: handle per-counter overflow */
5623 if (total_count < dev_priv->ips.last_count1) {
5624 diff = ~0UL - dev_priv->ips.last_count1;
5625 diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}
5630 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
5631 if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}
5639 diff = div_u64(diff, diff1);
5640 ret = ((m * diff) + c);
5641 ret = div_u64(ret, 10);
5643 dev_priv->ips.last_count1 = total_count;
5644 dev_priv->ips.last_time1 = now;
5646 dev_priv->ips.chipset_power = ret;
5651 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
5653 struct drm_device *dev = dev_priv->dev;
	if (INTEL_INFO(dev)->gen != 5)
		return 0;
5659 spin_lock_irq(&mchdev_lock);
5661 val = __i915_chipset_val(dev_priv);
	spin_unlock_irq(&mchdev_lock);

	return val;
5668 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
5670 unsigned long m, x, b;
5673 tsfs = I915_READ(TSFS);
5675 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
5676 x = I915_READ8(TR1);
5678 b = tsfs & TSFS_INTR_MASK;
5680 return ((m * x) / 127) - b;
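/*
 * Purely illustrative numbers: with a slope m = 127, a TR1 reading
 * x = 50 and an intercept b = 10, i915_mch_val() returns
 * (127 * 50) / 127 - 10 = 40.
 */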
5683 static int _pxvid_to_vd(u8 pxvid)
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
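/*
 * e.g. pxvid = 16 falls in the 8..30 band, is treated as 31 and maps to
 * (31 + 2) * 125 = 4125; pvid_to_extvid() below then subtracts its 1125
 * offset on mobile parts.
 */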
5694 static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
5696 struct drm_device *dev = dev_priv->dev;
5697 const int vd = _pxvid_to_vd(pxvid);
5698 const int vm = vd - 1125;
5700 if (INTEL_INFO(dev)->is_mobile)
5701 return vm > 0 ? vm : 0;
5706 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
5708 u64 now, diff, diffms;
5711 assert_spin_locked(&mchdev_lock);
5713 now = ktime_get_raw_ns();
5714 diffms = now - dev_priv->ips.last_time2;
5715 do_div(diffms, NSEC_PER_MSEC);
	/* Don't divide by 0 */
	if (!diffms)
		return;
5721 count = I915_READ(GFXEC);
	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}
5730 dev_priv->ips.last_count2 = count;
5731 dev_priv->ips.last_time2 = now;
	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
5736 dev_priv->ips.gfx_power = diff;
5739 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
5741 struct drm_device *dev = dev_priv->dev;
	if (INTEL_INFO(dev)->gen != 5)
		return;
5746 spin_lock_irq(&mchdev_lock);
5748 __i915_update_gfx_val(dev_priv);
5750 spin_unlock_irq(&mchdev_lock);
5753 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
5755 unsigned long t, corr, state1, corr2, state2;
5758 assert_spin_locked(&mchdev_lock);
5760 pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
5761 pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;
5766 t = i915_mch_val(dev_priv);
5768 /* Revel in the empirically derived constants */
5770 /* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);
5778 corr = corr * ((150142 * state1) / 10000 - 78642);
5780 corr2 = (corr * dev_priv->ips.corr);
5782 state2 = (corr2 * state1) / 10000;
5783 state2 /= 100; /* convert to mW */
5785 __i915_update_gfx_val(dev_priv);
5787 return dev_priv->ips.gfx_power + state2;
5790 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
5792 struct drm_device *dev = dev_priv->dev;
	if (INTEL_INFO(dev)->gen != 5)
		return 0;
5798 spin_lock_irq(&mchdev_lock);
5800 val = __i915_gfx_val(dev_priv);
	spin_unlock_irq(&mchdev_lock);

	return val;
5808 * i915_read_mch_val - return value for IPS use
5810 * Calculate and return a value for the IPS driver to use when deciding whether
5811 * we have thermal and power headroom to increase CPU or GPU power budget.
5813 unsigned long i915_read_mch_val(void)
5815 struct drm_i915_private *dev_priv;
5816 unsigned long chipset_val, graphics_val, ret = 0;
5818 spin_lock_irq(&mchdev_lock);
5821 dev_priv = i915_mch_dev;
5823 chipset_val = __i915_chipset_val(dev_priv);
5824 graphics_val = __i915_gfx_val(dev_priv);
5826 ret = chipset_val + graphics_val;
	spin_unlock_irq(&mchdev_lock);

	return ret;
5833 EXPORT_SYMBOL_GPL(i915_read_mch_val);
5836 * i915_gpu_raise - raise GPU frequency limit
5838 * Raise the limit; IPS indicates we have thermal headroom.
5840 bool i915_gpu_raise(void)
5842 struct drm_i915_private *dev_priv;
5845 spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;
5852 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
5853 dev_priv->ips.max_delay--;
out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
5860 EXPORT_SYMBOL_GPL(i915_gpu_raise);
5863 * i915_gpu_lower - lower GPU frequency limit
5865 * IPS indicates we're close to a thermal limit, so throttle back the GPU
5866 * frequency maximum.
5868 bool i915_gpu_lower(void)
5870 struct drm_i915_private *dev_priv;
5873 spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;
5880 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
5881 dev_priv->ips.max_delay++;
out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
5888 EXPORT_SYMBOL_GPL(i915_gpu_lower);
 * i915_gpu_busy - indicate GPU busyness to IPS
5893 * Tell the IPS driver whether or not the GPU is busy.
5895 bool i915_gpu_busy(void)
5897 struct drm_i915_private *dev_priv;
5898 struct intel_engine_cs *ring;
5902 spin_lock_irq(&mchdev_lock);
5905 dev_priv = i915_mch_dev;
5907 for_each_ring(ring, dev_priv, i)
5908 ret |= !list_empty(&ring->request_list);
	spin_unlock_irq(&mchdev_lock);

	return ret;
5915 EXPORT_SYMBOL_GPL(i915_gpu_busy);
5918 * i915_gpu_turbo_disable - disable graphics turbo
5920 * Disable graphics turbo by resetting the max frequency and setting the
5921 * current frequency to the default.
5923 bool i915_gpu_turbo_disable(void)
5925 struct drm_i915_private *dev_priv;
5928 spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;
5935 dev_priv->ips.max_delay = dev_priv->ips.fstart;
	if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
		ret = false;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
5945 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
5948 * Tells the intel_ips driver that the i915 driver is now loaded, if
5949 * IPS got loaded first.
5951 * This awkward dance is so that neither module has to depend on the
5952 * other in order for IPS to do the appropriate communication of
5953 * GPU turbo limits to i915.
5956 ips_ping_for_i915_load(void)
	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
5967 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
5969 /* We only register the i915 ips part with intel-ips once everything is
5970 * set up, to avoid intel-ips sneaking in and reading bogus values. */
5971 spin_lock_irq(&mchdev_lock);
5972 i915_mch_dev = dev_priv;
5973 spin_unlock_irq(&mchdev_lock);
5975 ips_ping_for_i915_load();
5978 void intel_gpu_ips_teardown(void)
5980 spin_lock_irq(&mchdev_lock);
5981 i915_mch_dev = NULL;
5982 spin_unlock_irq(&mchdev_lock);
5985 static void intel_init_emon(struct drm_device *dev)
5987 struct drm_i915_private *dev_priv = dev->dev_private;
	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);
5996 /* Program energy weights for various events */
5997 I915_WRITE(SDEW, 0x15040d00);
5998 I915_WRITE(CSIEW0, 0x007f0000);
5999 I915_WRITE(CSIEW1, 0x1e220004);
6000 I915_WRITE(CSIEW2, 0x04000004);
6002 for (i = 0; i < 5; i++)
6003 I915_WRITE(PEW(i), 0);
6004 for (i = 0; i < 3; i++)
6005 I915_WRITE(DEW(i), 0);
6007 /* Program P-state weights to account for frequency power adjustment */
6008 for (i = 0; i < 16; i++) {
6009 u32 pxvidfreq = I915_READ(PXVFREQ(i));
6010 unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);

		pxw[i] = val;
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;
6027 for (i = 0; i < 4; i++) {
6028 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
6029 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
6030 I915_WRITE(PXW(i), val);
6033 /* Adjust magic regs to magic values (more experimental results) */
6034 I915_WRITE(OGW0, 0);
6035 I915_WRITE(OGW1, 0);
6036 I915_WRITE(EG0, 0x00007f00);
6037 I915_WRITE(EG1, 0x0000000e);
6038 I915_WRITE(EG2, 0x000e0000);
6039 I915_WRITE(EG3, 0x68000300);
6040 I915_WRITE(EG4, 0x42000000);
6041 I915_WRITE(EG5, 0x00140031);
6045 for (i = 0; i < 8; i++)
6046 I915_WRITE(PXWL(i), 0);
6048 /* Enable PMON + select events */
6049 I915_WRITE(ECR, 0x80000019);
6051 lcfuse = I915_READ(LCFUSE02);
6053 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
6056 void intel_init_gt_powersave(struct drm_device *dev)
6058 struct drm_i915_private *dev_priv = dev->dev_private;
6060 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
	 * requirement.
	 */
6065 if (!i915.enable_rc6) {
6066 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
6067 intel_runtime_pm_get(dev_priv);
6070 if (IS_CHERRYVIEW(dev))
6071 cherryview_init_gt_powersave(dev);
6072 else if (IS_VALLEYVIEW(dev))
6073 valleyview_init_gt_powersave(dev);
6076 void intel_cleanup_gt_powersave(struct drm_device *dev)
6078 struct drm_i915_private *dev_priv = dev->dev_private;
	if (IS_CHERRYVIEW(dev))
		return;
6082 else if (IS_VALLEYVIEW(dev))
6083 valleyview_cleanup_gt_powersave(dev);
6085 if (!i915.enable_rc6)
6086 intel_runtime_pm_put(dev_priv);
6089 static void gen6_suspend_rps(struct drm_device *dev)
6091 struct drm_i915_private *dev_priv = dev->dev_private;
6093 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
6095 gen6_disable_rps_interrupts(dev);
6099 * intel_suspend_gt_powersave - suspend PM work and helper threads
6102 * We don't want to disable RC6 or other features here, we just want
6103 * to make sure any work we've queued has finished and won't bother
6104 * us while we're suspended.
6106 void intel_suspend_gt_powersave(struct drm_device *dev)
6108 struct drm_i915_private *dev_priv = dev->dev_private;
	if (INTEL_INFO(dev)->gen < 6)
		return;
6113 gen6_suspend_rps(dev);
6115 /* Force GPU to min freq during suspend */
6116 gen6_rps_idle(dev_priv);
6119 void intel_disable_gt_powersave(struct drm_device *dev)
6121 struct drm_i915_private *dev_priv = dev->dev_private;
6123 if (IS_IRONLAKE_M(dev)) {
6124 ironlake_disable_drps(dev);
6125 } else if (INTEL_INFO(dev)->gen >= 6) {
6126 intel_suspend_gt_powersave(dev);
6128 mutex_lock(&dev_priv->rps.hw_lock);
6129 if (INTEL_INFO(dev)->gen >= 9)
6130 gen9_disable_rps(dev);
6131 else if (IS_CHERRYVIEW(dev))
6132 cherryview_disable_rps(dev);
6133 else if (IS_VALLEYVIEW(dev))
6134 valleyview_disable_rps(dev);
		else
			gen6_disable_rps(dev);
6138 dev_priv->rps.enabled = false;
6139 mutex_unlock(&dev_priv->rps.hw_lock);
6143 static void intel_gen6_powersave_work(struct work_struct *work)
6145 struct drm_i915_private *dev_priv =
6146 container_of(work, struct drm_i915_private,
6147 rps.delayed_resume_work.work);
6148 struct drm_device *dev = dev_priv->dev;
6150 mutex_lock(&dev_priv->rps.hw_lock);
6152 gen6_reset_rps_interrupts(dev);
6154 if (IS_CHERRYVIEW(dev)) {
6155 cherryview_enable_rps(dev);
6156 } else if (IS_VALLEYVIEW(dev)) {
6157 valleyview_enable_rps(dev);
6158 } else if (INTEL_INFO(dev)->gen >= 9) {
6159 gen9_enable_rc6(dev);
6160 gen9_enable_rps(dev);
6161 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
6162 __gen6_update_ring_freq(dev);
6163 } else if (IS_BROADWELL(dev)) {
6164 gen8_enable_rps(dev);
6165 __gen6_update_ring_freq(dev);
	} else {
		gen6_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	}
6171 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
6172 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
6174 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
6175 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
6177 dev_priv->rps.enabled = true;
6179 gen6_enable_rps_interrupts(dev);
6181 mutex_unlock(&dev_priv->rps.hw_lock);
6183 intel_runtime_pm_put(dev_priv);
6186 void intel_enable_gt_powersave(struct drm_device *dev)
6188 struct drm_i915_private *dev_priv = dev->dev_private;
6190 /* Powersaving is controlled by the host when inside a VM */
	if (intel_vgpu_active(dev))
		return;
6194 if (IS_IRONLAKE_M(dev)) {
6195 mutex_lock(&dev->struct_mutex);
6196 ironlake_enable_drps(dev);
6197 intel_init_emon(dev);
6198 mutex_unlock(&dev->struct_mutex);
6199 } else if (INTEL_INFO(dev)->gen >= 6) {
6201 * PCU communication is slow and this doesn't need to be
6202 * done at any specific time, so do this out of our fast path
6203 * to make resume and init faster.
6205 * We depend on the HW RC6 power context save/restore
6206 * mechanism when entering D3 through runtime PM suspend. So
6207 * disable RPM until RPS/RC6 is properly setup. We can only
6208 * get here via the driver load/system resume/runtime resume
6209 * paths, so the _noresume version is enough (and in case of
6210 * runtime resume it's necessary).
6212 if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
6213 round_jiffies_up_relative(HZ)))
6214 intel_runtime_pm_get_noresume(dev_priv);
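/*
 * round_jiffies_up_relative(HZ) delays the work by roughly one second,
 * rounded up to a whole-second jiffy boundary so the wakeup can be
 * batched with other timers instead of firing at an arbitrary offset.
 */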
6218 void intel_reset_gt_powersave(struct drm_device *dev)
6220 struct drm_i915_private *dev_priv = dev->dev_private;
6222 if (INTEL_INFO(dev)->gen < 6)
6223 return;
6225 gen6_suspend_rps(dev);
6226 dev_priv->rps.enabled = false;
6229 static void ibx_init_clock_gating(struct drm_device *dev)
6231 struct drm_i915_private *dev_priv = dev->dev_private;
6234 * On Ibex Peak and Cougar Point, we need to disable clock
6235 * gating for the panel power sequencer or it will fail to
6236 * start up when no ports are active.
6238 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
6241 static void g4x_disable_trickle_feed(struct drm_device *dev)
6243 struct drm_i915_private *dev_priv = dev->dev_private;
6246 for_each_pipe(dev_priv, pipe) {
6247 I915_WRITE(DSPCNTR(pipe),
6248 I915_READ(DSPCNTR(pipe)) |
6249 DISPPLANE_TRICKLE_FEED_DISABLE);
6251 I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
6252 POSTING_READ(DSPSURF(pipe));
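/*
 * Note on the pattern above: DSPSURF is rewritten with its current
 * value because surface-address writes are what arm the plane's
 * double-buffered register update, so this forces the DSPCNTR change
 * to latch; the POSTING_READ then flushes the write out.
 */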
6256 static void ilk_init_lp_watermarks(struct drm_device *dev)
6258 struct drm_i915_private *dev_priv = dev->dev_private;
6260 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
6261 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
6262 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
6265 * Don't touch WM1S_LP_EN here.
6266 * Doing so could cause underruns.
6270 static void ironlake_init_clock_gating(struct drm_device *dev)
6272 struct drm_i915_private *dev_priv = dev->dev_private;
6273 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6277 * WaFbcDisableDpfcClockGating:ilk
6279 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
6280 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
6281 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
6283 I915_WRITE(PCH_3DCGDIS0,
6284 MARIUNIT_CLOCK_GATE_DISABLE |
6285 SVSMUNIT_CLOCK_GATE_DISABLE);
6286 I915_WRITE(PCH_3DCGDIS1,
6287 VFMUNIT_CLOCK_GATE_DISABLE);
6290 * According to the spec the following bits should be set in
6291 * order to enable memory self-refresh:
6292 * The bit 22/21 of 0x42004
6293 * The bit 5 of 0x42020
6294 * The bit 15 of 0x45000
6296 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6297 (I915_READ(ILK_DISPLAY_CHICKEN2) |
6298 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
6299 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
6300 I915_WRITE(DISP_ARB_CTL,
6301 (I915_READ(DISP_ARB_CTL) |
6304 ilk_init_lp_watermarks(dev);
6307 * Based on the hardware documentation, the following bits
6308 * should be set unconditionally in order to enable FBC.
6309 * The bit 22 of 0x42000
6310 * The bit 22 of 0x42004
6311 * The bit 7,8,9 of 0x42020.
6313 if (IS_IRONLAKE_M(dev)) {
6314 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
6315 I915_WRITE(ILK_DISPLAY_CHICKEN1,
6316 I915_READ(ILK_DISPLAY_CHICKEN1) |
6318 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6319 I915_READ(ILK_DISPLAY_CHICKEN2) |
6323 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
6325 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6326 I915_READ(ILK_DISPLAY_CHICKEN2) |
6327 ILK_ELPIN_409_SELECT);
6328 I915_WRITE(_3D_CHICKEN2,
6329 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
6330 _3D_CHICKEN2_WM_READ_PIPELINED);
6332 /* WaDisableRenderCachePipelinedFlush:ilk */
6333 I915_WRITE(CACHE_MODE_0,
6334 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
6336 /* WaDisable_RenderCache_OperationalFlush:ilk */
6337 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
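/*
 * A minimal sketch of the masked-write convention used throughout this
 * file (the real definitions live in i915_reg.h): for registers such
 * as CACHE_MODE_0 the upper 16 bits are a write-enable mask, so single
 * bits can be set or cleared without a read-modify-write cycle:
 *
 *	#define _MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
 *	#define _MASKED_BIT_ENABLE(a)		(((a) << 16) | (a))
 *	#define _MASKED_BIT_DISABLE(a)		((a) << 16)
 */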
6339 g4x_disable_trickle_feed(dev);
6341 ibx_init_clock_gating(dev);
6344 static void cpt_init_clock_gating(struct drm_device *dev)
6346 struct drm_i915_private *dev_priv = dev->dev_private;
6351 * On Ibex Peak and Cougar Point, we need to disable clock
6352 * gating for the panel power sequencer or it will fail to
6353 * start up when no ports are active.
6355 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
6356 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
6357 PCH_CPUNIT_CLOCK_GATE_DISABLE);
6358 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
6359 DPLS_EDP_PPS_FIX_DIS);
6360 /* The following fixes display corruption (a few pixels shifted
6361 * downward) seen only on the LVDS panels of some Ivy Bridge HP laptops.
6363 for_each_pipe(dev_priv, pipe) {
6364 val = I915_READ(TRANS_CHICKEN2(pipe));
6365 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
6366 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
6367 if (dev_priv->vbt.fdi_rx_polarity_inverted)
6368 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
6369 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
6370 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
6371 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
6372 I915_WRITE(TRANS_CHICKEN2(pipe), val);
6374 /* WADP0ClockGatingDisable */
6375 for_each_pipe(dev_priv, pipe) {
6376 I915_WRITE(TRANS_CHICKEN1(pipe),
6377 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
6381 static void gen6_check_mch_setup(struct drm_device *dev)
6383 struct drm_i915_private *dev_priv = dev->dev_private;
6386 tmp = I915_READ(MCH_SSKPD);
6387 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
6388 DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x. This can cause underruns.\n",
6389 tmp);
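/*
 * MCH_SSKPD is expected to be programmed by the firmware; the check
 * above only warns rather than correcting the value, since a too-low
 * WM0 latency there is a known cause of display FIFO underruns.
 */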
6392 static void gen6_init_clock_gating(struct drm_device *dev)
6394 struct drm_i915_private *dev_priv = dev->dev_private;
6395 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6397 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
6399 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6400 I915_READ(ILK_DISPLAY_CHICKEN2) |
6401 ILK_ELPIN_409_SELECT);
6403 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
6404 I915_WRITE(_3D_CHICKEN,
6405 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
6407 /* WaDisable_RenderCache_OperationalFlush:snb */
6408 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6411 * BSpec recommends 8x4 when MSAA is used,
6412 * however in practice 16x4 seems fastest.
6414 * Note that PS/WM thread counts depend on the WIZ hashing
6415 * disable bit, which we don't touch here, but it's good
6416 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
6418 I915_WRITE(GEN6_GT_MODE,
6419 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
6421 ilk_init_lp_watermarks(dev);
6423 I915_WRITE(CACHE_MODE_0,
6424 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
6426 I915_WRITE(GEN6_UCGCTL1,
6427 I915_READ(GEN6_UCGCTL1) |
6428 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
6429 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
6431 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
6432 * gating disable must be set. Failure to set it results in
6433 * flickering pixels due to Z write ordering failures after
6434 * some amount of runtime in the Mesa "fire" demo, and Unigine
6435 * Sanctuary and Tropics, and apparently anything else with
6436 * alpha test or pixel discard.
6438 * According to the spec, bit 11 (RCCUNIT) must also be set,
6439 * but we didn't debug actual testcases to find it out.
6441 * WaDisableRCCUnitClockGating:snb
6442 * WaDisableRCPBUnitClockGating:snb
6444 I915_WRITE(GEN6_UCGCTL2,
6445 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
6446 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
6448 /* WaStripsFansDisableFastClipPerformanceFix:snb */
6449 I915_WRITE(_3D_CHICKEN3,
6450 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
6454 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
6455 * 3DSTATE_SF number of SF output attributes is more than 16."
6457 I915_WRITE(_3D_CHICKEN3,
6458 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
6461 * According to the spec the following bits should be
6462 * set in order to enable memory self-refresh and fbc:
6463 * The bit21 and bit22 of 0x42000
6464 * The bit21 and bit22 of 0x42004
6465 * The bit5 and bit7 of 0x42020
6466 * The bit14 of 0x70180
6467 * The bit14 of 0x71180
6469 * WaFbcAsynchFlipDisableFbcQueue:snb
6471 I915_WRITE(ILK_DISPLAY_CHICKEN1,
6472 I915_READ(ILK_DISPLAY_CHICKEN1) |
6473 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
6474 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6475 I915_READ(ILK_DISPLAY_CHICKEN2) |
6476 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
6477 I915_WRITE(ILK_DSPCLK_GATE_D,
6478 I915_READ(ILK_DSPCLK_GATE_D) |
6479 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
6480 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
6482 g4x_disable_trickle_feed(dev);
6484 cpt_init_clock_gating(dev);
6486 gen6_check_mch_setup(dev);
6489 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
6491 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
6494 * WaVSThreadDispatchOverride:ivb,vlv
6496 * This actually overrides the dispatch
6497 * mode for all thread types.
6499 reg &= ~GEN7_FF_SCHED_MASK;
6500 reg |= GEN7_FF_TS_SCHED_HW;
6501 reg |= GEN7_FF_VS_SCHED_HW;
6502 reg |= GEN7_FF_DS_SCHED_HW;
6504 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
6507 static void lpt_init_clock_gating(struct drm_device *dev)
6509 struct drm_i915_private *dev_priv = dev->dev_private;
6512 * TODO: this bit should only be enabled when really needed, then
6513 * disabled when not needed anymore in order to save power.
6515 if (HAS_PCH_LPT_LP(dev))
6516 I915_WRITE(SOUTH_DSPCLK_GATE_D,
6517 I915_READ(SOUTH_DSPCLK_GATE_D) |
6518 PCH_LP_PARTITION_LEVEL_DISABLE);
6520 /* WADPOClockGatingDisable:hsw */
6521 I915_WRITE(TRANS_CHICKEN1(PIPE_A),
6522 I915_READ(TRANS_CHICKEN1(PIPE_A)) |
6523 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
6526 static void lpt_suspend_hw(struct drm_device *dev)
6528 struct drm_i915_private *dev_priv = dev->dev_private;
6530 if (HAS_PCH_LPT_LP(dev)) {
6531 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
6533 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
6534 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6538 static void broadwell_init_clock_gating(struct drm_device *dev)
6540 struct drm_i915_private *dev_priv = dev->dev_private;
6544 ilk_init_lp_watermarks(dev);
6546 /* WaSwitchSolVfFArbitrationPriority:bdw */
6547 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
6549 /* WaPsrDPAMaskVBlankInSRD:bdw */
6550 I915_WRITE(CHICKEN_PAR1_1,
6551 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
6553 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
6554 for_each_pipe(dev_priv, pipe) {
6555 I915_WRITE(CHICKEN_PIPESL_1(pipe),
6556 I915_READ(CHICKEN_PIPESL_1(pipe)) |
6557 BDW_DPRS_MASK_VBLANK_SRD);
6560 /* WaVSRefCountFullforceMissDisable:bdw */
6561 /* WaDSRefCountFullforceMissDisable:bdw */
6562 I915_WRITE(GEN7_FF_THREAD_MODE,
6563 I915_READ(GEN7_FF_THREAD_MODE) &
6564 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
6566 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
6567 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
6569 /* WaDisableSDEUnitClockGating:bdw */
6570 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6571 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
6574 * WaProgramL3SqcReg1Default:bdw
6575 * WaTempDisableDOPClkGating:bdw
6577 misccpctl = I915_READ(GEN7_MISCCPCTL);
6578 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
6579 I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
6580 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
6583 * WaGttCachingOffByDefault:bdw
6584 * GTT cache may not work with big pages, so if those
6585 * are ever enabled GTT cache may need to be disabled.
6587 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
6589 lpt_init_clock_gating(dev);
6592 static void haswell_init_clock_gating(struct drm_device *dev)
6594 struct drm_i915_private *dev_priv = dev->dev_private;
6596 ilk_init_lp_watermarks(dev);
6598 /* L3 caching of data atomics doesn't work -- disable it. */
6599 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
6600 I915_WRITE(HSW_ROW_CHICKEN3,
6601 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
6603 /* This is required by WaCatErrorRejectionIssue:hsw */
6604 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
6605 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
6606 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
6608 /* WaVSRefCountFullforceMissDisable:hsw */
6609 I915_WRITE(GEN7_FF_THREAD_MODE,
6610 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
6612 /* WaDisable_RenderCache_OperationalFlush:hsw */
6613 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6615 /* enable HiZ Raw Stall Optimization */
6616 I915_WRITE(CACHE_MODE_0_GEN7,
6617 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
6619 /* WaDisable4x2SubspanOptimization:hsw */
6620 I915_WRITE(CACHE_MODE_1,
6621 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
6624 * BSpec recommends 8x4 when MSAA is used,
6625 * however in practice 16x4 seems fastest.
6627 * Note that PS/WM thread counts depend on the WIZ hashing
6628 * disable bit, which we don't touch here, but it's good
6629 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
6631 I915_WRITE(GEN7_GT_MODE,
6632 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
6634 /* WaSampleCChickenBitEnable:hsw */
6635 I915_WRITE(HALF_SLICE_CHICKEN3,
6636 _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
6638 /* WaSwitchSolVfFArbitrationPriority:hsw */
6639 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
6641 /* WaRsPkgCStateDisplayPMReq:hsw */
6642 I915_WRITE(CHICKEN_PAR1_1,
6643 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
6645 lpt_init_clock_gating(dev);
6648 static void ivybridge_init_clock_gating(struct drm_device *dev)
6650 struct drm_i915_private *dev_priv = dev->dev_private;
6653 ilk_init_lp_watermarks(dev);
6655 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
6657 /* WaDisableEarlyCull:ivb */
6658 I915_WRITE(_3D_CHICKEN3,
6659 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
6661 /* WaDisableBackToBackFlipFix:ivb */
6662 I915_WRITE(IVB_CHICKEN3,
6663 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
6664 CHICKEN3_DGMG_DONE_FIX_DISABLE);
6666 /* WaDisablePSDDualDispatchEnable:ivb */
6667 if (IS_IVB_GT1(dev))
6668 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
6669 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
6671 /* WaDisable_RenderCache_OperationalFlush:ivb */
6672 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6674 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
6675 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
6676 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
6678 /* WaApplyL3ControlAndL3ChickenMode:ivb */
6679 I915_WRITE(GEN7_L3CNTLREG1,
6680 GEN7_WA_FOR_GEN7_L3_CONTROL);
6681 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
6682 GEN7_WA_L3_CHICKEN_MODE);
6683 if (IS_IVB_GT1(dev))
6684 I915_WRITE(GEN7_ROW_CHICKEN2,
6685 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
6687 /* must write both registers */
6688 I915_WRITE(GEN7_ROW_CHICKEN2,
6689 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
6690 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
6691 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
6694 /* WaForceL3Serialization:ivb */
6695 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
6696 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
6699 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
6700 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
6702 I915_WRITE(GEN6_UCGCTL2,
6703 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
6705 /* This is required by WaCatErrorRejectionIssue:ivb */
6706 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
6707 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
6708 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
6710 g4x_disable_trickle_feed(dev);
6712 gen7_setup_fixed_func_scheduler(dev_priv);
6714 if (0) { /* causes HiZ corruption on ivb:gt1 */
6715 /* enable HiZ Raw Stall Optimization */
6716 I915_WRITE(CACHE_MODE_0_GEN7,
6717 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
6720 /* WaDisable4x2SubspanOptimization:ivb */
6721 I915_WRITE(CACHE_MODE_1,
6722 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
6725 * BSpec recommends 8x4 when MSAA is used,
6726 * however in practice 16x4 seems fastest.
6728 * Note that PS/WM thread counts depend on the WIZ hashing
6729 * disable bit, which we don't touch here, but it's good
6730 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
6732 I915_WRITE(GEN7_GT_MODE,
6733 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
6735 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
6736 snpcr &= ~GEN6_MBC_SNPCR_MASK;
6737 snpcr |= GEN6_MBC_SNPCR_MED;
6738 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
6740 if (!HAS_PCH_NOP(dev))
6741 cpt_init_clock_gating(dev);
6743 gen6_check_mch_setup(dev);
6746 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
6748 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
6751 * Disable trickle feed and enable pnd deadline calculation
6753 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
6754 I915_WRITE(CBR1_VLV, 0);
6757 static void valleyview_init_clock_gating(struct drm_device *dev)
6759 struct drm_i915_private *dev_priv = dev->dev_private;
6761 vlv_init_display_clock_gating(dev_priv);
6763 /* WaDisableEarlyCull:vlv */
6764 I915_WRITE(_3D_CHICKEN3,
6765 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
6767 /* WaDisableBackToBackFlipFix:vlv */
6768 I915_WRITE(IVB_CHICKEN3,
6769 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
6770 CHICKEN3_DGMG_DONE_FIX_DISABLE);
6772 /* WaPsdDispatchEnable:vlv */
6773 /* WaDisablePSDDualDispatchEnable:vlv */
6774 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
6775 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
6776 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
6778 /* WaDisable_RenderCache_OperationalFlush:vlv */
6779 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6781 /* WaForceL3Serialization:vlv */
6782 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
6783 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
6785 /* WaDisableDopClockGating:vlv */
6786 I915_WRITE(GEN7_ROW_CHICKEN2,
6787 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
6789 /* This is required by WaCatErrorRejectionIssue:vlv */
6790 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
6791 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
6792 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
6794 gen7_setup_fixed_func_scheduler(dev_priv);
6797 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
6798 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
6800 I915_WRITE(GEN6_UCGCTL2,
6801 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
6803 /* WaDisableL3Bank2xClockGate:vlv
6804 * Disabling L3 clock gating - MMIO 940c[25] = 1
6805 * Set bit 25 to disable L3_BANK_2x_CLK_GATING */
6806 I915_WRITE(GEN7_UCGCTL4,
6807 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
6810 * BSpec says this must be set, even though
6811 * WaDisable4x2SubspanOptimization isn't listed for VLV.
6813 I915_WRITE(CACHE_MODE_1,
6814 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
6817 * BSpec recommends 8x4 when MSAA is used,
6818 * however in practice 16x4 seems fastest.
6820 * Note that PS/WM thread counts depend on the WIZ hashing
6821 * disable bit, which we don't touch here, but it's good
6822 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
6824 I915_WRITE(GEN7_GT_MODE,
6825 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
6828 * WaIncreaseL3CreditsForVLVB0:vlv
6829 * This is the hardware default actually.
6831 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
6834 * WaDisableVLVClockGating_VBIIssue:vlv
6835 * Disable clock gating on the GCFG unit to prevent a delay
6836 * in the reporting of vblank events.
6838 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
6841 static void cherryview_init_clock_gating(struct drm_device *dev)
6843 struct drm_i915_private *dev_priv = dev->dev_private;
6845 vlv_init_display_clock_gating(dev_priv);
6847 /* WaVSRefCountFullforceMissDisable:chv */
6848 /* WaDSRefCountFullforceMissDisable:chv */
6849 I915_WRITE(GEN7_FF_THREAD_MODE,
6850 I915_READ(GEN7_FF_THREAD_MODE) &
6851 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
6853 /* WaDisableSemaphoreAndSyncFlipWait:chv */
6854 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
6855 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
6857 /* WaDisableCSUnitClockGating:chv */
6858 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
6859 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
6861 /* WaDisableSDEUnitClockGating:chv */
6862 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6863 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
6866 * GTT cache may not work with big pages, so if those
6867 * are ever enabled GTT cache may need to be disabled.
6869 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
6872 static void g4x_init_clock_gating(struct drm_device *dev)
6874 struct drm_i915_private *dev_priv = dev->dev_private;
6875 uint32_t dspclk_gate;
6877 I915_WRITE(RENCLK_GATE_D1, 0);
6878 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
6879 GS_UNIT_CLOCK_GATE_DISABLE |
6880 CL_UNIT_CLOCK_GATE_DISABLE);
6881 I915_WRITE(RAMCLK_GATE_D, 0);
6882 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
6883 OVRUNIT_CLOCK_GATE_DISABLE |
6884 OVCUNIT_CLOCK_GATE_DISABLE;
6885 if (IS_GM45(dev))
6886 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
6887 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
6889 /* WaDisableRenderCachePipelinedFlush */
6890 I915_WRITE(CACHE_MODE_0,
6891 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
6893 /* WaDisable_RenderCache_OperationalFlush:g4x */
6894 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6896 g4x_disable_trickle_feed(dev);
6899 static void crestline_init_clock_gating(struct drm_device *dev)
6901 struct drm_i915_private *dev_priv = dev->dev_private;
6903 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
6904 I915_WRITE(RENCLK_GATE_D2, 0);
6905 I915_WRITE(DSPCLK_GATE_D, 0);
6906 I915_WRITE(RAMCLK_GATE_D, 0);
6907 I915_WRITE16(DEUC, 0);
6908 I915_WRITE(MI_ARB_STATE,
6909 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
6911 /* WaDisable_RenderCache_OperationalFlush:gen4 */
6912 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6915 static void broadwater_init_clock_gating(struct drm_device *dev)
6917 struct drm_i915_private *dev_priv = dev->dev_private;
6919 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
6920 I965_RCC_CLOCK_GATE_DISABLE |
6921 I965_RCPB_CLOCK_GATE_DISABLE |
6922 I965_ISC_CLOCK_GATE_DISABLE |
6923 I965_FBC_CLOCK_GATE_DISABLE);
6924 I915_WRITE(RENCLK_GATE_D2, 0);
6925 I915_WRITE(MI_ARB_STATE,
6926 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
6928 /* WaDisable_RenderCache_OperationalFlush:gen4 */
6929 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6932 static void gen3_init_clock_gating(struct drm_device *dev)
6934 struct drm_i915_private *dev_priv = dev->dev_private;
6935 u32 dstate = I915_READ(D_STATE);
6937 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
6938 DSTATE_DOT_CLOCK_GATING;
6939 I915_WRITE(D_STATE, dstate);
6941 if (IS_PINEVIEW(dev))
6942 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
6944 /* IIR "flip pending" means done if this bit is set */
6945 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
6947 /* interrupts should cause a wake up from C3 */
6948 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
6950 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
6951 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
6953 I915_WRITE(MI_ARB_STATE,
6954 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
6957 static void i85x_init_clock_gating(struct drm_device *dev)
6959 struct drm_i915_private *dev_priv = dev->dev_private;
6961 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
6963 /* interrupts should cause a wake up from C3 */
6964 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
6965 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
6967 I915_WRITE(MEM_MODE,
6968 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
6971 static void i830_init_clock_gating(struct drm_device *dev)
6973 struct drm_i915_private *dev_priv = dev->dev_private;
6975 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
6977 I915_WRITE(MEM_MODE,
6978 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
6979 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
6982 void intel_init_clock_gating(struct drm_device *dev)
6984 struct drm_i915_private *dev_priv = dev->dev_private;
6986 if (dev_priv->display.init_clock_gating)
6987 dev_priv->display.init_clock_gating(dev);
6990 void intel_suspend_hw(struct drm_device *dev)
6992 if (HAS_PCH_LPT(dev))
6993 lpt_suspend_hw(dev);
6996 /* Set up chip specific power management-related functions */
6997 void intel_init_pm(struct drm_device *dev)
6999 struct drm_i915_private *dev_priv = dev->dev_private;
7001 intel_fbc_init(dev_priv);
7004 if (IS_PINEVIEW(dev))
7005 i915_pineview_get_mem_freq(dev);
7006 else if (IS_GEN5(dev))
7007 i915_ironlake_get_mem_freq(dev);
7009 /* For FIFO watermark updates */
7010 if (INTEL_INFO(dev)->gen >= 9) {
7011 skl_setup_wm_latency(dev);
7013 if (IS_BROXTON(dev))
7014 dev_priv->display.init_clock_gating =
7015 bxt_init_clock_gating;
7016 dev_priv->display.update_wm = skl_update_wm;
7017 } else if (HAS_PCH_SPLIT(dev)) {
7018 ilk_setup_wm_latency(dev);
7020 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
7021 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
7022 (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
7023 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
7024 dev_priv->display.update_wm = ilk_update_wm;
7025 dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
7026 dev_priv->display.program_watermarks = ilk_program_watermarks;
7027 } else {
7028 DRM_DEBUG_KMS("Failed to read display plane latency. "
7029 "Disable CxSR\n");
7030 }
7032 if (IS_GEN5(dev))
7033 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
7034 else if (IS_GEN6(dev))
7035 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
7036 else if (IS_IVYBRIDGE(dev))
7037 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
7038 else if (IS_HASWELL(dev))
7039 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
7040 else if (INTEL_INFO(dev)->gen == 8)
7041 dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
7042 } else if (IS_CHERRYVIEW(dev)) {
7043 vlv_setup_wm_latency(dev);
7045 dev_priv->display.update_wm = vlv_update_wm;
7046 dev_priv->display.init_clock_gating =
7047 cherryview_init_clock_gating;
7048 } else if (IS_VALLEYVIEW(dev)) {
7049 vlv_setup_wm_latency(dev);
7051 dev_priv->display.update_wm = vlv_update_wm;
7052 dev_priv->display.init_clock_gating =
7053 valleyview_init_clock_gating;
7054 } else if (IS_PINEVIEW(dev)) {
7055 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
7056 dev_priv->is_ddr3,
7057 dev_priv->fsb_freq,
7058 dev_priv->mem_freq)) {
7059 DRM_INFO("failed to find known CxSR latency "
7060 "(found ddr%s fsb freq %d, mem freq %d), "
7061 "disabling CxSR\n",
7062 (dev_priv->is_ddr3 == 1) ? "3" : "2",
7063 dev_priv->fsb_freq, dev_priv->mem_freq);
7064 /* Disable CxSR and never update its watermark again */
7065 intel_set_memory_cxsr(dev_priv, false);
7066 dev_priv->display.update_wm = NULL;
7067 } else
7068 dev_priv->display.update_wm = pineview_update_wm;
7069 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7070 } else if (IS_G4X(dev)) {
7071 dev_priv->display.update_wm = g4x_update_wm;
7072 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
7073 } else if (IS_GEN4(dev)) {
7074 dev_priv->display.update_wm = i965_update_wm;
7075 if (IS_CRESTLINE(dev))
7076 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
7077 else if (IS_BROADWATER(dev))
7078 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
7079 } else if (IS_GEN3(dev)) {
7080 dev_priv->display.update_wm = i9xx_update_wm;
7081 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
7082 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7083 } else if (IS_GEN2(dev)) {
7084 if (INTEL_INFO(dev)->num_pipes == 1) {
7085 dev_priv->display.update_wm = i845_update_wm;
7086 dev_priv->display.get_fifo_size = i845_get_fifo_size;
7087 } else {
7088 dev_priv->display.update_wm = i9xx_update_wm;
7089 dev_priv->display.get_fifo_size = i830_get_fifo_size;
7092 if (IS_I85X(dev) || IS_I865G(dev))
7093 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
7094 else
7095 dev_priv->display.init_clock_gating = i830_init_clock_gating;
7096 } else {
7097 DRM_ERROR("unexpected fall-through in intel_init_pm\n");
7101 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
7103 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7105 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
7106 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
7107 return -EAGAIN;
7108 }
7110 I915_WRITE(GEN6_PCODE_DATA, *val);
7111 I915_WRITE(GEN6_PCODE_DATA1, 0);
7112 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7114 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
7115 500)) {
7116 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
7117 return -ETIMEDOUT;
7118 }
7120 *val = I915_READ(GEN6_PCODE_DATA);
7121 I915_WRITE(GEN6_PCODE_DATA, 0);
7123 return 0;
7126 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
7128 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7130 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
7131 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
7132 return -EAGAIN;
7133 }
7135 I915_WRITE(GEN6_PCODE_DATA, val);
7136 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7138 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
7139 500)) {
7140 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
7141 return -ETIMEDOUT;
7142 }
7144 I915_WRITE(GEN6_PCODE_DATA, 0);
7146 return 0;
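/*
 * Example usage of the pcode helpers (a sketch; GEN6_READ_OC_PARAMS is
 * just an illustrative mailbox command, real callers use whichever
 * opcode their platform defines):
 *
 *	u32 val = 0;
 *	int ret;
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &val);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 *	if (ret)
 *		DRM_DEBUG_DRIVER("pcode read failed: %d\n", ret);
 *
 * Both helpers WARN if rps.hw_lock is not held: the lock is part of the
 * calling convention, not an optimization.
 */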
7149 static int vlv_gpu_freq_div(unsigned int czclk_freq)
7151 switch (czclk_freq) {
7152 case 200:
7153 return 10;
7154 case 267:
7155 return 12;
7156 case 320:
7157 case 333:
7158 return 16;
7159 case 400:
7160 return 20;
7161 default:
7162 return -1;
7163 }
7166 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
7168 int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
7170 div = vlv_gpu_freq_div(czclk_freq);
7171 if (div < 0)
7172 return div;
7174 return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
7177 static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
7179 int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
7181 mul = vlv_gpu_freq_div(czclk_freq);
7182 if (mul < 0)
7183 return mul;
7185 return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
7188 static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
7190 int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
7192 div = vlv_gpu_freq_div(czclk_freq) / 2;
7193 if (div < 0)
7194 return div;
7196 return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
7199 static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7201 int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
7203 mul = vlv_gpu_freq_div(czclk_freq) / 2;
7204 if (mul < 0)
7205 return mul;
7207 /* CHV needs even values */
7208 return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
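/*
 * Worked example for the conversions above, assuming a 400 MHz CZ
 * clock (czclk_freq == 400, vlv_gpu_freq_div() == 20):
 *
 *	byt_gpu_freq(val)  = 400 * (val + 6 - 0xbd) / 20 = 20 * (val - 183) MHz
 *	byt_freq_opcode(f) = 20 * f / 400 + 0xbd - 6     = f / 20 + 183
 *
 * i.e. the two functions are exact inverses, and one opcode step equals
 * czclk_freq / div = 20 MHz. The CHV variants work in half steps and
 * round the resulting opcode to an even value, as the comment above
 * notes.
 */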
7211 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
7213 if (IS_GEN9(dev_priv->dev))
7214 return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
7215 GEN9_FREQ_SCALER);
7216 else if (IS_CHERRYVIEW(dev_priv->dev))
7217 return chv_gpu_freq(dev_priv, val);
7218 else if (IS_VALLEYVIEW(dev_priv->dev))
7219 return byt_gpu_freq(dev_priv, val);
7221 return val * GT_FREQUENCY_MULTIPLIER;
7224 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
7226 if (IS_GEN9(dev_priv->dev))
7227 return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
7228 GT_FREQUENCY_MULTIPLIER);
7229 else if (IS_CHERRYVIEW(dev_priv->dev))
7230 return chv_freq_opcode(dev_priv, val);
7231 else if (IS_VALLEYVIEW(dev_priv->dev))
7232 return byt_freq_opcode(dev_priv, val);
7234 return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
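/*
 * Unit summary: on SNB through BDW one RPS opcode unit corresponds to
 * GT_FREQUENCY_MULTIPLIER (50) MHz, on gen9 to 50/GEN9_FREQ_SCALER =
 * 50/3 MHz, and on VLV/CHV it depends on the CZ clock as computed
 * above. E.g. on SNB, intel_freq_opcode(dev_priv, 450) == 9 and
 * intel_gpu_freq(dev_priv, 9) == 450.
 */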
7237 struct request_boost {
7238 struct work_struct work;
7239 struct drm_i915_gem_request *req;
7242 static void __intel_rps_boost_work(struct work_struct *work)
7244 struct request_boost *boost = container_of(work, struct request_boost, work);
7245 struct drm_i915_gem_request *req = boost->req;
7247 if (!i915_gem_request_completed(req, true))
7248 gen6_rps_boost(to_i915(req->ring->dev), NULL,
7249 req->emitted_jiffies);
7251 i915_gem_request_unreference__unlocked(req);
7255 void intel_queue_rps_boost_for_request(struct drm_device *dev,
7256 struct drm_i915_gem_request *req)
7258 struct request_boost *boost;
7260 if (req == NULL || INTEL_INFO(dev)->gen < 6)
7261 return;
7263 if (i915_gem_request_completed(req, true))
7264 return;
7266 boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
7267 if (boost == NULL)
7268 return;
7270 i915_gem_request_reference(req);
7271 boost->req = req;
7273 INIT_WORK(&boost->work, __intel_rps_boost_work);
7274 queue_work(to_i915(dev)->wq, &boost->work);
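/*
 * Note on the allocation above: GFP_ATOMIC is used because this can be
 * reached from contexts that must not sleep (e.g. under locks on wait
 * paths), while the actual frequency bump is deferred to the driver
 * workqueue where sleeping and taking rps.hw_lock are safe.
 */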
7277 void intel_pm_setup(struct drm_device *dev)
7279 struct drm_i915_private *dev_priv = dev->dev_private;
7281 mutex_init(&dev_priv->rps.hw_lock);
7282 spin_lock_init(&dev_priv->rps.client_lock);
7284 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
7285 intel_gen6_powersave_work);
7286 INIT_LIST_HEAD(&dev_priv->rps.clients);
7287 INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
7288 INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);
7290 dev_priv->pm.suspended = false;
7291 atomic_set(&dev_priv->pm.wakeref_count, 0);
7292 atomic_set(&dev_priv->pm.atomic_seq, 0);
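/*
 * A rough sketch (an assumption based on the callers elsewhere in the
 * driver, not enforced here) of how the entry points in this file fit
 * together at driver load time:
 *
 *	intel_pm_setup(dev);		// locks, lists, delayed work
 *	intel_init_pm(dev);		// watermark + clock gating vfuncs
 *	intel_init_clock_gating(dev);	// apply per-platform workarounds
 *	intel_enable_gt_powersave(dev);	// RPS/RC6, via delayed work
 */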