2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
/* Link status check timeout — units not visible in this extract; TODO confirm (ms). */
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
/*
 * Per-link-rate DPLL divider tables, indexed by link clock.
 * NOTE(review): the leading "{ <link_rate>," member of each entry and the
 * closing "};" appear elided in this extract.
 */
static const struct dp_link_dpll gen4_dpll[] = {
	{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }

/* PCH (Ironlake+) DPLL dividers. */
static const struct dp_link_dpll pch_dpll[] = {
	{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }

/* Valleyview DPLL dividers. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
/*
 * CHV supports eDP 1.4 that have more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000, /* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000, /* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000, /* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
/* Fixed DP link rates each source platform supports (units per the
 * "decakilobits" discussion further below). */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
/*
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
/* Map an intel_dp back to its drm_device via the digital port's encoder. */
static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	return intel_dig_port->base.base.dev;

/* Return the intel_dp behind the encoder attached to @connector. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
/* Forward declarations for link/panel-power helpers defined later in the
 * file (vlv_steal_power_sequencer's parameter list continues on an elided
 * line). */
static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
/*
 * Return a bitmask of the (up to 4) DP lanes NOT used by a link of
 * @lane_count lanes, e.g. lane_count == 2 -> 0xc.
 */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	unsigned int used_lanes = (1 << lane_count) - 1;

	return 0xf & ~used_lanes;
}
/*
 * Clamp the sink's reported DP_MAX_LINK_RATE to a value this source
 * understands; unknown values fall back to 1.62 Gbps with a warning.
 * NOTE(review): the remaining switch cases / braces appear elided in
 * this extract.
 */
intel_dp_max_link_bw(struct intel_dp *intel_dp)
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		max_link_bw = DP_LINK_BW_1_62;
/* Usable lane count = min(source port lanes, sink's DPCD-reported lanes). */
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	u8 source_max, sink_max;

	source_max = intel_dig_port->max_lanes;
	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000. At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	/* Round up to whole decakilobits. */
	return (kilobits + 9) / 10;
}
/*
 * Maximum payload rate (decakilobits/s) a link can carry: 8b/10b channel
 * coding leaves 8/10 of the raw symbol rate for data.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int raw = max_link_clock * max_lanes;

	return raw * 8 / 10;
}
/*
 * Validate @mode against the link's maximum data rate and, for eDP,
 * against the panel's fixed mode.  Returns a MODE_* status code.
 * NOTE(review): the returns inside the fixed-mode checks and the closing
 * braces appear elided in this extract.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	/* eDP panels can only drive their fixed mode's timings. */
	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
		if (mode->vdisplay > fixed_mode->vdisplay)

		target_clock = fixed_mode->clock;

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	/* 18 bpp used as the minimum the sink might need — TODO confirm. */
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;
/*
 * Pack up to 4 bytes of an AUX message into one 32-bit AUX data register
 * value, first byte in the most significant position.
 *
 * The clamp to 4 bytes is required: without it, src_bytes > 4 makes the
 * shift count (3 - i) * 8 negative, which is undefined behavior (each
 * AUX data register only holds 4 bytes anyway).
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t)src[i]) << ((3 - i) * 8);
	return v;
}
/*
 * Unpack a 32-bit AUX data register value into up to 4 bytes, most
 * significant byte first — the inverse of intel_dp_pack_aux().
 *
 * The clamp to 4 bytes mirrors pack_aux: it prevents a negative shift
 * count (undefined behavior) when callers ask for more than one
 * register's worth of data.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}
/* Forward declarations: panel power sequencer (PPS) setup helpers
 * (their "static void" return-type lines appear elided in this extract). */
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);
/* Acquire pps_mutex with the port's AUX power domain held; pair with
 * pps_unlock().  The power reference must be taken *before* the mutex
 * (see the comment in vlv_power_sequencer_reset()). */
static void pps_lock(struct intel_dp *intel_dp)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
/* Release pps_mutex and drop the AUX power domain taken in pps_lock(). */
static void pps_unlock(struct intel_dp *intel_dp)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
/*
 * Make the pipe's panel power sequencer lock onto this port by briefly
 * enabling then disabling the DP port with the pipe's PLL forced on;
 * without this even the VDD force bit has no effect.
 * NOTE(review): braces, the "u32 DP" declaration and some early returns
 * appear elided in this extract.
 */
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);

	/* Refuse to kick while the port is already enabled. */
	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	/* Pipe select encoding differs between CHV and VLV. */
	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable temporarily it if it's not already enabled.
	 */
	release_cl_override = IS_CHERRYVIEW(dev) &&
		!chv_phy_powergate_ch(dev_priv, phy, ch, true);

	if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
			     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
		DRM_ERROR("Failed to force on pll for pipe %c!\n",

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	vlv_force_pll_off(dev, pipe);

	if (release_cl_override)
		chv_phy_powergate_ch(dev_priv, phy, ch, false);
/*
 * Return the PPS pipe assigned to this eDP port, assigning (and kicking)
 * a free one if none is bound yet.  Caller must hold pps_mutex.
 * NOTE(review): braces and the "enum pipe pipe" declaration appear elided
 * in this extract.
 */
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	/* Fast path: already bound to a pipe. */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))

	pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
/* Predicate used by vlv_initial_pps_pipe() to pick a suitable pipe
 * (the "enum pipe pipe" parameter lines appear elided in this extract). */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,

/* True iff the panel-power ON bit is set in this pipe's PPS status. */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;

/* True iff VDD force is enabled in this pipe's PPS control register. */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;

/* Accept any pipe — body (return true) appears elided in this extract. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
/*
 * Scan PPS A/B for one whose port-select field matches @port and that
 * satisfies @pipe_check; returns the pipe (returns and braces appear
 * elided in this extract).
 */
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     vlv_pipe_check pipe_check)

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		/* Skip PPS instances bound to a different port. */
		if (port_sel != PANEL_PORT_SELECT_VLV(port))

		if (!pipe_check(dev_priv, pipe))
/*
 * At init, bind this eDP port to the PPS instance the BIOS already set
 * up, preferring (in order): panel on > VDD on > port-select match.
 * Caller must hold pps_mutex.
 */
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
/* Forget all eDP-encoder -> PPS-pipe bindings (e.g. across suspend) so
 * they get re-picked on next use.  VLV/CHV only. */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
/* Panel power CONTROL register for this port's PPS: BXT / PCH / VLV-CHV
 * variants (the BXT "if (IS_BROXTON(dev))" line appears elided). */
_pp_ctrl_reg(struct intel_dp *intel_dp)
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;

		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
/* Panel power STATUS register for this port's PPS; mirrors _pp_ctrl_reg(). */
_pp_stat_reg(struct intel_dp *intel_dp)
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;

		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Only act on eDP and only for actual restarts. */
	if (!is_edp(intel_dp) || code != SYS_RESTART)

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);

	pps_unlock(intel_dp);
/* True iff the PPS reports panel power ON.  On VLV/CHV an unbound PPS
 * (pps_pipe == INVALID_PIPE) short-circuits — return appears elided. */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
/* True iff VDD force is currently asserted; mirrors edp_have_panel_power(). */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
/* Warn (debug aid) if AUX is attempted on an eDP panel that has neither
 * panel power nor VDD — such transfers will fail. */
intel_dp_check_edp(struct intel_dp *intel_dp)
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
/*
 * Wait (IRQ-driven or polled) for the AUX SEND_BUSY bit to clear and
 * return the final channel-control status.
 * NOTE(review): the if/else around the two wait variants and the return
 * appear elided in this extract.
 */
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
		done = wait_for_atomic(C, 10) == 0;
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
/* AUX clock divider for gen4-class parts: hrawclk / 2 (targets ~2 MHz);
 * only index 0 is valid, 0 terminates the caller's divider loop. */
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
/* ILK+ AUX divider: port A (eDP) runs off cdclk, other ports off the
 * PCH rawclk (surrounding braces appear elided in this extract). */
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);

		return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
/* HSW/BDW AUX divider: port A off cdclk; LPT-H has a workaround divider
 * (value lines elided here); otherwise PCH rawclk / 2. */
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */

	return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
/*
 * VLV AUX clock divider: a single fixed value (100) for the first
 * attempt; returning 0 for any later index terminates the divider loop
 * in intel_dp_aux_ch().
 */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 100;
}
/*
 * SKL doesn't need us to program the AUX clock divider (Hardware will
 * derive the clock from CDCLK automatically). We still implement the
 * get_aux_clock_divider vfunc to plug-in into the existing code: a
 * dummy divider of 1 on the first attempt, then 0 to end the loop.
 */
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 1;
}
/*
 * Compose the AUX_CH_CTL value for a pre-SKL send: busy/done/error bits,
 * message size, precharge time and bit-clock divider.
 * NOTE(review): the precharge assignment and some parameter lines appear
 * elided in this extract.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      uint32_t aux_clock_divider)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	/* BDW port A needs the longer 600us AUX timeout. */
	if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
/* SKL variant of the AUX_CH_CTL compose: fixed 1600us timeout and a
 * 32-cycle sync pulse instead of divider/precharge fields. */
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
	return DP_AUX_CH_CTL_SEND_BUSY |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
/*
 * Low-level AUX channel transfer: pack @send into the data registers,
 * fire the transaction (retrying across clock dividers and up to 5
 * attempts each, per DP spec), then unpack up to @recv_size reply bytes
 * into @recv.  Returns received byte count or negative error.
 * Runs under pps_lock() with VDD forced on for eDP.
 * NOTE(review): numerous lines (braces, gotos, some locals) appear
 * elided in this extract.
 */
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	bool has_aux_irq = HAS_AUX_IRQ(dev);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)

		/* Rate-limit the "not started" warning to status changes. */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			last_status = status;

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);

			if (status & DP_AUX_CH_CTL_DONE)

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
		/*
		 * FIXME: This patch was created on top of a series that
		 * organize the retries at drm level. There EBUSY should
		 * also take care for 1ms wait before retrying.
		 * That aux retries re-org is still needed and after that is
		 * merged we remove this sleep from here.
		 */
		usleep_range(1000, 1500);

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);
/* AUX message header: 3 address bytes, plus 1 length byte when a payload
 * is present. */
#define BARE_ADDRESS_SIZE 3
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)

/*
 * drm_dp_aux.transfer hook: serialize @msg into the 20-byte tx buffer,
 * run it through intel_dp_aux_ch() and decode the reply byte + payload.
 * NOTE(review): returns/braces and the "default:" case appear elided in
 * this extract.
 */
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;

	/* Header: request nibble + 20-bit address + (size - 1). */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);

			msg->reply = rxbuf[0] >> 4;

			/* Number of bytes written in a short write. */
			ret = clamp_t(int, rxbuf[1], 0, msg->size);

			/* Return payload size. */

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);

			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			memcpy(msg->buffer, rxbuf + 1, ret);
/* g4x AUX control/data register lookup by port; unknown ports fall back
 * to port B (switch cases elided in this extract). */
static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
		return DP_AUX_CH_CTL(port);
		return DP_AUX_CH_CTL(PORT_B);

static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
		return DP_AUX_CH_DATA(port, index);
		return DP_AUX_CH_DATA(PORT_B, index);
/* ILK AUX register lookup: CPU registers for port A (eDP), PCH registers
 * for B-D; unknown ports fall back to port A (cases elided). */
static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
		return DP_AUX_CH_CTL(port);
		return PCH_DP_AUX_CH_CTL(port);
		return DP_AUX_CH_CTL(PORT_A);

static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
		return DP_AUX_CH_DATA(port, index);
		return PCH_DP_AUX_CH_DATA(port, index);
		return DP_AUX_CH_DATA(PORT_A, index);
/*
 * On SKL we don't have Aux for port E so we rely
 * on VBT to set a proper alternate aux channel.
 * (Switch cases mapping alternate_aux_channel -> port appear elided.)
 */
static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[PORT_E];

	switch (info->alternate_aux_channel) {

		MISSING_CASE(info->alternate_aux_channel);
/* SKL AUX register lookup; port E is remapped via skl_porte_aux_port(),
 * unknown ports fall back to port A (cases elided in this extract). */
static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
		port = skl_porte_aux_port(dev_priv);
		return DP_AUX_CH_CTL(port);
		return DP_AUX_CH_CTL(PORT_A);

static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
		port = skl_porte_aux_port(dev_priv);
		return DP_AUX_CH_DATA(port, index);
		return DP_AUX_CH_DATA(PORT_A, index);
/* Dispatch AUX control-register lookup to the per-platform helper. */
static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return skl_aux_ctl_reg(dev_priv, port);
	else if (HAS_PCH_SPLIT(dev_priv))
		return ilk_aux_ctl_reg(dev_priv, port);
		return g4x_aux_ctl_reg(dev_priv, port);

/* Same dispatch for the AUX data registers. */
static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
				     enum port port, int index)
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return skl_aux_data_reg(dev_priv, port, index);
	else if (HAS_PCH_SPLIT(dev_priv))
		return ilk_aux_data_reg(dev_priv, port, index);
		return g4x_aux_data_reg(dev_priv, port, index);
/* Cache this port's AUX control and data register offsets on intel_dp. */
static void intel_aux_reg_init(struct intel_dp *intel_dp)
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum port port = dp_to_dig_port(intel_dp)->port;

	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
/* Tear down the DRM AUX adapter and free the name allocated at init. */
intel_dp_aux_fini(struct intel_dp *intel_dp)
	drm_dp_aux_unregister(&intel_dp->aux);
	kfree(intel_dp->aux.name);
/*
 * Register the "DPDDC-<port>" AUX adapter with DRM and expose a sysfs
 * link from the connector to its i2c-over-AUX device.  Cleans up on
 * failure (some return/goto lines appear elided in this extract).
 */
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;

	intel_aux_reg_init(intel_dp);

	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
	if (!intel_dp->aux.name)

	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n",
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  intel_dp->aux.name, ret);
		kfree(intel_dp->aux.name);

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
			  intel_dp->aux.name, ret);
		intel_dp_aux_fini(intel_dp);
/* Remove the connector's AUX sysfs link (MST ports never created one)
 * and chain to the generic connector unregister. */
intel_dp_connector_unregister(struct intel_connector *intel_connector)
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
/*
 * Program pipe_config for SKL eDP: always DPLL0, with CTRL1 link-rate
 * bits picked from port_clock / 2 (the switch-case labels appear elided
 * in this extract).
 */
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (pipe_config->port_clock / 2) {
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
		/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
		results in CDCLK change. Need to handle the change of CDCLK by
		disabling pipes and re-enabling them */
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,

	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1291 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1293 memset(&pipe_config->dpll_hw_state, 0,
1294 sizeof(pipe_config->dpll_hw_state));
1296 switch (pipe_config->port_clock / 2) {
1298 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1301 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1304 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
/*
 * intel_dp_sink_rates - report the link rates the sink supports.
 * @sink_rates: out-parameter set to an array of rates (kHz).
 *
 * Returns the number of valid entries. Sinks that advertise an explicit
 * rate table (num_sink_rates != 0) use it; otherwise the shared
 * default_rates table is used and the count is derived from the sink's
 * max link-bw code: (max_link_bw >> 3) + 1 maps the DP_LINK_BW_* encoding
 * (0x06/0x0a/0x14) onto 1/2/3 entries.
 */
1310 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1312 if (intel_dp->num_sink_rates) {
1313 *sink_rates = intel_dp->sink_rates;
1314 return intel_dp->num_sink_rates;
1317 *sink_rates = default_rates;
1319 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
/*
 * intel_dp_source_supports_hbr2 - can this platform drive HBR2 (5.4 GHz)?
 *
 * HBR2 is disabled on early Skylake steppings (WaDisableHBR2, up to B0)
 * and allowed on HSW (except ULX), BDW and gen9+.
 * NOTE(review): the `return false;`/`return true;` lines under these
 * conditions are missing from this extract.
 */
1322 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1324 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1325 struct drm_device *dev = dig_port->base.base.dev;
1327 /* WaDisableHBR2:skl */
1328 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1331 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1332 (INTEL_INFO(dev)->gen >= 9))
/*
 * intel_dp_source_rates - report the link rates the source supports.
 *
 * Selects the per-platform rate table (BXT, SKL/KBL, or the default) and
 * returns its size; when the source cannot do HBR2 the table size is
 * presumably trimmed by one (the trimming line is missing from this
 * extract — the comment below relies on 5.4 being the last array value).
 */
1339 intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
1341 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1342 struct drm_device *dev = dig_port->base.base.dev;
1345 if (IS_BROXTON(dev)) {
1346 *source_rates = bxt_rates;
1347 size = ARRAY_SIZE(bxt_rates);
1348 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1349 *source_rates = skl_rates;
1350 size = ARRAY_SIZE(skl_rates);
1352 *source_rates = default_rates;
1353 size = ARRAY_SIZE(default_rates);
1356 /* This depends on the fact that 5.4 is last value in the array */
1357 if (!intel_dp_source_supports_hbr2(intel_dp))
/*
 * intel_dp_set_clock - pick fixed DPLL dividers for pre-DDI platforms.
 *
 * Chooses the per-platform divisor table (gen4 / PCH-split / CHV / VLV)
 * and, if an entry matches the negotiated port clock, copies its dpll
 * values into the crtc state and marks clock_set so the generic clock
 * computation is skipped.
 *
 * NOTE(review): the table-assignment lines for the PCH/CHV/VLV branches
 * and the loop's `break` are missing from this extract.
 */
1364 intel_dp_set_clock(struct intel_encoder *encoder,
1365 struct intel_crtc_state *pipe_config)
1367 struct drm_device *dev = encoder->base.dev;
1368 const struct dp_link_dpll *divisor = NULL;
1372 divisor = gen4_dpll;
1373 count = ARRAY_SIZE(gen4_dpll);
1374 } else if (HAS_PCH_SPLIT(dev)) {
1376 count = ARRAY_SIZE(pch_dpll);
1377 } else if (IS_CHERRYVIEW(dev)) {
1379 count = ARRAY_SIZE(chv_dpll);
1380 } else if (IS_VALLEYVIEW(dev)) {
1382 count = ARRAY_SIZE(vlv_dpll);
1385 if (divisor && count) {
1386 for (i = 0; i < count; i++) {
1387 if (pipe_config->port_clock == divisor[i].clock) {
1388 pipe_config->dpll = divisor[i].dpll;
1389 pipe_config->clock_set = true;
/*
 * intersect_rates - merge-intersect two ascending rate arrays.
 *
 * Classic two-pointer walk over the (sorted ascending) source and sink
 * rate arrays, writing matches into @common_rates, capped at
 * DP_MAX_SUPPORTED_RATES. Returns the number of common rates written.
 * NOTE(review): the index-advance statements and final `return k;` are
 * missing from this extract.
 */
1396 static int intersect_rates(const int *source_rates, int source_len,
1397 const int *sink_rates, int sink_len,
1400 int i = 0, j = 0, k = 0;
1402 while (i < source_len && j < sink_len) {
1403 if (source_rates[i] == sink_rates[j]) {
1404 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1406 common_rates[k] = source_rates[i];
1410 } else if (source_rates[i] < sink_rates[j]) {
/*
 * intel_dp_common_rates - rates supported by both source and sink.
 * Fills @common_rates (caller-provided, DP_MAX_SUPPORTED_RATES entries)
 * and returns the count.
 */
1419 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1422 const int *source_rates, *sink_rates;
1423 int source_len, sink_len;
1425 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1426 source_len = intel_dp_source_rates(intel_dp, &source_rates);
1428 return intersect_rates(source_rates, source_len,
1429 sink_rates, sink_len,
/*
 * snprintf_int_array - render an int array as "a, b, c" into @str.
 * Debug helper for the rate dumps below; the advance of str/len after
 * each snprintf is missing from this extract.
 */
1433 static void snprintf_int_array(char *str, size_t len,
1434 const int *array, int nelem)
1440 for (i = 0; i < nelem; i++) {
1441 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
/* Dump source/sink/common rate tables; only active with KMS debugging. */
1449 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1451 const int *source_rates, *sink_rates;
1452 int source_len, sink_len, common_len;
1453 int common_rates[DP_MAX_SUPPORTED_RATES];
1454 char str[128]; /* FIXME: too big for stack? */
1456 if ((drm_debug & DRM_UT_KMS) == 0)
1459 source_len = intel_dp_source_rates(intel_dp, &source_rates);
1460 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1461 DRM_DEBUG_KMS("source rates: %s\n", str);
1463 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1464 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1465 DRM_DEBUG_KMS("sink rates: %s\n", str);
1467 common_len = intel_dp_common_rates(intel_dp, common_rates);
1468 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1469 DRM_DEBUG_KMS("common rates: %s\n", str);
/*
 * rate_to_index - index of @find in @rates.
 * With @find == 0 it is presumably used to count valid (non-zero) entries
 * — see intel_dp_max_link_rate below; the loop body's return/break lines
 * are missing from this extract.
 */
1472 static int rate_to_index(int find, const int *rates)
1476 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1477 if (find == rates[i])
/*
 * intel_dp_max_link_rate - highest rate common to source and sink.
 * rates[] is zero-initialized, so rate_to_index(0, rates) yields the count
 * of populated entries and the last entry is the maximum (arrays ascend).
 */
1484 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1486 int rates[DP_MAX_SUPPORTED_RATES] = {};
1489 len = intel_dp_common_rates(intel_dp, rates);
1490 if (WARN_ON(len <= 0))
1493 return rates[rate_to_index(0, rates) - 1];
/* Index of @rate in the sink's advertised rate table (DP_LINK_RATE_SET). */
1496 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1498 return rate_to_index(rate, intel_dp->sink_rates);
/*
 * intel_dp_compute_rate - translate a port clock into DPCD programming.
 * @link_bw: out; legacy DP_LINK_BW_* code (else presumably 0 when the
 *           sink uses an explicit rate table — the assignment line is
 *           missing from this extract).
 * @rate_select: out; index for DP_LINK_RATE_SET when the sink advertises
 *           explicit rates (eDP 1.4 style selection).
 */
1501 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1502 uint8_t *link_bw, uint8_t *rate_select)
1504 if (intel_dp->num_sink_rates) {
1507 intel_dp_rate_select(intel_dp, port_clock);
1509 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
/*
 * intel_dp_compute_config - derive the DP link configuration for a mode.
 *
 * Core atomic compute_config hook for DP/eDP encoders: intersects
 * source/sink link rates, then searches (bpp descending, then clock, then
 * lane count ascending) for the cheapest configuration whose bandwidth
 * covers the mode, and fills the crtc state (lane_count, port_clock,
 * pipe_bpp, m/n values, optional DRRS m2/n2, and platform PLL state).
 *
 * NOTE(review): numerous lines (several declarations, `return false`
 * error paths, loop increments, the `found:` label, panel-fitting else,
 * pixel-repeat handling) are missing from this extract; comments below
 * describe only what is visible.
 */
1515 intel_dp_compute_config(struct intel_encoder *encoder,
1516 struct intel_crtc_state *pipe_config)
1518 struct drm_device *dev = encoder->base.dev;
1519 struct drm_i915_private *dev_priv = dev->dev_private;
1520 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1521 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1522 enum port port = dp_to_dig_port(intel_dp)->port;
1523 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1524 struct intel_connector *intel_connector = intel_dp->attached_connector;
1525 int lane_count, clock;
1526 int min_lane_count = 1;
1527 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1528 /* Conveniently, the link BW constants become indices with a shift...*/
1532 int link_avail, link_clock;
1533 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1535 uint8_t link_bw, rate_select;
1537 common_len = intel_dp_common_rates(intel_dp, common_rates);
1539 /* No common link rates between source and sink */
1540 WARN_ON(common_len <= 0);
1542 max_clock = common_len - 1;
/* Pre-DDI PCH platforms route non-eDP DP through a PCH encoder. */
1544 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1545 pipe_config->has_pch_encoder = true;
1547 pipe_config->has_dp_encoder = true;
1548 pipe_config->has_drrs = false;
/* Port A (eDP) has no audio. */
1549 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
/* eDP with a fixed panel mode: adopt it and apply panel fitting. */
1551 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1552 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1555 if (INTEL_INFO(dev)->gen >= 9) {
1557 ret = skl_update_scaler_crtc(pipe_config);
1562 if (HAS_GMCH_DISPLAY(dev))
1563 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1564 intel_connector->panel.fitting_mode);
1566 intel_pch_panel_fitting(intel_crtc, pipe_config,
1567 intel_connector->panel.fitting_mode);
/* Double-clocked modes are rejected (the return is missing here). */
1570 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1573 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1574 "max bw %d pixel clock %iKHz\n",
1575 max_lane_count, common_rates[max_clock],
1576 adjusted_mode->crtc_clock);
1578 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1579 * bpc in between. */
1580 bpp = pipe_config->pipe_bpp;
1581 if (is_edp(intel_dp)) {
1583 /* Get bpp from vbt only for panels that dont have bpp in edid */
1584 if (intel_connector->base.display_info.bpc == 0 &&
1585 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1586 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1587 dev_priv->vbt.edp_bpp);
1588 bpp = dev_priv->vbt.edp_bpp;
1592 * Use the maximum clock and number of lanes the eDP panel
1593 * advertizes being capable of. The panels are generally
1594 * designed to support only a single clock and lane
1595 * configuration, and typically these values correspond to the
1596 * native resolution of the panel.
1598 min_lane_count = max_lane_count;
1599 min_clock = max_clock;
/* Search from the desired bpp downwards (min 6 bpc * 3 channels). */
1602 for (; bpp >= 6*3; bpp -= 2*3) {
1603 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1606 for (clock = min_clock; clock <= max_clock; clock++) {
1607 for (lane_count = min_lane_count;
1608 lane_count <= max_lane_count;
1611 link_clock = common_rates[clock];
1612 link_avail = intel_dp_max_data_rate(link_clock,
/* First (cheapest) config with enough bandwidth wins. */
1615 if (mode_rate <= link_avail) {
1625 if (intel_dp->color_range_auto) {
1628 * CEA-861-E - 5.1 Default Encoding Parameters
1629 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1631 pipe_config->limited_color_range =
1632 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1634 pipe_config->limited_color_range =
1635 intel_dp->limited_color_range;
1638 pipe_config->lane_count = lane_count;
1640 pipe_config->pipe_bpp = bpp;
1641 pipe_config->port_clock = common_rates[clock];
1643 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1644 &link_bw, &rate_select);
1646 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1647 link_bw, rate_select, pipe_config->lane_count,
1648 pipe_config->port_clock, bpp);
1649 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1650 mode_rate, link_avail);
1652 intel_link_compute_m_n(bpp, lane_count,
1653 adjusted_mode->crtc_clock,
1654 pipe_config->port_clock,
1655 &pipe_config->dp_m_n);
/* Seamless DRRS: also precompute m/n for the panel's downclocked mode. */
1657 if (intel_connector->panel.downclock_mode != NULL &&
1658 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1659 pipe_config->has_drrs = true;
1660 intel_link_compute_m_n(bpp, lane_count,
1661 intel_connector->panel.downclock_mode->clock,
1662 pipe_config->port_clock,
1663 &pipe_config->dp_m2_n2);
/* Platform-specific PLL state selection. */
1666 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
1667 skl_edp_set_pll_config(pipe_config);
1668 else if (IS_BROXTON(dev))
1669 /* handled in ddi */;
1670 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1671 hsw_dp_set_ddi_pll_sel(pipe_config);
1673 intel_dp_set_clock(encoder, pipe_config);
1678 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1679 const struct intel_crtc_state *pipe_config)
1681 intel_dp->link_rate = pipe_config->port_clock;
1682 intel_dp->lane_count = pipe_config->lane_count;
/*
 * intel_dp_prepare - assemble the DP port register value for enable.
 *
 * Builds intel_dp->DP (written to hardware later in the enable sequence)
 * from the committed CRTC state, handling the three register layouts:
 * gen7 port A (CPU eDP), CPT PCH ports, and everything else (IBX/CPU/
 * VLV/CHV). Sync polarity, enhanced framing, lane width and pipe select
 * are encoded differently in each.
 *
 * NOTE(review): several lines of the original layout comment and some
 * branch bodies are missing from this extract.
 */
1685 static void intel_dp_prepare(struct intel_encoder *encoder)
1687 struct drm_device *dev = encoder->base.dev;
1688 struct drm_i915_private *dev_priv = dev->dev_private;
1689 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1690 enum port port = dp_to_dig_port(intel_dp)->port;
1691 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1692 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1694 intel_dp_set_link_params(intel_dp, crtc->config);
1697 * There are four kinds of DP registers:
1704 * IBX PCH and CPU are the same for almost everything,
1705 * except that the CPU DP PLL is configured in this
1708 * CPT PCH is quite different, having many bits moved
1709 * to the TRANS_DP_CTL register instead. That
1710 * configuration happens (oddly) in ironlake_pch_enable
1713 /* Preserve the BIOS-computed detected bit. This is
1714 * supposed to be read-only.
1716 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1718 /* Handle DP bits in common between all three register formats */
1719 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1720 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
1722 /* Split out the IBX/CPU vs CPT settings */
1724 if (IS_GEN7(dev) && port == PORT_A) {
1725 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1726 intel_dp->DP |= DP_SYNC_HS_HIGH;
1727 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1728 intel_dp->DP |= DP_SYNC_VS_HIGH;
1729 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1731 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1732 intel_dp->DP |= DP_ENHANCED_FRAMING;
/* Gen7 port A encodes the pipe in bit 29. */
1734 intel_dp->DP |= crtc->pipe << 29;
1735 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
/* On CPT the sync/framing bits live in TRANS_DP_CTL, not the port. */
1738 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1740 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1741 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1742 trans_dp |= TRANS_DP_ENH_FRAMING;
1744 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1745 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
/* else: IBX/CPU/VLV/CHV layout. */
1747 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1748 !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
1749 intel_dp->DP |= DP_COLOR_RANGE_16_235;
1751 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1752 intel_dp->DP |= DP_SYNC_HS_HIGH;
1753 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1754 intel_dp->DP |= DP_SYNC_VS_HIGH;
1755 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1757 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1758 intel_dp->DP |= DP_ENHANCED_FRAMING;
1760 if (IS_CHERRYVIEW(dev))
1761 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1762 else if (crtc->pipe == PIPE_B)
1763 intel_dp->DP |= DP_PIPEB_SELECT;
/*
 * Mask/value pairs fed to wait_panel_status() to wait for the panel power
 * sequencer to settle into a given state (on, off, or ready for a new
 * power cycle). The literal `| 0` terms are placeholders keeping the
 * three pairs visually parallel across the same bit positions.
 */
1767 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1768 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1770 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1771 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1773 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1774 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
/*
 * wait_panel_status - poll PP_STATUS until (status & mask) == value.
 *
 * Polls the panel power-sequencer status register every 10 us with a
 * 5 second timeout; logs an error (but does not otherwise fail) on
 * timeout. Caller must hold pps_mutex. The mask/value parameter lines
 * are missing from this extract.
 */
1776 static void wait_panel_status(struct intel_dp *intel_dp,
1780 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1781 struct drm_i915_private *dev_priv = dev->dev_private;
1782 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1784 lockdep_assert_held(&dev_priv->pps_mutex);
1786 pp_stat_reg = _pp_stat_reg(intel_dp);
1787 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1789 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1791 I915_READ(pp_stat_reg),
1792 I915_READ(pp_ctrl_reg));
1794 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1795 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1796 I915_READ(pp_stat_reg),
1797 I915_READ(pp_ctrl_reg));
1800 DRM_DEBUG_KMS("Wait complete\n");
1803 static void wait_panel_on(struct intel_dp *intel_dp)
1805 DRM_DEBUG_KMS("Wait for panel power on\n");
1806 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1809 static void wait_panel_off(struct intel_dp *intel_dp)
1811 DRM_DEBUG_KMS("Wait for panel power off time\n");
1812 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
/*
 * wait_panel_power_cycle - honour the panel's mandated power-cycle delay.
 *
 * First waits out the remaining software-tracked cycle delay (measured
 * from last_power_cycle), then polls the sequencer until it is idle and
 * ready for a new power-on. The tail of the comment at 1819 is missing
 * from this extract.
 */
1815 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1817 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1819 /* When we disable the VDD override bit last we have to do the manual
1821 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1822 intel_dp->panel_power_cycle_delay);
1824 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1827 static void wait_backlight_on(struct intel_dp *intel_dp)
1829 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1830 intel_dp->backlight_on_delay);
1833 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1835 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1836 intel_dp->backlight_off_delay);
1839 /* Read the current pp_control value, unlocking the register if it
/*
 * Reads PP_CONTROL and, except on Broxton (which has no lock field),
 * replaces the write-protect key with the unlock value so subsequent
 * writes take effect. Caller must hold pps_mutex.
 * NOTE(review): the `return control;` line is missing from this extract.
 */
1843 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1845 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1846 struct drm_i915_private *dev_priv = dev->dev_private;
1849 lockdep_assert_held(&dev_priv->pps_mutex);
1851 control = I915_READ(_pp_ctrl_reg(intel_dp));
1852 if (!IS_BROXTON(dev)) {
1853 control &= ~PANEL_UNLOCK_MASK;
1854 control |= PANEL_UNLOCK_REGS;
1860 * Must be paired with edp_panel_vdd_off().
1861 * Must hold pps_mutex around the whole on/off sequence.
1862 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
/*
 * Forces panel VDD on via EDP_FORCE_VDD so the AUX channel can be used
 * without a full panel power-up. Returns true when this call actually
 * turned VDD on (i.e. the caller owes a matching off); returns the
 * cached need_to_disable early when VDD was already up. Takes an AUX
 * power-domain reference that edp_panel_vdd_off_sync() releases.
 * NOTE(review): the `return false;` for non-eDP is missing from this
 * extract.
 */
1864 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1866 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1867 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1868 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1869 struct drm_i915_private *dev_priv = dev->dev_private;
1870 enum intel_display_power_domain power_domain;
1872 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1873 bool need_to_disable = !intel_dp->want_panel_vdd;
1875 lockdep_assert_held(&dev_priv->pps_mutex);
1877 if (!is_edp(intel_dp))
/* Cancel any pending deferred VDD-off; we want VDD held now. */
1880 cancel_delayed_work(&intel_dp->panel_vdd_work);
1881 intel_dp->want_panel_vdd = true;
1883 if (edp_have_panel_vdd(intel_dp))
1884 return need_to_disable;
1886 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1887 intel_display_power_get(dev_priv, power_domain);
1889 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1890 port_name(intel_dig_port->port));
1892 if (!edp_have_panel_power(intel_dp))
1893 wait_panel_power_cycle(intel_dp);
1895 pp = ironlake_get_pp_control(intel_dp);
1896 pp |= EDP_FORCE_VDD;
1898 pp_stat_reg = _pp_stat_reg(intel_dp);
1899 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1901 I915_WRITE(pp_ctrl_reg, pp);
1902 POSTING_READ(pp_ctrl_reg);
1903 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1904 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1906 * If the panel wasn't on, delay before accessing aux channel
/* Panel needs its power-up delay before AUX is reliable. */
1908 if (!edp_have_panel_power(intel_dp)) {
1909 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1910 port_name(intel_dig_port->port));
1911 msleep(intel_dp->panel_power_up_delay);
1914 return need_to_disable;
1918 * Must be paired with intel_edp_panel_vdd_off() or
1919 * intel_edp_panel_off().
1920 * Nested calls to these functions are not allowed since
1921 * we drop the lock. Caller must use some higher level
1922 * locking to prevent nested calls from other threads.
/*
 * Locked wrapper around edp_panel_vdd_on(); warns if VDD was already
 * requested. NOTE(review): the pps_lock() call preceding the unlock is
 * missing from this extract.
 */
1924 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1928 if (!is_edp(intel_dp))
1932 vdd = edp_panel_vdd_on(intel_dp);
1933 pps_unlock(intel_dp);
1935 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1936 port_name(dp_to_dig_port(intel_dp)->port));
/*
 * edp_panel_vdd_off_sync - actually drop the VDD force bit.
 *
 * Clears EDP_FORCE_VDD, records a power-cycle timestamp if the panel is
 * now fully off, and releases the AUX power-domain reference taken in
 * edp_panel_vdd_on(). Must only run once want_panel_vdd is false.
 */
1939 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1941 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1942 struct drm_i915_private *dev_priv = dev->dev_private;
1943 struct intel_digital_port *intel_dig_port =
1944 dp_to_dig_port(intel_dp);
1945 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1946 enum intel_display_power_domain power_domain;
1948 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1950 lockdep_assert_held(&dev_priv->pps_mutex);
1952 WARN_ON(intel_dp->want_panel_vdd);
1954 if (!edp_have_panel_vdd(intel_dp))
1957 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1958 port_name(intel_dig_port->port));
1960 pp = ironlake_get_pp_control(intel_dp);
1961 pp &= ~EDP_FORCE_VDD;
1963 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1964 pp_stat_reg = _pp_stat_reg(intel_dp);
1966 I915_WRITE(pp_ctrl_reg, pp);
1967 POSTING_READ(pp_ctrl_reg);
1969 /* Make sure sequencer is idle before allowing subsequent activity */
1970 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1971 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
/* VDD was the last thing keeping the panel up: start the cycle timer. */
1973 if ((pp & POWER_TARGET_ON) == 0)
1974 intel_dp->last_power_cycle = jiffies;
1976 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1977 intel_display_power_put(dev_priv, power_domain);
/*
 * Delayed-work callback that performs the deferred VDD off, provided
 * nobody re-requested VDD in the meantime. NOTE(review): the pps_lock()
 * before the check is missing from this extract.
 */
1980 static void edp_panel_vdd_work(struct work_struct *__work)
1982 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1983 struct intel_dp, panel_vdd_work);
1986 if (!intel_dp->want_panel_vdd)
1987 edp_panel_vdd_off_sync(intel_dp);
1988 pps_unlock(intel_dp);
/*
 * Schedule the deferred VDD off far in the future (5x the power-cycle
 * delay) so bursts of AUX traffic don't thrash panel power.
 */
1991 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1993 unsigned long delay;
1996 * Queue the timer to fire a long time from now (relative to the power
1997 * down delay) to keep the panel power up across a sequence of
2000 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2001 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2005 * Must be paired with edp_panel_vdd_on().
2006 * Must hold pps_mutex around the whole on/off sequence.
2007 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
/* Drop the VDD request; turn off immediately (@sync) or via delayed work. */
2009 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2011 struct drm_i915_private *dev_priv =
2012 intel_dp_to_dev(intel_dp)->dev_private;
2014 lockdep_assert_held(&dev_priv->pps_mutex);
2016 if (!is_edp(intel_dp))
2019 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2020 port_name(dp_to_dig_port(intel_dp)->port));
2022 intel_dp->want_panel_vdd = false;
2025 edp_panel_vdd_off_sync(intel_dp);
2027 edp_panel_vdd_schedule_off(intel_dp);
/*
 * edp_panel_on - run the eDP panel power-on sequence.
 *
 * Waits for any pending power cycle, then sets POWER_TARGET_ON with the
 * ILK reset-bit workaround applied, waits for the sequencer to report
 * on, and timestamps last_power_on for the backlight delay. Caller must
 * hold pps_mutex. NOTE(review): the gen-5 conditionals guarding the
 * reset-bit handling are missing from this extract.
 */
2030 static void edp_panel_on(struct intel_dp *intel_dp)
2032 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2033 struct drm_i915_private *dev_priv = dev->dev_private;
2035 i915_reg_t pp_ctrl_reg;
2037 lockdep_assert_held(&dev_priv->pps_mutex);
2039 if (!is_edp(intel_dp))
2042 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2043 port_name(dp_to_dig_port(intel_dp)->port));
2045 if (WARN(edp_have_panel_power(intel_dp),
2046 "eDP port %c panel power already on\n",
2047 port_name(dp_to_dig_port(intel_dp)->port)))
2050 wait_panel_power_cycle(intel_dp);
2052 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2053 pp = ironlake_get_pp_control(intel_dp);
2055 /* ILK workaround: disable reset around power sequence */
2056 pp &= ~PANEL_POWER_RESET;
2057 I915_WRITE(pp_ctrl_reg, pp);
2058 POSTING_READ(pp_ctrl_reg);
2061 pp |= POWER_TARGET_ON;
2063 pp |= PANEL_POWER_RESET;
2065 I915_WRITE(pp_ctrl_reg, pp);
2066 POSTING_READ(pp_ctrl_reg);
2068 wait_panel_on(intel_dp);
2069 intel_dp->last_power_on = jiffies;
2072 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2073 I915_WRITE(pp_ctrl_reg, pp);
2074 POSTING_READ(pp_ctrl_reg);
/*
 * Locked wrapper around edp_panel_on(). NOTE(review): the pps_lock()
 * call is missing from this extract.
 */
2078 void intel_edp_panel_on(struct intel_dp *intel_dp)
2080 if (!is_edp(intel_dp))
2084 edp_panel_on(intel_dp);
2085 pps_unlock(intel_dp);
/*
 * edp_panel_off - run the eDP panel power-off sequence.
 *
 * Requires VDD to still be requested (warned otherwise): panel power and
 * the VDD force bit are dropped together, since some panels misbehave if
 * VDD is removed first. Releases the AUX power-domain reference that was
 * taken when VDD went on. Caller must hold pps_mutex.
 */
2089 static void edp_panel_off(struct intel_dp *intel_dp)
2091 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2092 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2093 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2094 struct drm_i915_private *dev_priv = dev->dev_private;
2095 enum intel_display_power_domain power_domain;
2097 i915_reg_t pp_ctrl_reg;
2099 lockdep_assert_held(&dev_priv->pps_mutex);
2101 if (!is_edp(intel_dp))
2104 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2105 port_name(dp_to_dig_port(intel_dp)->port));
2107 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2108 port_name(dp_to_dig_port(intel_dp)->port));
2110 pp = ironlake_get_pp_control(intel_dp);
2111 /* We need to switch off panel power _and_ force vdd, for otherwise some
2112 * panels get very unhappy and cease to work. */
2113 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2116 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2118 intel_dp->want_panel_vdd = false;
2120 I915_WRITE(pp_ctrl_reg, pp);
2121 POSTING_READ(pp_ctrl_reg);
2123 intel_dp->last_power_cycle = jiffies;
2124 wait_panel_off(intel_dp);
2126 /* We got a reference when we enabled the VDD. */
2127 power_domain = intel_display_port_aux_power_domain(intel_encoder);
2128 intel_display_power_put(dev_priv, power_domain);
/*
 * Locked wrapper around edp_panel_off(). NOTE(review): the pps_lock()
 * call is missing from this extract.
 */
2131 void intel_edp_panel_off(struct intel_dp *intel_dp)
2133 if (!is_edp(intel_dp))
2137 edp_panel_off(intel_dp);
2138 pps_unlock(intel_dp);
2141 /* Enable backlight in the panel power control. */
/*
 * Sets EDP_BLC_ENABLE in PP_CONTROL after honouring the post-power-on
 * backlight delay. NOTE(review): the pps_lock() pairing the unlock below
 * is missing from this extract.
 */
2142 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2144 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2145 struct drm_device *dev = intel_dig_port->base.base.dev;
2146 struct drm_i915_private *dev_priv = dev->dev_private;
2148 i915_reg_t pp_ctrl_reg;
2151 * If we enable the backlight right away following a panel power
2152 * on, we may see slight flicker as the panel syncs with the eDP
2153 * link. So delay a bit to make sure the image is solid before
2154 * allowing it to appear.
2156 wait_backlight_on(intel_dp);
2160 pp = ironlake_get_pp_control(intel_dp);
2161 pp |= EDP_BLC_ENABLE;
2163 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2165 I915_WRITE(pp_ctrl_reg, pp);
2166 POSTING_READ(pp_ctrl_reg);
2168 pps_unlock(intel_dp);
2171 /* Enable backlight PWM and backlight PP control. */
/* Public entry: PWM first, then the PP backlight-enable bit. */
2172 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2174 if (!is_edp(intel_dp))
2177 DRM_DEBUG_KMS("\n");
2179 intel_panel_enable_backlight(intel_dp->attached_connector);
2180 _intel_edp_backlight_on(intel_dp);
2183 /* Disable backlight in the panel power control. */
/*
 * Clears EDP_BLC_ENABLE, then records the off timestamp and waits the
 * backlight-off delay. NOTE(review): the pps_lock() pairing the unlock
 * below is missing from this extract.
 */
2184 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2186 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2187 struct drm_i915_private *dev_priv = dev->dev_private;
2189 i915_reg_t pp_ctrl_reg;
2191 if (!is_edp(intel_dp))
2196 pp = ironlake_get_pp_control(intel_dp);
2197 pp &= ~EDP_BLC_ENABLE;
2199 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2201 I915_WRITE(pp_ctrl_reg, pp);
2202 POSTING_READ(pp_ctrl_reg);
2204 pps_unlock(intel_dp);
2206 intel_dp->last_backlight_off = jiffies;
2207 edp_wait_backlight_off(intel_dp);
2210 /* Disable backlight PP control and backlight PWM. */
/* Public entry: reverse order of enable — PP bit first, then PWM. */
2211 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2213 if (!is_edp(intel_dp))
2216 DRM_DEBUG_KMS("\n");
2218 _intel_edp_backlight_off(intel_dp);
2219 intel_panel_disable_backlight(intel_dp->attached_connector);
2223 * Hook for controlling the panel power control backlight through the bl_power
2224 * sysfs attribute. Take care to handle multiple calls.
/* Idempotent: reads the current BLC state and only toggles on change. */
2226 static void intel_edp_backlight_power(struct intel_connector *connector,
2229 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2233 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2234 pps_unlock(intel_dp);
2236 if (is_enabled == enable)
2239 DRM_DEBUG_KMS("panel power control backlight %s\n",
2240 enable ? "enable" : "disable");
2243 _intel_edp_backlight_on(intel_dp);
2245 _intel_edp_backlight_off(intel_dp);
/*
 * assert_dp_port - state-check helper: warn if the DP port enable bit
 * does not match the expected @state.
 */
2248 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2250 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2251 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2252 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2254 I915_STATE_WARN(cur_state != state,
2255 "DP port %c state assertion failure (expected %s, current %s)\n",
2256 port_name(dig_port->port),
2257 onoff(state), onoff(cur_state));
2259 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
/* Same idea for the eDP PLL enable bit in DP_A. */
2261 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2263 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2265 I915_STATE_WARN(cur_state != state,
2266 "eDP PLL state assertion failure (expected %s, current %s)\n",
2267 onoff(state), onoff(cur_state));
2269 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2270 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
/*
 * ironlake_edp_pll_on - enable the dedicated CPU eDP PLL (port A).
 *
 * Must run with the pipe, port and PLL all disabled (asserted). Selects
 * the 162 vs 270 MHz frequency from the negotiated port clock, writes
 * it, then sets DP_PLL_ENABLE. NOTE(review): the udelay/POSTING_READ
 * settle lines between the writes are missing from this extract.
 */
2272 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2274 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2275 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2276 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2278 assert_pipe_disabled(dev_priv, crtc->pipe);
2279 assert_dp_port_disabled(intel_dp);
2280 assert_edp_pll_disabled(dev_priv);
2282 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2283 crtc->config->port_clock);
2285 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2287 if (crtc->config->port_clock == 162000)
2288 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2290 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2292 I915_WRITE(DP_A, intel_dp->DP);
2296 intel_dp->DP |= DP_PLL_ENABLE;
2298 I915_WRITE(DP_A, intel_dp->DP);
/*
 * ironlake_edp_pll_off - disable the CPU eDP PLL; pipe and port must be
 * off, PLL must currently be on (all asserted).
 */
2303 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2305 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2306 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2307 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2309 assert_pipe_disabled(dev_priv, crtc->pipe);
2310 assert_dp_port_disabled(intel_dp);
2311 assert_edp_pll_enabled(dev_priv);
2313 DRM_DEBUG_KMS("disabling eDP PLL\n");
2315 intel_dp->DP &= ~DP_PLL_ENABLE;
2317 I915_WRITE(DP_A, intel_dp->DP);
2322 /* If the sink supports it, try to set the power state appropriately */
/*
 * Writes DP_SET_POWER over AUX: D3 on any non-ON mode, D0 on power-up.
 * Sinks with DPCD < 1.1 have no SET_POWER register and are skipped. The
 * power-up write is retried (loop of 3) because sinks may need up to
 * 1 ms to respond after waking. NOTE(review): the written D0/D3 value
 * lines and the retry msleep/break are missing from this extract.
 */
2323 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2327 /* Should have a valid DPCD by this point */
2328 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2331 if (mode != DRM_MODE_DPMS_ON) {
2332 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2336 * When turning on, we need to retry for 1ms to give the sink
2339 for (i = 0; i < 3; i++) {
2340 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2349 DRM_DEBUG_KMS("failed to %s sink power state\n",
2350 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
/*
 * intel_dp_get_hw_state - read back whether the port is enabled and on
 * which pipe.
 *
 * Bails out (presumably returning false — the return lines are missing
 * from this extract) if the power domain is off or DP_PORT_EN is clear.
 * Pipe decoding varies: gen7 port A and CHV use dedicated bit fields;
 * CPT PCH ports must be found by scanning TRANS_DP_CTL of every pipe.
 */
2353 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2356 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2357 enum port port = dp_to_dig_port(intel_dp)->port;
2358 struct drm_device *dev = encoder->base.dev;
2359 struct drm_i915_private *dev_priv = dev->dev_private;
2360 enum intel_display_power_domain power_domain;
2363 power_domain = intel_display_port_power_domain(encoder);
2364 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2367 tmp = I915_READ(intel_dp->output_reg);
2369 if (!(tmp & DP_PORT_EN))
2372 if (IS_GEN7(dev) && port == PORT_A) {
2373 *pipe = PORT_TO_PIPE_CPT(tmp);
2374 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2377 for_each_pipe(dev_priv, p) {
2378 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2379 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
/* No transcoder claims this port: inconsistent hw state. */
2385 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2386 i915_mmio_reg_offset(intel_dp->output_reg));
2387 } else if (IS_CHERRYVIEW(dev)) {
2388 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2390 *pipe = PORT_TO_PIPE(tmp);
/*
 * intel_dp_get_config - reconstruct crtc state from the hardware.
 *
 * Read-out counterpart of intel_dp_compute_config(): decodes audio
 * enable, sync polarities (from TRANS_DP_CTL on CPT, from the port
 * register elsewhere), limited color range, lane count, m/n values and
 * the port clock (from the DP_A PLL frequency select for port A), plus
 * the derived dot clock. Ends with the VBT eDP bpp fix-up described in
 * the inline comment.
 */
2396 static void intel_dp_get_config(struct intel_encoder *encoder,
2397 struct intel_crtc_state *pipe_config)
2399 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2401 struct drm_device *dev = encoder->base.dev;
2402 struct drm_i915_private *dev_priv = dev->dev_private;
2403 enum port port = dp_to_dig_port(intel_dp)->port;
2404 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2407 tmp = I915_READ(intel_dp->output_reg);
2409 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2411 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2412 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2414 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2415 flags |= DRM_MODE_FLAG_PHSYNC;
2417 flags |= DRM_MODE_FLAG_NHSYNC;
2419 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2420 flags |= DRM_MODE_FLAG_PVSYNC;
2422 flags |= DRM_MODE_FLAG_NVSYNC;
2424 if (tmp & DP_SYNC_HS_HIGH)
2425 flags |= DRM_MODE_FLAG_PHSYNC;
2427 flags |= DRM_MODE_FLAG_NHSYNC;
2429 if (tmp & DP_SYNC_VS_HIGH)
2430 flags |= DRM_MODE_FLAG_PVSYNC;
2432 flags |= DRM_MODE_FLAG_NVSYNC;
2435 pipe_config->base.adjusted_mode.flags |= flags;
2437 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2438 !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
2439 pipe_config->limited_color_range = true;
2441 pipe_config->has_dp_encoder = true;
2443 pipe_config->lane_count =
2444 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2446 intel_dp_get_m_n(crtc, pipe_config);
/* Port A derives its clock from the CPU eDP PLL frequency select. */
2448 if (port == PORT_A) {
2449 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
2450 pipe_config->port_clock = 162000;
2452 pipe_config->port_clock = 270000;
2455 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2456 &pipe_config->dp_m_n);
2458 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2459 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2461 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2463 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2464 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2466 * This is a big fat ugly hack.
2468 * Some machines in UEFI boot mode provide us a VBT that has 18
2469 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2470 * unknown we fail to light up. Yet the same BIOS boots up with
2471 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2472 * max, not what it tells us to use.
2474 * Note: This will still be broken if the eDP panel is not lit
2475 * up by the BIOS, and thus we can't get the mode at module
2478 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2479 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2480 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
/*
 * Disable the DP encoder.
 *
 * Order matters here: audio codec off first, PSR off (only where PSR is
 * present and the platform is not DDI — NOTE(review): presumably the DDI
 * path handles PSR elsewhere; confirm), then the eDP panel power-down
 * sequence under vdd, and finally the port itself on gen < 5.
 * NOTE(review): brace/blank lines are missing from this extraction.
 */
2484 static void intel_disable_dp(struct intel_encoder *encoder)
2486 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2487 struct drm_device *dev = encoder->base.dev;
2488 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
/* Audio must go down before the port/pipe does. */
2490 if (crtc->config->has_audio)
2491 intel_audio_codec_disable(encoder);
2493 if (HAS_PSR(dev) && !HAS_DDI(dev))
2494 intel_psr_disable(intel_dp);
2496 /* Make sure the panel is off before trying to change the mode. But also
2497 * ensure that we have vdd while we switch off the panel. */
2498 intel_edp_panel_vdd_on(intel_dp);
2499 intel_edp_backlight_off(intel_dp);
2500 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF)_;
2501 intel_edp_panel_off(intel_dp);
2503 /* disable the port before the pipe on g4x */
2504 if (INTEL_INFO(dev)->gen < 5)
2505 intel_dp_link_down(intel_dp);
/*
 * ILK+ post-disable: take the link down, then turn the eDP PLL off.
 * NOTE(review): `port` is otherwise unused here, and the original file
 * has a guard line (not visible in this extraction) between the comment
 * and the PLL-off call — presumably `if (port == PORT_A)`; confirm
 * against the original source.
 */
2508 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2510 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2511 enum port port = dp_to_dig_port(intel_dp)->port;
2513 intel_dp_link_down(intel_dp);
2515 /* Only ilk+ has port A */
2517 ironlake_edp_pll_off(intel_dp);
/*
 * VLV post-disable: only take the DP link down. Unlike
 * ilk_post_disable_dp() above, there is no eDP PLL teardown here.
 */
2520 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2522 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2524 intel_dp_link_down(intel_dp);
/*
 * Assert or deassert the CHV data lane soft reset via the DPIO sideband.
 *
 * Two register pairs are touched per active PCS group: DW0 (TX lane
 * resets) and DW1 (clock soft reset, with CHV_PCS_REQ_SOFTRESET_EN set).
 * The PCS23 group is only touched when more than 2 lanes are in use.
 *
 * NOTE(review): the second parameter line (presumably `bool reset`) and
 * the if/else lines selecting between the &= / |= variants are missing
 * from this extraction — callers pass true to assert, false to deassert
 * (see chv_post_disable_dp / chv_pre_enable_dp). Caller holds sb_lock.
 */
2527 static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2530 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2531 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2532 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2533 enum pipe pipe = crtc->pipe;
/* Lane resets for TX lanes 0/1 (PCS01 group). */
2536 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2538 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2540 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2541 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2543 if (crtc->config->lane_count > 2) {
2544 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2546 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2548 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2549 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
/* Clock soft reset request for the PCS01 group. */
2552 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2553 val |= CHV_PCS_REQ_SOFTRESET_EN;
2555 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2557 val |= DPIO_PCS_CLK_SOFT_RESET;
2558 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2560 if (crtc->config->lane_count > 2) {
2561 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2562 val |= CHV_PCS_REQ_SOFTRESET_EN;
2564 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2566 val |= DPIO_PCS_CLK_SOFT_RESET;
2567 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
/*
 * CHV post-disable: take the link down, then assert the data lane soft
 * reset under the sideband lock so the lanes are parked cleanly.
 */
2571 static void chv_post_disable_dp(struct intel_encoder *encoder)
2573 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2574 struct drm_device *dev = encoder->base.dev;
2575 struct drm_i915_private *dev_priv = dev->dev_private;
2577 intel_dp_link_down(intel_dp);
/* DPIO sideband access requires sb_lock. */
2579 mutex_lock(&dev_priv->sb_lock);
2581 /* Assert data lane reset */
2582 chv_data_lane_soft_reset(encoder, true);
2584 mutex_unlock(&dev_priv->sb_lock);
/*
 * Program the requested link-training pattern (and scrambling disable)
 * into the hardware, via one of three register layouts:
 *   1. DDI platforms: DP_TP_CTL(port) — written immediately here;
 *   2. CPT PCH ports / gen7 port A: *DP with the _CPT field encoding;
 *   3. everything else (incl. CHV): *DP with the classic encoding.
 * In cases 2 and 3 only the in-memory *DP value is updated; the caller
 * writes it to the port register.
 *
 * NOTE(review): this extraction is missing the `uint32_t *DP` parameter
 * line, the `if (HAS_DDI(dev))` line opening the first branch, the
 * `break;` lines in every switch, and the closing braces — confirm
 * against the original file. Training pattern 3 is rejected on the
 * CPT and pre-CHV paths and silently downgraded to pattern 2.
 */
2588 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2590 uint8_t dp_train_pat)
2592 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2593 struct drm_device *dev = intel_dig_port->base.base.dev;
2594 struct drm_i915_private *dev_priv = dev->dev_private;
2595 enum port port = intel_dig_port->port;
/* --- DDI path: pattern goes into DP_TP_CTL --- */
2598 uint32_t temp = I915_READ(DP_TP_CTL(port));
2600 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2601 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2603 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2605 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2606 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2607 case DP_TRAINING_PATTERN_DISABLE:
2608 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2611 case DP_TRAINING_PATTERN_1:
2612 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2614 case DP_TRAINING_PATTERN_2:
2615 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2617 case DP_TRAINING_PATTERN_3:
2618 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2621 I915_WRITE(DP_TP_CTL(port), temp);
/* --- CPT / gen7 port A path: pattern encoded in *DP (_CPT fields) --- */
2623 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2624 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2625 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2627 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2628 case DP_TRAINING_PATTERN_DISABLE:
2629 *DP |= DP_LINK_TRAIN_OFF_CPT;
2631 case DP_TRAINING_PATTERN_1:
2632 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2634 case DP_TRAINING_PATTERN_2:
2635 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2637 case DP_TRAINING_PATTERN_3:
2638 DRM_ERROR("DP training pattern 3 not supported\n");
2639 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
/* --- legacy path: pattern encoded in *DP; CHV has a wider mask --- */
2644 if (IS_CHERRYVIEW(dev))
2645 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2647 *DP &= ~DP_LINK_TRAIN_MASK;
2649 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2650 case DP_TRAINING_PATTERN_DISABLE:
2651 *DP |= DP_LINK_TRAIN_OFF;
2653 case DP_TRAINING_PATTERN_1:
2654 *DP |= DP_LINK_TRAIN_PAT_1;
2656 case DP_TRAINING_PATTERN_2:
2657 *DP |= DP_LINK_TRAIN_PAT_2;
2659 case DP_TRAINING_PATTERN_3:
2660 if (IS_CHERRYVIEW(dev)) {
2661 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2663 DRM_ERROR("DP training pattern 3 not supported\n");
2664 *DP |= DP_LINK_TRAIN_PAT_2;
/*
 * Enable the DP port register.
 *
 * Two writes are issued on purpose: the first programs the register
 * (with training pattern 1, per spec) WITHOUT DP_PORT_EN; only the
 * second write sets DP_PORT_EN (and optionally audio). See the "Magic
 * for VLV/CHV" comment below — collapsing these into one write breaks
 * link training when the power sequencer was freshly assigned.
 */
2671 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2673 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2674 struct drm_i915_private *dev_priv = dev->dev_private;
2675 struct intel_crtc *crtc =
2676 to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
2678 /* enable with pattern 1 (as per spec) */
2679 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2680 DP_TRAINING_PATTERN_1);
/* First write: everything except the enable bit. */
2682 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2683 POSTING_READ(intel_dp->output_reg);
2686 * Magic for VLV/CHV. We _must_ first set up the register
2687 * without actually enabling the port, and then do another
2688 * write to enable the port. Otherwise link training will
2689 * fail when the power sequencer is freshly used for this port.
2691 intel_dp->DP |= DP_PORT_EN;
2692 if (crtc->config->has_audio)
2693 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
/* Second write: actually enable the port. */
2695 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2696 POSTING_READ(intel_dp->output_reg);
/*
 * Common DP enable path: power sequencer setup (VLV/CHV), port enable
 * with FIFO underrun reporting suppressed around it, eDP panel power-up
 * under vdd, PHY ready wait (VLV/CHV), sink DPMS on, link training, and
 * finally audio.
 *
 * NOTE(review): this extraction is missing several lines (the early
 * `return;` after the WARN_ON, the pps_lock() matching the visible
 * pps_unlock(), the `return;` inside the GEN5/port-A block, and the
 * lane_mask argument continuation of vlv_wait_port_ready) — confirm
 * against the original file.
 */
2699 static void intel_enable_dp(struct intel_encoder *encoder)
2701 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2702 struct drm_device *dev = encoder->base.dev;
2703 struct drm_i915_private *dev_priv = dev->dev_private;
2704 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2705 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2706 enum port port = dp_to_dig_port(intel_dp)->port;
2707 enum pipe pipe = crtc->pipe;
/* Port must not already be enabled when we get here. */
2709 if (WARN_ON(dp_reg & DP_PORT_EN))
2714 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
2715 vlv_init_panel_power_sequencer(intel_dp);
2718 * We get an occasional spurious underrun between the port
2719 * enable and vdd enable, when enabling port A eDP.
2721 * FIXME: Not sure if this applies to (PCH) port D eDP as well
2724 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2726 intel_dp_enable_port(intel_dp);
2728 if (port == PORT_A && IS_GEN5(dev_priv)) {
2730 * Underrun reporting for the other pipe was disabled in
2731 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2732 * enabled, so it's now safe to re-enable underrun reporting.
2734 intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2735 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2736 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
/* eDP panel power-up sequence, bracketed by vdd. */
2739 edp_panel_vdd_on(intel_dp);
2740 edp_panel_on(intel_dp);
2741 edp_panel_vdd_off(intel_dp, true);
2744 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2746 pps_unlock(intel_dp);
2748 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
2749 unsigned int lane_mask = 0x0;
/* On CHV, unused lanes must be masked out of the ready check. */
2751 if (IS_CHERRYVIEW(dev))
2752 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2754 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2758 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2759 intel_dp_start_link_train(intel_dp);
2760 intel_dp_stop_link_train(intel_dp);
2762 if (crtc->config->has_audio) {
2763 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2765 intel_audio_codec_enable(encoder);
2769 static void g4x_enable_dp(struct intel_encoder *encoder)
2771 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2773 intel_enable_dp(encoder);
2774 intel_edp_backlight_on(intel_dp);
2777 static void vlv_enable_dp(struct intel_encoder *encoder)
2779 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2781 intel_edp_backlight_on(intel_dp);
2782 intel_psr_enable(intel_dp);
/*
 * g4x/ilk pre-enable: program the port registers, suppress FIFO underrun
 * reporting on the other pipe around the eDP PLL enable on GEN5 port A
 * (re-enabled later in intel_enable_dp()), then turn the eDP PLL on.
 * NOTE(review): the guard line before ironlake_edp_pll_on() is missing
 * in this extraction — presumably `if (port == PORT_A)`; confirm.
 */
2785 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2787 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2788 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2789 enum port port = dp_to_dig_port(intel_dp)->port;
2790 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2792 intel_dp_prepare(encoder);
2794 if (port == PORT_A && IS_GEN5(dev_priv)) {
2796 * We get FIFO underruns on the other pipe when
2797 * enabling the CPU eDP PLL, and when enabling CPU
2798 * eDP port. We could potentially avoid the PLL
2799 * underrun with a vblank wait just prior to enabling
2800 * the PLL, but that doesn't appear to help the port
2801 * enable case. Just sweep it all under the rug.
2803 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2804 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2807 /* Only ilk+ has port A */
2809 ironlake_edp_pll_on(intel_dp);
/*
 * Logically disconnect this port from its current power sequencer:
 * sync vdd off first (we're about to lose control of the PPS), then
 * clear the port select in the pipe's PP_ON_DELAYS register and mark
 * pps_pipe invalid. Caller holds pps_mutex (see callers).
 */
2812 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2814 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2815 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2816 enum pipe pipe = intel_dp->pps_pipe;
2817 i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2819 edp_panel_vdd_off_sync(intel_dp);
2822 * VLV seems to get confused when multiple power sequencers
2823 * have the same port selected (even if only one has power/vdd
2824 * enabled). The failure manifests as vlv_wait_port_ready() failing
2825 * CHV on the other hand doesn't seem to mind having the same port
2826 * selected in multiple power sequencers, but let's clear the
2827 * port select always when logically disconnecting a power sequencer
2830 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2831 pipe_name(pipe), port_name(intel_dig_port->port));
2832 I915_WRITE(pp_on_reg, 0);
2833 POSTING_READ(pp_on_reg);
2835 intel_dp->pps_pipe = INVALID_PIPE;
/*
 * Steal the given pipe's power sequencer from whichever eDP port
 * currently owns it, detaching it (which also syncs vdd off) so the
 * caller can claim it. Requires pps_mutex.
 * NOTE(review): the `enum pipe pipe` parameter line and the loop's
 * `continue;` lines are missing from this extraction — confirm against
 * the original file.
 */
2838 static void vlv_steal_power_sequencer(struct drm_device *dev,
2841 struct drm_i915_private *dev_priv = dev->dev_private;
2842 struct intel_encoder *encoder;
2844 lockdep_assert_held(&dev_priv->pps_mutex);
/* Only pipes A and B have power sequencers on VLV/CHV. */
2846 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2849 for_each_intel_encoder(dev, encoder) {
2850 struct intel_dp *intel_dp;
/* Only eDP outputs own power sequencers. */
2853 if (encoder->type != INTEL_OUTPUT_EDP)
2856 intel_dp = enc_to_intel_dp(&encoder->base);
2857 port = dp_to_dig_port(intel_dp)->port;
2859 if (intel_dp->pps_pipe != pipe)
2862 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2863 pipe_name(pipe), port_name(port));
/* Stealing from a port that is actively driving a crtc is a bug. */
2865 WARN(encoder->base.crtc,
2866 "stealing pipe %c power sequencer from active eDP port %c\n",
2867 pipe_name(pipe), port_name(port));
2869 /* make sure vdd is off before we steal it */
2870 vlv_detach_power_sequencer(intel_dp);
/*
 * Bind this pipe's power sequencer to the port being enabled:
 * no-op for non-eDP or if already bound to this pipe; otherwise detach
 * any previous PPS this port used, steal the target pipe's PPS from any
 * other port, claim it, and (re)program the PPS state and registers.
 * Requires pps_mutex.
 */
2874 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2876 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2877 struct intel_encoder *encoder = &intel_dig_port->base;
2878 struct drm_device *dev = encoder->base.dev;
2879 struct drm_i915_private *dev_priv = dev->dev_private;
2880 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2882 lockdep_assert_held(&dev_priv->pps_mutex);
2884 if (!is_edp(intel_dp))
/* Already bound to the right pipe — nothing to do. */
2887 if (intel_dp->pps_pipe == crtc->pipe)
2891 * If another power sequencer was being used on this
2892 * port previously make sure to turn off vdd there while
2893 * we still have control of it.
2895 if (intel_dp->pps_pipe != INVALID_PIPE)
2896 vlv_detach_power_sequencer(intel_dp)_;
2899 * We may be stealing the power
2900 * sequencer from another port.
2902 vlv_steal_power_sequencer(dev, crtc->pipe);
2904 /* now it's all ours */
2905 intel_dp->pps_pipe = crtc->pipe;
2907 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2908 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2910 /* init power sequencer on this pipe and port */
2911 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2912 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
/*
 * VLV pre-enable: program per-channel PCS registers over the DPIO
 * sideband, then run the common intel_enable_dp() (so on VLV the port
 * comes up in pre_enable, not enable — see vlv_enable_dp()).
 * NOTE(review): the lines that derive `val` between the PCS01_DW8 read
 * and the PCS_DW8 write are missing from this extraction — the exact
 * bit manipulation cannot be documented from here; confirm against the
 * original file.
 */
2915 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2917 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2918 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2919 struct drm_device *dev = encoder->base.dev;
2920 struct drm_i915_private *dev_priv = dev->dev_private;
2921 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2922 enum dpio_channel port = vlv_dport_to_channel(dport);
2923 int pipe = intel_crtc->pipe;
2926 mutex_lock(&dev_priv->sb_lock);
2928 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2935 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
/* Magic PCS values; no symbolic names available for these. */
2936 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2937 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2939 mutex_unlock(&dev_priv->sb_lock);
2941 intel_enable_dp(encoder);
/*
 * VLV pre-PLL-enable: program the port registers, then put the TX lanes
 * into their default reset state and apply the inter-pair skew
 * workaround, all over the DPIO sideband.
 */
2944 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2946 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2947 struct drm_device *dev = encoder->base.dev;
2948 struct drm_i915_private *dev_priv = dev->dev_private;
2949 struct intel_crtc *intel_crtc =
2950 to_intel_crtc(encoder->base.crtc);
2951 enum dpio_channel port = vlv_dport_to_channel(dport);
2952 int pipe = intel_crtc->pipe;
2954 intel_dp_prepare(encoder);
2956 /* Program Tx lane resets to default */
2957 mutex_lock(&dev_priv->sb_lock);
2958 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2959 DPIO_PCS_TX_LANE2_RESET |
2960 DPIO_PCS_TX_LANE1_RESET);
2961 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2962 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2963 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2964 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2965 DPIO_PCS_CLK_SOFT_RESET);
2967 /* Fix up inter-pair skew failure */
2968 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2969 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2970 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2971 mutex_unlock(&dev_priv->sb_lock);
/*
 * CHV pre-enable: per-lane PHY setup over the DPIO sideband — TX FIFO
 * reset source, upar bits, data lane stagger (scaled by port clock) —
 * then deassert the lane soft reset and run the common enable path.
 * Finally drop the CL2 powergate override taken in
 * chv_dp_pre_pll_enable() once the lanes can keep themselves alive.
 * NOTE(review): the stagger value assignments after each port_clock
 * comparison, a `continue;` for the single-lane upar case, and various
 * brace lines are missing from this extraction — confirm against the
 * original file.
 */
2974 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2976 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2977 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2978 struct drm_device *dev = encoder->base.dev;
2979 struct drm_i915_private *dev_priv = dev->dev_private;
2980 struct intel_crtc *intel_crtc =
2981 to_intel_crtc(encoder->base.crtc);
2982 enum dpio_channel ch = vlv_dport_to_channel(dport);
2983 int pipe = intel_crtc->pipe;
2984 int data, i, stagger;
2987 mutex_lock(&dev_priv->sb_lock);
2989 /* allow hardware to manage TX FIFO reset source */
2990 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2991 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2992 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2994 if (intel_crtc->config->lane_count > 2) {
2995 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2996 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2997 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3000 /* Program Tx lane latency optimal setting*/
3001 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3002 /* Set the upar bit */
3003 if (intel_crtc->config->lane_count == 1)
3006 data = (i == 1) ? 0x0 : 0x1;
3007 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
3008 data << DPIO_UPAR_SHIFT);
3011 /* Data lane stagger programming */
3012 if (intel_crtc->config->port_clock > 270000)
3014 else if (intel_crtc->config->port_clock > 135000)
3016 else if (intel_crtc->config->port_clock > 67500)
3018 else if (intel_crtc->config->port_clock > 33750)
3023 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
3024 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3025 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3027 if (intel_crtc->config->lane_count > 2) {
3028 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3029 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3030 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3033 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
3034 DPIO_LANESTAGGER_STRAP(stagger) |
3035 DPIO_LANESTAGGER_STRAP_OVRD |
3036 DPIO_TX1_STAGGER_MASK(0x1f) |
3037 DPIO_TX1_STAGGER_MULT(6) |
3038 DPIO_TX2_STAGGER_MULT(0));
3040 if (intel_crtc->config->lane_count > 2) {
3041 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
3042 DPIO_LANESTAGGER_STRAP(stagger) |
3043 DPIO_LANESTAGGER_STRAP_OVRD |
3044 DPIO_TX1_STAGGER_MASK(0x1f) |
3045 DPIO_TX1_STAGGER_MULT(7) |
3046 DPIO_TX2_STAGGER_MULT(5));
3049 /* Deassert data lane reset */
3050 chv_data_lane_soft_reset(encoder, false);
3052 mutex_unlock(&dev_priv->sb_lock);
3054 intel_enable_dp(encoder);
3056 /* Second common lane will stay alive on its own now */
3057 if (dport->release_cl2_override) {
3058 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
3059 dport->release_cl2_override = false;
/*
 * CHV pre-PLL-enable: prepare the port, power the second common lane if
 * needed (CL2 override, released later in chv_pre_enable_dp()), power
 * the used lanes, assert the data lane soft reset, and program the
 * left/right clock distribution and clock-channel usage over DPIO.
 * NOTE(review): the condition lines selecting between the FORCE /
 * USEDCLKCHANNEL alternatives (presumably on `ch`) are missing from
 * this extraction — confirm against the original file.
 */
3063 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
3065 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
3066 struct drm_device *dev = encoder->base.dev;
3067 struct drm_i915_private *dev_priv = dev->dev_private;
3068 struct intel_crtc *intel_crtc =
3069 to_intel_crtc(encoder->base.crtc);
3070 enum dpio_channel ch = vlv_dport_to_channel(dport);
3071 enum pipe pipe = intel_crtc->pipe;
3072 unsigned int lane_mask =
3073 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
3076 intel_dp_prepare(encoder);
3079 * Must trick the second common lane into life.
3080 * Otherwise we can't even access the PLL.
3082 if (ch == DPIO_CH0 && pipe == PIPE_B)
3083 dport->release_cl2_override =
3084 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
3086 chv_phy_powergate_lanes(encoder, true, lane_mask);
3088 mutex_lock(&dev_priv->sb_lock);
3090 /* Assert data lane reset */
3091 chv_data_lane_soft_reset(encoder, true);
3093 /* program left/right clock distribution */
3094 if (pipe != PIPE_B) {
3095 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3096 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3098 val |= CHV_BUFLEFTENA1_FORCE;
3100 val |= CHV_BUFRIGHTENA1_FORCE;
3101 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3103 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3104 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3106 val |= CHV_BUFLEFTENA2_FORCE;
3108 val |= CHV_BUFRIGHTENA2_FORCE;
3109 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3112 /* program clock channel usage */
3113 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3114 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3116 val &= ~CHV_PCS_USEDCLKCHANNEL;
3118 val |= CHV_PCS_USEDCLKCHANNEL;
3119 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3121 if (intel_crtc->config->lane_count > 2) {
3122 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3123 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3125 val &= ~CHV_PCS_USEDCLKCHANNEL;
3127 val |= CHV_PCS_USEDCLKCHANNEL;
3128 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3132 * This is a bit weird since generally CL
3133 * matches the pipe, but here we need to
3134 * pick the CL based on the port.
3136 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3138 val &= ~CHV_CMN_USEDCLKCHANNEL;
3140 val |= CHV_CMN_USEDCLKCHANNEL;
3141 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3143 mutex_unlock(&dev_priv->sb_lock);
/*
 * CHV post-PLL-disable: tear down the left/right clock distribution
 * programmed in chv_dp_pre_pll_enable(), then drop the lane powergate
 * overrides (leaving one lane alive — see the comment below).
 */
3146 static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3148 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3149 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3152 mutex_lock(&dev_priv->sb_lock);
3154 /* disable left/right clock distribution */
3155 if (pipe != PIPE_B) {
3156 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3157 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3158 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3160 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3161 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3162 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3165 mutex_unlock(&dev_priv->sb_lock);
3168 * Leave the power down bit cleared for at least one
3169 * lane so that chv_powergate_phy_ch() will power
3170 * on something when the channel is otherwise unused.
3171 * When the port is off and the override is removed
3172 * the lanes power down anyway, so otherwise it doesn't
3173 * really matter what the state of power down bits is
3176 chv_phy_powergate_lanes(encoder, false, 0x0);
3180 * Native read with retry for link status and receiver capability reads for
3181 * cases where the sink may still be asleep.
3183 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3184 * supposed to retry 3 times per the spec.
/*
 * See the comment block above: native DPCD read with retries for sinks
 * that are still waking up.
 * NOTE(review): the return-type line, local declarations, the loop's
 * success/exit lines after the read, and the final return are missing
 * from this extraction — confirm against the original file.
 */
3187 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3188 void *buffer, size_t size)
3194 * Sometimes we just get the same incorrect byte repeated
3195 * over the entire buffer. Doing just one throw away read
3196 * initially seems to "solve" it.
3198 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
/* Up to 3 attempts, per the DP spec's wake-up retry guidance. */
3200 for (i = 0; i < 3; i++) {
3201 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3211 * Fetch AUX CH registers 0x202 - 0x207 which contain
3212 * link status information
3215 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3217 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3220 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3223 /* These are source-specific values. */
3225 intel_dp_voltage_max(struct intel_dp *intel_dp)
3227 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3228 struct drm_i915_private *dev_priv = dev->dev_private;
3229 enum port port = dp_to_dig_port(intel_dp)->port;
3231 if (IS_BROXTON(dev))
3232 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3233 else if (INTEL_INFO(dev)->gen >= 9) {
3234 if (dev_priv->edp_low_vswing && port == PORT_A)
3235 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3236 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3237 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
3238 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3239 else if (IS_GEN7(dev) && port == PORT_A)
3240 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3241 else if (HAS_PCH_CPT(dev) && port != PORT_A)
3242 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3244 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
/*
 * Maximum pre-emphasis level the source supports for a given voltage
 * swing, per platform. Higher swing generally allows less pre-emphasis
 * (the sum of the two is bounded by the PHY).
 * NOTE(review): the return-type line and the `default:` labels inside
 * the switches are missing from this extraction — confirm against the
 * original file.
 */
3248 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3250 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3251 enum port port = dp_to_dig_port(intel_dp)->port;
3253 if (INTEL_INFO(dev)->gen >= 9) {
3254 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3255 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3256 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3257 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3258 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3259 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3260 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3261 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3262 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3264 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3266 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3267 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3268 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3269 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3270 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3271 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3272 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3273 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3274 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3276 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3278 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
3279 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3281 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3282 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3283 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3284 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3285 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3286 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3288 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3290 } else if (IS_GEN7(dev) && port == PORT_A) {
3291 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3292 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3293 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3294 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3295 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3296 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3298 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3301 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3303 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3304 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3305 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3306 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3307 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3308 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3310 return DP_TRAIN_PRE_EMPH_LEVEL_0;
/*
 * Program VLV PHY swing/pre-emphasis for the current train_set via the
 * DPIO sideband. The nested switches pick three magic register values
 * (demph, preemph, uniqtranscale) from a per-combination table; only
 * combinations where swing + pre-emphasis stays within the PHY's range
 * are present.
 * NOTE(review): the `default: return 0;` lines after each inner switch
 * and the `break;` lines are missing from this extraction — confirm
 * against the original file. Returns 0 (no DP register bits — levels
 * are applied via DPIO here, not the port register).
 */
3315 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3317 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3318 struct drm_i915_private *dev_priv = dev->dev_private;
3319 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3320 struct intel_crtc *intel_crtc =
3321 to_intel_crtc(dport->base.base.crtc);
3322 unsigned long demph_reg_value, preemph_reg_value,
3323 uniqtranscale_reg_value;
3324 uint8_t train_set = intel_dp->train_set[0];
3325 enum dpio_channel port = vlv_dport_to_channel(dport);
3326 int pipe = intel_crtc->pipe;
3328 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3329 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3330 preemph_reg_value = 0x0004000;
3331 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3332 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3333 demph_reg_value = 0x2B405555;
3334 uniqtranscale_reg_value = 0x552AB83A;
3336 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3337 demph_reg_value = 0x2B404040;
3338 uniqtranscale_reg_value = 0x5548B83A;
3340 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3341 demph_reg_value = 0x2B245555;
3342 uniqtranscale_reg_value = 0x5560B83A;
3344 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3345 demph_reg_value = 0x2B405555;
3346 uniqtranscale_reg_value = 0x5598DA3A;
3352 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3353 preemph_reg_value = 0x0002000;
3354 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3355 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3356 demph_reg_value = 0x2B404040;
3357 uniqtranscale_reg_value = 0x5552B83A;
3359 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3360 demph_reg_value = 0x2B404848;
3361 uniqtranscale_reg_value = 0x5580B83A;
3363 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3364 demph_reg_value = 0x2B404040;
3365 uniqtranscale_reg_value = 0x55ADDA3A;
3371 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3372 preemph_reg_value = 0x0000000;
3373 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3374 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3375 demph_reg_value = 0x2B305555;
3376 uniqtranscale_reg_value = 0x5570B83A;
3378 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3379 demph_reg_value = 0x2B2B4040;
3380 uniqtranscale_reg_value = 0x55ADDA3A;
3386 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3387 preemph_reg_value = 0x0006000;
3388 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3389 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3390 demph_reg_value = 0x1B405555;
3391 uniqtranscale_reg_value = 0x55ADDA3A;
/* Apply the chosen values to the PHY, bracketed by TX_DW5 writes. */
3401 mutex_lock(&dev_priv->sb_lock);
3402 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3403 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3404 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3405 uniqtranscale_reg_value);
3406 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3407 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3408 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3409 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3410 mutex_unlock(&dev_priv->sb_lock);
3415 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3417 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3418 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
/*
 * Program CHV PHY swing/pre-emphasis for the current train_set via the
 * DPIO sideband: pick deemph/margin from a per-combination table, then
 * run the documented sequence — clear calc init, clear margin fields,
 * program per-lane deemph and margin (plus unique transition scale),
 * and finally kick off the swing calculation. PCS23 registers are only
 * touched for more than 2 lanes.
 * NOTE(review): `default: return 0;` branches, `break;` lines and some
 * closing braces are missing from this extraction — confirm against the
 * original file. Returns 0 (levels are applied via DPIO, not the port
 * register).
 */
3421 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3423 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3424 struct drm_i915_private *dev_priv = dev->dev_private;
3425 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3426 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3427 u32 deemph_reg_value, margin_reg_value, val;
3428 uint8_t train_set = intel_dp->train_set[0];
3429 enum dpio_channel ch = vlv_dport_to_channel(dport);
3430 enum pipe pipe = intel_crtc->pipe;
/* Table lookup: (pre-emphasis, swing) -> (deemph, margin). */
3433 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3434 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3435 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3436 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3437 deemph_reg_value = 128;
3438 margin_reg_value = 52;
3440 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3441 deemph_reg_value = 128;
3442 margin_reg_value = 77;
3444 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3445 deemph_reg_value = 128;
3446 margin_reg_value = 102;
3448 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3449 deemph_reg_value = 128;
3450 margin_reg_value = 154;
3451 /* FIXME extra to set for 1200 */
3457 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3458 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3459 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3460 deemph_reg_value = 85;
3461 margin_reg_value = 78;
3463 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3464 deemph_reg_value = 85;
3465 margin_reg_value = 116;
3467 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3468 deemph_reg_value = 85;
3469 margin_reg_value = 154;
3475 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3476 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3477 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3478 deemph_reg_value = 64;
3479 margin_reg_value = 104;
3481 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3482 deemph_reg_value = 64;
3483 margin_reg_value = 154;
3489 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3490 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3491 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3492 deemph_reg_value = 43;
3493 margin_reg_value = 154;
3503 mutex_lock(&dev_priv->sb_lock);
3505 /* Clear calc init */
3506 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3507 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3508 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3509 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3510 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3512 if (intel_crtc->config->lane_count > 2) {
3513 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3514 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3515 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3516 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3517 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
/* Zero the PCS margin fields before per-lane programming. */
3520 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3521 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3522 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3523 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3525 if (intel_crtc->config->lane_count > 2) {
3526 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3527 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3528 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3529 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3532 /* Program swing deemph */
3533 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3534 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3535 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3536 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3537 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3540 /* Program swing margin */
3541 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3542 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3544 val &= ~DPIO_SWING_MARGIN000_MASK;
3545 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3548 * Supposedly this value shouldn't matter when unique transition
3549 * scale is disabled, but in fact it does matter. Let's just
3550 * always program the same value and hope it's OK.
3552 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3553 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3555 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3559 * The document said it needs to set bit 27 for ch0 and bit 26
3560 * for ch1. Might be a typo in the doc.
3561 * For now, for this unique transition scale selection, set bit
3562 * 27 for ch0 and ch1.
3564 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3565 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3566 if (chv_need_uniq_trans_scale(train_set))
3567 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3569 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3570 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3573 /* Start swing calculation */
3574 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3575 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3576 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3578 if (intel_crtc->config->lane_count > 2) {
3579 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3580 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3581 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3584 mutex_unlock(&dev_priv->sb_lock);
3590 gen4_signal_levels(uint8_t train_set)
3592 uint32_t signal_levels = 0;
3594 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3595 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3597 signal_levels |= DP_VOLTAGE_0_4;
3599 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3600 signal_levels |= DP_VOLTAGE_0_6;
3602 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3603 signal_levels |= DP_VOLTAGE_0_8;
3605 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3606 signal_levels |= DP_VOLTAGE_1_2;
3609 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3610 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3612 signal_levels |= DP_PRE_EMPHASIS_0;
3614 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3615 signal_levels |= DP_PRE_EMPHASIS_3_5;
3617 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3618 signal_levels |= DP_PRE_EMPHASIS_6;
3620 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3621 signal_levels |= DP_PRE_EMPHASIS_9_5;
3624 return signal_levels;
3627 /* Gen6's DP voltage swing and pre-emphasis control */
3629 gen6_edp_signal_levels(uint8_t train_set)
3631 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3632 DP_TRAIN_PRE_EMPHASIS_MASK);
3633 switch (signal_levels) {
3634 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3635 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3636 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3637 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3638 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3639 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3640 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3641 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3642 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3643 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3644 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3645 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3646 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3647 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3649 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3650 "0x%x\n", signal_levels);
3651 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3655 /* Gen7's DP voltage swing and pre-emphasis control */
3657 gen7_edp_signal_levels(uint8_t train_set)
3659 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3660 DP_TRAIN_PRE_EMPHASIS_MASK);
3661 switch (signal_levels) {
3662 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3663 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3664 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3665 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3666 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3667 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3669 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3670 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3671 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3672 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3674 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3675 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3676 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3677 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3680 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3681 "0x%x\n", signal_levels);
3682 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3687 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3689 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3690 enum port port = intel_dig_port->port;
3691 struct drm_device *dev = intel_dig_port->base.base.dev;
3692 struct drm_i915_private *dev_priv = to_i915(dev);
3693 uint32_t signal_levels, mask = 0;
3694 uint8_t train_set = intel_dp->train_set[0];
3697 signal_levels = ddi_signal_levels(intel_dp);
3699 if (IS_BROXTON(dev))
3702 mask = DDI_BUF_EMP_MASK;
3703 } else if (IS_CHERRYVIEW(dev)) {
3704 signal_levels = chv_signal_levels(intel_dp);
3705 } else if (IS_VALLEYVIEW(dev)) {
3706 signal_levels = vlv_signal_levels(intel_dp);
3707 } else if (IS_GEN7(dev) && port == PORT_A) {
3708 signal_levels = gen7_edp_signal_levels(train_set);
3709 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3710 } else if (IS_GEN6(dev) && port == PORT_A) {
3711 signal_levels = gen6_edp_signal_levels(train_set);
3712 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3714 signal_levels = gen4_signal_levels(train_set);
3715 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3719 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3721 DRM_DEBUG_KMS("Using vswing level %d\n",
3722 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3723 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3724 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3725 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3727 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3729 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3730 POSTING_READ(intel_dp->output_reg);
3734 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3735 uint8_t dp_train_pat)
3737 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3738 struct drm_i915_private *dev_priv =
3739 to_i915(intel_dig_port->base.base.dev);
3741 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3743 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3744 POSTING_READ(intel_dp->output_reg);
3747 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3749 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3750 struct drm_device *dev = intel_dig_port->base.base.dev;
3751 struct drm_i915_private *dev_priv = dev->dev_private;
3752 enum port port = intel_dig_port->port;
3758 val = I915_READ(DP_TP_CTL(port));
3759 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3760 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3761 I915_WRITE(DP_TP_CTL(port), val);
3764 * On PORT_A we can have only eDP in SST mode. There the only reason
3765 * we need to set idle transmission mode is to work around a HW issue
3766 * where we enable the pipe while not in idle link-training mode.
3767 * In this case there is requirement to wait for a minimum number of
3768 * idle patterns to be sent.
3773 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3775 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3779 intel_dp_link_down(struct intel_dp *intel_dp)
3781 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3782 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3783 enum port port = intel_dig_port->port;
3784 struct drm_device *dev = intel_dig_port->base.base.dev;
3785 struct drm_i915_private *dev_priv = dev->dev_private;
3786 uint32_t DP = intel_dp->DP;
3788 if (WARN_ON(HAS_DDI(dev)))
3791 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3794 DRM_DEBUG_KMS("\n");
3796 if ((IS_GEN7(dev) && port == PORT_A) ||
3797 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3798 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3799 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3801 if (IS_CHERRYVIEW(dev))
3802 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3804 DP &= ~DP_LINK_TRAIN_MASK;
3805 DP |= DP_LINK_TRAIN_PAT_IDLE;
3807 I915_WRITE(intel_dp->output_reg, DP);
3808 POSTING_READ(intel_dp->output_reg);
3810 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3811 I915_WRITE(intel_dp->output_reg, DP);
3812 POSTING_READ(intel_dp->output_reg);
3815 * HW workaround for IBX, we need to move the port
3816 * to transcoder A after disabling it to allow the
3817 * matching HDMI port to be enabled on transcoder A.
3819 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3821 * We get CPU/PCH FIFO underruns on the other pipe when
3822 * doing the workaround. Sweep them under the rug.
3824 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3825 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3827 /* always enable with pattern 1 (as per spec) */
3828 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3829 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3830 I915_WRITE(intel_dp->output_reg, DP);
3831 POSTING_READ(intel_dp->output_reg);
3834 I915_WRITE(intel_dp->output_reg, DP);
3835 POSTING_READ(intel_dp->output_reg);
3837 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3838 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3839 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3842 msleep(intel_dp->panel_power_down_delay);
3848 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3850 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3851 struct drm_device *dev = dig_port->base.base.dev;
3852 struct drm_i915_private *dev_priv = dev->dev_private;
3855 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3856 sizeof(intel_dp->dpcd)) < 0)
3857 return false; /* aux transfer failed */
3859 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3861 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3862 return false; /* DPCD not present */
3864 /* Check if the panel supports PSR */
3865 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3866 if (is_edp(intel_dp)) {
3867 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3869 sizeof(intel_dp->psr_dpcd));
3870 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3871 dev_priv->psr.sink_support = true;
3872 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3875 if (INTEL_INFO(dev)->gen >= 9 &&
3876 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3877 uint8_t frame_sync_cap;
3879 dev_priv->psr.sink_support = true;
3880 intel_dp_dpcd_read_wake(&intel_dp->aux,
3881 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3882 &frame_sync_cap, 1);
3883 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3884 /* PSR2 needs frame sync as well */
3885 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3886 DRM_DEBUG_KMS("PSR2 %s on sink",
3887 dev_priv->psr.psr2_support ? "supported" : "not supported");
3891 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3892 yesno(intel_dp_source_supports_hbr2(intel_dp)),
3893 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3895 /* Intermediate frequency support */
3896 if (is_edp(intel_dp) &&
3897 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3898 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3899 (rev >= 0x03)) { /* eDp v1.4 or higher */
3900 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3903 intel_dp_dpcd_read_wake(&intel_dp->aux,
3904 DP_SUPPORTED_LINK_RATES,
3906 sizeof(sink_rates));
3908 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3909 int val = le16_to_cpu(sink_rates[i]);
3914 /* Value read is in kHz while drm clock is saved in deca-kHz */
3915 intel_dp->sink_rates[i] = (val * 200) / 10;
3917 intel_dp->num_sink_rates = i;
3920 intel_dp_print_rates(intel_dp);
3922 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3923 DP_DWN_STRM_PORT_PRESENT))
3924 return true; /* native DP sink */
3926 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3927 return true; /* no per-port downstream info */
3929 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3930 intel_dp->downstream_ports,
3931 DP_MAX_DOWNSTREAM_PORTS) < 0)
3932 return false; /* downstream port status fetch failed */
3938 intel_dp_probe_oui(struct intel_dp *intel_dp)
3942 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3945 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3946 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3947 buf[0], buf[1], buf[2]);
3949 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3950 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3951 buf[0], buf[1], buf[2]);
3955 intel_dp_probe_mst(struct intel_dp *intel_dp)
3959 if (!intel_dp->can_mst)
3962 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3965 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3966 if (buf[0] & DP_MST_CAP) {
3967 DRM_DEBUG_KMS("Sink is MST capable\n");
3968 intel_dp->is_mst = true;
3970 DRM_DEBUG_KMS("Sink is not MST capable\n");
3971 intel_dp->is_mst = false;
3975 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3976 return intel_dp->is_mst;
3979 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
3981 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3982 struct drm_device *dev = dig_port->base.base.dev;
3983 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3989 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
3990 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3995 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3996 buf & ~DP_TEST_SINK_START) < 0) {
3997 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4003 intel_wait_for_vblank(dev, intel_crtc->pipe);
4005 if (drm_dp_dpcd_readb(&intel_dp->aux,
4006 DP_TEST_SINK_MISC, &buf) < 0) {
4010 count = buf & DP_TEST_COUNT_MASK;
4011 } while (--attempts && count);
4013 if (attempts == 0) {
4014 DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
4019 hsw_enable_ips(intel_crtc);
4023 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4025 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4026 struct drm_device *dev = dig_port->base.base.dev;
4027 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4031 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4034 if (!(buf & DP_TEST_CRC_SUPPORTED))
4037 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4040 if (buf & DP_TEST_SINK_START) {
4041 ret = intel_dp_sink_crc_stop(intel_dp);
4046 hsw_disable_ips(intel_crtc);
4048 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4049 buf | DP_TEST_SINK_START) < 0) {
4050 hsw_enable_ips(intel_crtc);
4054 intel_wait_for_vblank(dev, intel_crtc->pipe);
4058 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4060 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4061 struct drm_device *dev = dig_port->base.base.dev;
4062 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4067 ret = intel_dp_sink_crc_start(intel_dp);
4072 intel_wait_for_vblank(dev, intel_crtc->pipe);
4074 if (drm_dp_dpcd_readb(&intel_dp->aux,
4075 DP_TEST_SINK_MISC, &buf) < 0) {
4079 count = buf & DP_TEST_COUNT_MASK;
4081 } while (--attempts && count == 0);
4083 if (attempts == 0) {
4084 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4089 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4095 intel_dp_sink_crc_stop(intel_dp);
4100 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4102 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4103 DP_DEVICE_SERVICE_IRQ_VECTOR,
4104 sink_irq_vector, 1) == 1;
4108 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4112 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4114 sink_irq_vector, 14);
4121 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4123 uint8_t test_result = DP_TEST_ACK;
4127 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4129 uint8_t test_result = DP_TEST_NAK;
4133 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4135 uint8_t test_result = DP_TEST_NAK;
4136 struct intel_connector *intel_connector = intel_dp->attached_connector;
4137 struct drm_connector *connector = &intel_connector->base;
4139 if (intel_connector->detect_edid == NULL ||
4140 connector->edid_corrupt ||
4141 intel_dp->aux.i2c_defer_count > 6) {
4142 /* Check EDID read for NACKs, DEFERs and corruption
4143 * (DP CTS 1.2 Core r1.1)
4144 * 4.2.2.4 : Failed EDID read, I2C_NAK
4145 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4146 * 4.2.2.6 : EDID corruption detected
4147 * Use failsafe mode for all cases
4149 if (intel_dp->aux.i2c_nack_count > 0 ||
4150 intel_dp->aux.i2c_defer_count > 0)
4151 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4152 intel_dp->aux.i2c_nack_count,
4153 intel_dp->aux.i2c_defer_count);
4154 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4156 struct edid *block = intel_connector->detect_edid;
4158 /* We have to write the checksum
4159 * of the last block read
4161 block += intel_connector->detect_edid->extensions;
4163 if (!drm_dp_dpcd_write(&intel_dp->aux,
4164 DP_TEST_EDID_CHECKSUM,
4167 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4169 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4170 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4173 /* Set test active flag here so userspace doesn't interrupt things */
4174 intel_dp->compliance_test_active = 1;
4179 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4181 uint8_t test_result = DP_TEST_NAK;
4185 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4187 uint8_t response = DP_TEST_NAK;
4191 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4193 DRM_DEBUG_KMS("Could not read test request from sink\n");
4198 case DP_TEST_LINK_TRAINING:
4199 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4200 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4201 response = intel_dp_autotest_link_training(intel_dp);
4203 case DP_TEST_LINK_VIDEO_PATTERN:
4204 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4205 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4206 response = intel_dp_autotest_video_pattern(intel_dp);
4208 case DP_TEST_LINK_EDID_READ:
4209 DRM_DEBUG_KMS("EDID test requested\n");
4210 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4211 response = intel_dp_autotest_edid(intel_dp);
4213 case DP_TEST_LINK_PHY_TEST_PATTERN:
4214 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4215 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4216 response = intel_dp_autotest_phy_pattern(intel_dp);
4219 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4224 status = drm_dp_dpcd_write(&intel_dp->aux,
4228 DRM_DEBUG_KMS("Could not write test response to sink\n");
4232 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4236 if (intel_dp->is_mst) {
4241 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4245 /* check link status - esi[10] = 0x200c */
4246 if (intel_dp->active_mst_links &&
4247 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4248 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4249 intel_dp_start_link_train(intel_dp);
4250 intel_dp_stop_link_train(intel_dp);
4253 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4254 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4257 for (retry = 0; retry < 3; retry++) {
4259 wret = drm_dp_dpcd_write(&intel_dp->aux,
4260 DP_SINK_COUNT_ESI+1,
4267 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4269 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4277 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4278 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4279 intel_dp->is_mst = false;
4280 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4281 /* send a hotplug event */
4282 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4289 * According to DP spec
4292 * 2. Configure link according to Receiver Capabilities
4293 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4294 * 4. Check link status on receipt of hot-plug interrupt
4297 intel_dp_check_link_status(struct intel_dp *intel_dp)
4299 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4300 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4302 u8 link_status[DP_LINK_STATUS_SIZE];
4304 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4307 * Clearing compliance test variables to allow capturing
4308 * of values for next automated test request.
4310 intel_dp->compliance_test_active = 0;
4311 intel_dp->compliance_test_type = 0;
4312 intel_dp->compliance_test_data = 0;
4314 if (!intel_encoder->base.crtc)
4317 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4320 /* Try to read receiver status if the link appears to be up */
4321 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4325 /* Now read the DPCD to see if it's actually running */
4326 if (!intel_dp_get_dpcd(intel_dp)) {
4330 /* Try to read the source of the interrupt */
4331 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4332 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4333 /* Clear interrupt source */
4334 drm_dp_dpcd_writeb(&intel_dp->aux,
4335 DP_DEVICE_SERVICE_IRQ_VECTOR,
4338 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4339 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4340 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4341 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4344 /* if link training is requested we should perform it always */
4345 if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
4346 (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
4347 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4348 intel_encoder->base.name);
4349 intel_dp_start_link_train(intel_dp);
4350 intel_dp_stop_link_train(intel_dp);
4354 /* XXX this is probably wrong for multiple downstream ports */
4355 static enum drm_connector_status
4356 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4358 uint8_t *dpcd = intel_dp->dpcd;
4361 if (!intel_dp_get_dpcd(intel_dp))
4362 return connector_status_disconnected;
4364 /* if there's no downstream port, we're done */
4365 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4366 return connector_status_connected;
4368 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4369 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4370 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4373 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4375 return connector_status_unknown;
4377 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4378 : connector_status_disconnected;
4381 /* If no HPD, poke DDC gently */
4382 if (drm_probe_ddc(&intel_dp->aux.ddc))
4383 return connector_status_connected;
4385 /* Well we tried, say unknown for unreliable port types */
4386 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4387 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4388 if (type == DP_DS_PORT_TYPE_VGA ||
4389 type == DP_DS_PORT_TYPE_NON_EDID)
4390 return connector_status_unknown;
4392 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4393 DP_DWN_STRM_PORT_TYPE_MASK;
4394 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4395 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4396 return connector_status_unknown;
4399 /* Anything else is out of spec, warn and ignore */
4400 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4401 return connector_status_disconnected;
4404 static enum drm_connector_status
4405 edp_detect(struct intel_dp *intel_dp)
4407 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4408 enum drm_connector_status status;
4410 status = intel_panel_detect(dev);
4411 if (status == connector_status_unknown)
4412 status = connector_status_connected;
4417 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4418 struct intel_digital_port *port)
4422 switch (port->port) {
4426 bit = SDE_PORTB_HOTPLUG;
4429 bit = SDE_PORTC_HOTPLUG;
4432 bit = SDE_PORTD_HOTPLUG;
4435 MISSING_CASE(port->port);
4439 return I915_READ(SDEISR) & bit;
4442 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4443 struct intel_digital_port *port)
4447 switch (port->port) {
4451 bit = SDE_PORTB_HOTPLUG_CPT;
4454 bit = SDE_PORTC_HOTPLUG_CPT;
4457 bit = SDE_PORTD_HOTPLUG_CPT;
4460 bit = SDE_PORTE_HOTPLUG_SPT;
4463 MISSING_CASE(port->port);
4467 return I915_READ(SDEISR) & bit;
4470 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4471 struct intel_digital_port *port)
4475 switch (port->port) {
4477 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4480 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4483 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4486 MISSING_CASE(port->port);
4490 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4493 static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4494 struct intel_digital_port *port)
4498 switch (port->port) {
4500 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4503 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4506 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4509 MISSING_CASE(port->port);
4513 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4516 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4517 struct intel_digital_port *intel_dig_port)
4519 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4523 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4526 bit = BXT_DE_PORT_HP_DDIA;
4529 bit = BXT_DE_PORT_HP_DDIB;
4532 bit = BXT_DE_PORT_HP_DDIC;
4539 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4543 * intel_digital_port_connected - is the specified port connected?
4544 * @dev_priv: i915 private structure
4545 * @port: the port to test
4547 * Return %true if @port is connected, %false otherwise.
4549 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4550 struct intel_digital_port *port)
4552 if (HAS_PCH_IBX(dev_priv))
4553 return ibx_digital_port_connected(dev_priv, port);
4554 if (HAS_PCH_SPLIT(dev_priv))
4555 return cpt_digital_port_connected(dev_priv, port);
4556 else if (IS_BROXTON(dev_priv))
4557 return bxt_digital_port_connected(dev_priv, port);
4558 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4559 return vlv_digital_port_connected(dev_priv, port);
4561 return g4x_digital_port_connected(dev_priv, port);
4564 static struct edid *
4565 intel_dp_get_edid(struct intel_dp *intel_dp)
4567 struct intel_connector *intel_connector = intel_dp->attached_connector;
4569 /* use cached edid if we have one */
4570 if (intel_connector->edid) {
4572 if (IS_ERR(intel_connector->edid))
4575 return drm_edid_duplicate(intel_connector->edid);
4577 return drm_get_edid(&intel_connector->base,
4578 &intel_dp->aux.ddc);
4582 intel_dp_set_edid(struct intel_dp *intel_dp)
4584 struct intel_connector *intel_connector = intel_dp->attached_connector;
4587 edid = intel_dp_get_edid(intel_dp);
4588 intel_connector->detect_edid = edid;
4590 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4591 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4593 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4597 intel_dp_unset_edid(struct intel_dp *intel_dp)
4599 struct intel_connector *intel_connector = intel_dp->attached_connector;
4601 kfree(intel_connector->detect_edid);
4602 intel_connector->detect_edid = NULL;
4604 intel_dp->has_audio = false;
4607 static enum drm_connector_status
4608 intel_dp_detect(struct drm_connector *connector, bool force)
4610 struct intel_dp *intel_dp = intel_attached_dp(connector);
4611 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4612 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4613 struct drm_device *dev = connector->dev;
4614 enum drm_connector_status status;
4615 enum intel_display_power_domain power_domain;
4619 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4620 connector->base.id, connector->name);
4621 intel_dp_unset_edid(intel_dp);
4623 if (intel_dp->is_mst) {
4624 /* MST devices are disconnected from a monitor POV */
4625 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4626 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4627 return connector_status_disconnected;
4630 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4631 intel_display_power_get(to_i915(dev), power_domain);
4633 /* Can't disconnect eDP, but you can close the lid... */
4634 if (is_edp(intel_dp))
4635 status = edp_detect(intel_dp);
4636 else if (intel_digital_port_connected(to_i915(dev),
4637 dp_to_dig_port(intel_dp)))
4638 status = intel_dp_detect_dpcd(intel_dp);
4640 status = connector_status_disconnected;
4642 if (status != connector_status_connected) {
4643 intel_dp->compliance_test_active = 0;
4644 intel_dp->compliance_test_type = 0;
4645 intel_dp->compliance_test_data = 0;
4650 intel_dp_probe_oui(intel_dp);
4652 ret = intel_dp_probe_mst(intel_dp);
4654 /* if we are in MST mode then this connector
4655 won't appear connected or have anything with EDID on it */
4656 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4657 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4658 status = connector_status_disconnected;
4663 * Clearing NACK and defer counts to get their exact values
4664 * while reading EDID which are required by Compliance tests
4665 * 4.2.2.4 and 4.2.2.5
4667 intel_dp->aux.i2c_nack_count = 0;
4668 intel_dp->aux.i2c_defer_count = 0;
4670 intel_dp_set_edid(intel_dp);
4672 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4673 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4674 status = connector_status_connected;
4676 /* Try to read the source of the interrupt */
4677 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4678 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4679 /* Clear interrupt source */
4680 drm_dp_dpcd_writeb(&intel_dp->aux,
4681 DP_DEVICE_SERVICE_IRQ_VECTOR,
4684 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4685 intel_dp_handle_test_request(intel_dp);
4686 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4687 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4691 intel_display_power_put(to_i915(dev), power_domain);
4696 intel_dp_force(struct drm_connector *connector)
4698 struct intel_dp *intel_dp = intel_attached_dp(connector);
4699 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4700 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4701 enum intel_display_power_domain power_domain;
4703 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4704 connector->base.id, connector->name);
4705 intel_dp_unset_edid(intel_dp);
4707 if (connector->status != connector_status_connected)
4710 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4711 intel_display_power_get(dev_priv, power_domain);
4713 intel_dp_set_edid(intel_dp);
4715 intel_display_power_put(dev_priv, power_domain);
4717 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4718 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4721 static int intel_dp_get_modes(struct drm_connector *connector)
4723 struct intel_connector *intel_connector = to_intel_connector(connector);
4726 edid = intel_connector->detect_edid;
4728 int ret = intel_connector_update_modes(connector, edid);
4733 /* if eDP has no EDID, fall back to fixed mode */
4734 if (is_edp(intel_attached_dp(connector)) &&
4735 intel_connector->panel.fixed_mode) {
4736 struct drm_display_mode *mode;
4738 mode = drm_mode_duplicate(connector->dev,
4739 intel_connector->panel.fixed_mode);
4741 drm_mode_probed_add(connector, mode);
4750 intel_dp_detect_audio(struct drm_connector *connector)
4752 bool has_audio = false;
4755 edid = to_intel_connector(connector)->detect_edid;
4757 has_audio = drm_detect_monitor_audio(edid);
/* drm_connector_funcs.set_property: handle the i915 DP connector
 * properties (force_audio, Broadcast RGB, eDP scaling mode). When a
 * property change affects the active output, a modeset restore is
 * triggered at the end so the new state takes effect. */
4763 intel_dp_set_property(struct drm_connector *connector,
4764 struct drm_property *property,
4767 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4768 struct intel_connector *intel_connector = to_intel_connector(connector);
4769 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4770 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
/* Record the value on the DRM object first. */
4773 ret = drm_object_property_set_value(&connector->base, property, val);
4777 if (property == dev_priv->force_audio_property) {
/* No-op when the requested force_audio mode is unchanged. */
4781 if (i == intel_dp->force_audio)
4784 intel_dp->force_audio = i;
/* "auto" consults the EDID; on/off override it. */
4786 if (i == HDMI_AUDIO_AUTO)
4787 has_audio = intel_dp_detect_audio(connector);
4789 has_audio = (i == HDMI_AUDIO_ON);
4791 if (has_audio == intel_dp->has_audio)
4794 intel_dp->has_audio = has_audio;
4798 if (property == dev_priv->broadcast_rgb_property) {
4799 bool old_auto = intel_dp->color_range_auto;
4800 bool old_range = intel_dp->limited_color_range;
4803 case INTEL_BROADCAST_RGB_AUTO:
4804 intel_dp->color_range_auto = true;
4806 case INTEL_BROADCAST_RGB_FULL:
4807 intel_dp->color_range_auto = false;
4808 intel_dp->limited_color_range = false;
4810 case INTEL_BROADCAST_RGB_LIMITED:
4811 intel_dp->color_range_auto = false;
4812 intel_dp->limited_color_range = true;
/* Skip the modeset when the effective color range didn't change. */
4818 if (old_auto == intel_dp->color_range_auto &&
4819 old_range == intel_dp->limited_color_range)
4825 if (is_edp(intel_dp) &&
4826 property == connector->dev->mode_config.scaling_mode_property) {
/* eDP always scales to the fixed panel mode; "none" is rejected. */
4827 if (val == DRM_MODE_SCALE_NONE) {
4828 DRM_DEBUG_KMS("no scaling not supported\n");
4832 if (intel_connector->panel.fitting_mode == val) {
4833 /* the eDP scaling property is not changed */
4836 intel_connector->panel.fitting_mode = val;
/* Re-do the modeset on the attached crtc so the change takes effect. */
4844 if (intel_encoder->base.crtc)
4845 intel_crtc_restore_mode(intel_encoder->base.crtc);
/* drm_connector_funcs.destroy: free connector-private data (cached EDIDs,
 * eDP panel state) and tear down the DRM connector. */
4851 intel_dp_connector_destroy(struct drm_connector *connector)
4853 struct intel_connector *intel_connector = to_intel_connector(connector);
4855 kfree(intel_connector->detect_edid);
/* edid may hold an ERR_PTR sentinel rather than a real allocation. */
4857 if (!IS_ERR_OR_NULL(intel_connector->edid))
4858 kfree(intel_connector->edid);
4860 /* Can't call is_edp() since the encoder may have been destroyed
4862 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4863 intel_panel_fini(&intel_connector->panel);
4865 drm_connector_cleanup(connector);
/* Encoder teardown: release AUX resources, MST state and, for eDP,
 * make sure panel VDD is really off before freeing the port. */
4869 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4871 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4872 struct intel_dp *intel_dp = &intel_dig_port->dp;
4874 intel_dp_aux_fini(intel_dp);
4875 intel_dp_mst_encoder_cleanup(intel_dig_port);
4876 if (is_edp(intel_dp)) {
4877 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4879 * vdd might still be enabled due to the delayed vdd off.
4880 * Make sure vdd is actually turned off here.
4883 edp_panel_vdd_off_sync(intel_dp);
4884 pps_unlock(intel_dp);
/* Unhook the VLV/CHV reboot notifier if it was registered. */
4886 if (intel_dp->edp_notifier.notifier_call) {
4887 unregister_reboot_notifier(&intel_dp->edp_notifier);
4888 intel_dp->edp_notifier.notifier_call = NULL;
4891 drm_encoder_cleanup(encoder);
4892 kfree(intel_dig_port);
/* System suspend hook: for eDP, flush the delayed VDD-off work and force
 * panel VDD off synchronously so it is not left on across suspend. */
4895 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4897 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4899 if (!is_edp(intel_dp))
4903 * vdd might still be enabled due to the delayed vdd off.
4904 * Make sure vdd is actually turned off here.
4906 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4908 edp_panel_vdd_off_sync(intel_dp);
4909 pps_unlock(intel_dp);
/* Sanitize VDD tracking at boot/resume: if the BIOS left panel VDD
 * enabled, take the matching power domain reference so our refcounting
 * is consistent, then schedule the normal delayed VDD off.
 * Caller must hold pps_mutex (asserted below). */
4912 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4914 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4915 struct drm_device *dev = intel_dig_port->base.base.dev;
4916 struct drm_i915_private *dev_priv = dev->dev_private;
4917 enum intel_display_power_domain power_domain;
4919 lockdep_assert_held(&dev_priv->pps_mutex);
/* Nothing to fix up if VDD is already off. */
4921 if (!edp_have_panel_vdd(intel_dp))
4925 * The VDD bit needs a power domain reference, so if the bit is
4926 * already enabled when we boot or resume, grab this reference and
4927 * schedule a vdd off, so we don't hold on to the reference
4930 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4931 power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
4932 intel_display_power_get(dev_priv, power_domain);
4934 edp_panel_vdd_schedule_off(intel_dp);
/* drm_encoder_funcs.reset (boot/resume): re-read the power sequencer
 * assignment the BIOS may have changed and sanitize VDD state. Only
 * relevant for eDP encoders. */
4937 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4939 struct intel_dp *intel_dp;
4941 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4944 intel_dp = enc_to_intel_dp(encoder);
4949 * Read out the current power sequencer assignment,
4950 * in case the BIOS did something with it.
4952 if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
4953 vlv_initial_power_sequencer_setup(intel_dp);
4955 intel_edp_panel_vdd_sanitize(intel_dp);
4957 pps_unlock(intel_dp);
/* DRM connector vtable for DP/eDP: atomic-helper DPMS plus the custom
 * detect/force/property handlers defined above. */
4960 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4961 .dpms = drm_atomic_helper_connector_dpms,
4962 .detect = intel_dp_detect,
4963 .force = intel_dp_force,
4964 .fill_modes = drm_helper_probe_single_connector_modes,
4965 .set_property = intel_dp_set_property,
4966 .atomic_get_property = intel_connector_atomic_get_property,
4967 .destroy = intel_dp_connector_destroy,
4968 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4969 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
/* Probe helpers: mode enumeration/validation and encoder selection. */
4972 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4973 .get_modes = intel_dp_get_modes,
4974 .mode_valid = intel_dp_mode_valid,
4975 .best_encoder = intel_best_encoder,
/* Encoder vtable: reset restores BIOS-modified PPS state on resume. */
4978 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4979 .reset = intel_dp_encoder_reset,
4980 .destroy = intel_dp_encoder_destroy,
/* Hotplug IRQ handler for a digital port. A long pulse means connect/
 * disconnect (full detect path), a short pulse means a sink IRQ (link
 * status / MST sideband). Returns IRQ_HANDLED/IRQ_NONE. */
4984 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4986 struct intel_dp *intel_dp = &intel_dig_port->dp;
4987 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4988 struct drm_device *dev = intel_dig_port->base.base.dev;
4989 struct drm_i915_private *dev_priv = dev->dev_private;
4990 enum intel_display_power_domain power_domain;
4991 enum irqreturn ret = IRQ_NONE;
/* Normalize a DDI encoder's type to DP before handling the pulse. */
4993 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
4994 intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
4995 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4997 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4999 * vdd off can generate a long pulse on eDP which
5000 * would require vdd on to handle it, and thus we
5001 * would end up in an endless cycle of
5002 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5004 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5005 port_name(intel_dig_port->port));
5009 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5010 port_name(intel_dig_port->port),
5011 long_hpd ? "long" : "short");
/* AUX power is needed for the DPCD reads below. */
5013 power_domain = intel_display_port_aux_power_domain(intel_encoder);
5014 intel_display_power_get(dev_priv, power_domain);
5017 /* indicate that we need to restart link training */
5018 intel_dp->train_set_valid = false;
5020 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
/* DPCD unreadable: treat the sink as gone (fall through to MST teardown). */
5023 if (!intel_dp_get_dpcd(intel_dp)) {
5027 intel_dp_probe_oui(intel_dp);
/* Not an MST sink: check SST link status under the connection lock. */
5029 if (!intel_dp_probe_mst(intel_dp)) {
5030 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5031 intel_dp_check_link_status(intel_dp);
5032 drm_modeset_unlock(&dev->mode_config.connection_mutex);
/* Short pulse: service MST sideband messages, or SST link status. */
5036 if (intel_dp->is_mst) {
5037 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
5041 if (!intel_dp->is_mst) {
5042 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5043 intel_dp_check_link_status(intel_dp);
5044 drm_modeset_unlock(&dev->mode_config.connection_mutex);
5052 /* if we were in MST mode, and device is not there get out of MST mode */
5053 if (intel_dp->is_mst) {
5054 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5055 intel_dp->is_mst = false;
5056 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5059 intel_display_power_put(dev_priv, power_domain);
5064 /* check the VBT to see whether the eDP is on another port */
/* Returns true when the VBT declares an eDP child device on @port.
 * Port A is handled elsewhere; this covers ports B-E via the mapping
 * table below. */
5065 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5067 struct drm_i915_private *dev_priv = dev->dev_private;
5068 union child_device_config *p_child;
5070 static const short port_mapping[] = {
5071 [PORT_B] = DVO_PORT_DPB,
5072 [PORT_C] = DVO_PORT_DPC,
5073 [PORT_D] = DVO_PORT_DPD,
5074 [PORT_E] = DVO_PORT_DPE,
5078 * eDP not supported on g4x. so bail out early just
5079 * for a bit extra safety in case the VBT is bonkers.
5081 if (INTEL_INFO(dev)->gen < 5)
/* No VBT child devices parsed: assume no eDP on this port. */
5087 if (!dev_priv->vbt.child_dev_num)
5090 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5091 p_child = dev_priv->vbt.child_dev + i;
/* Match the port and the eDP bits of the VBT device type. */
5093 if (p_child->common.dvo_port == port_mapping[port] &&
5094 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5095 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
/* Attach the standard i915 DP connector properties (force audio,
 * Broadcast RGB), plus the scaling-mode property for eDP panels. */
5102 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5104 struct intel_connector *intel_connector = to_intel_connector(connector);
5106 intel_attach_force_audio_property(connector);
5107 intel_attach_broadcast_rgb_property(connector);
/* Default to automatic (sink-derived) color range selection. */
5108 intel_dp->color_range_auto = true;
5110 if (is_edp(intel_dp)) {
5111 drm_mode_create_scaling_mode_property(connector->dev);
5112 drm_object_attach_property(
5114 connector->dev->mode_config.scaling_mode_property,
5115 DRM_MODE_SCALE_ASPECT);
/* Aspect-preserving panel fitting is the eDP default. */
5116 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
/* Seed the PPS timestamps to "now" so the panel power-sequencing delay
 * waits measure from driver init rather than from jiffies zero. */
5120 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5122 intel_dp->last_power_cycle = jiffies;
5123 intel_dp->last_power_on = jiffies;
5124 intel_dp->last_backlight_off = jiffies;
/* Compute the eDP panel power sequencing delays (T1/T3, T8, T9, T10,
 * T11/T12): read the current hardware values, compare with the VBT, and
 * take the max of both, falling back to eDP-spec limits when neither is
 * set. Results land in intel_dp->pps_delays and the derived *_delay
 * fields (in ms). Caller must hold pps_mutex. */
5128 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5129 struct intel_dp *intel_dp)
5131 struct drm_i915_private *dev_priv = dev->dev_private;
5132 struct edp_power_seq cur, vbt, spec,
5133 *final = &intel_dp->pps_delays;
5134 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5135 i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
5137 lockdep_assert_held(&dev_priv->pps_mutex);
5139 /* already initialized? */
5140 if (final->t11_t12 != 0)
/* Select the per-platform PPS register bank. */
5143 if (IS_BROXTON(dev)) {
5145 * TODO: BXT has 2 sets of PPS registers.
5146 * Correct Register for Broxton need to be identified
5147 * using VBT. hardcoding for now
5149 pp_ctrl_reg = BXT_PP_CONTROL(0);
5150 pp_on_reg = BXT_PP_ON_DELAYS(0);
5151 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5152 } else if (HAS_PCH_SPLIT(dev)) {
5153 pp_ctrl_reg = PCH_PP_CONTROL;
5154 pp_on_reg = PCH_PP_ON_DELAYS;
5155 pp_off_reg = PCH_PP_OFF_DELAYS;
5156 pp_div_reg = PCH_PP_DIVISOR;
5158 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5160 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5161 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5162 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5163 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5166 /* Workaround: Need to write PP_CONTROL with the unlock key as
5167 * the very first thing. */
5168 pp_ctl = ironlake_get_pp_control(intel_dp);
5170 pp_on = I915_READ(pp_on_reg);
5171 pp_off = I915_READ(pp_off_reg);
/* BXT has no separate divisor register; the cycle delay lives in PP_CONTROL. */
5172 if (!IS_BROXTON(dev)) {
5173 I915_WRITE(pp_ctrl_reg, pp_ctl);
5174 pp_div = I915_READ(pp_div_reg);
5177 /* Pull timing values out of registers */
5178 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5179 PANEL_POWER_UP_DELAY_SHIFT;
5181 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5182 PANEL_LIGHT_ON_DELAY_SHIFT;
5184 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5185 PANEL_LIGHT_OFF_DELAY_SHIFT;
5187 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5188 PANEL_POWER_DOWN_DELAY_SHIFT;
5190 if (IS_BROXTON(dev)) {
5191 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5192 BXT_POWER_CYCLE_DELAY_SHIFT;
/* BXT cycle delay is 1-based in HW; convert to the 100us units used below. */
5194 cur.t11_t12 = (tmp - 1) * 1000;
5198 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5199 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5202 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5203 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5205 vbt = dev_priv->vbt.edp_pps;
5207 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5208 * our hw here, which are all in 100usec. */
5209 spec.t1_t3 = 210 * 10;
5210 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5211 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5212 spec.t10 = 500 * 10;
5213 /* This one is special and actually in units of 100ms, but zero
5214 * based in the hw (so we need to add 100 ms). But the sw vbt
5215 * table multiplies it with 1000 to make it in units of 100usec,
5217 spec.t11_t12 = (510 + 100) * 10;
5219 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5220 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5222 /* Use the max of the register settings and vbt. If both are
5223 * unset, fall back to the spec limits. */
5224 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5226 max(cur.field, vbt.field))
5227 assign_final(t1_t3);
5231 assign_final(t11_t12);
/* Convert from 100us hardware units to milliseconds, rounding up. */
5234 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5235 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5236 intel_dp->backlight_on_delay = get_delay(t8);
5237 intel_dp->backlight_off_delay = get_delay(t9);
5238 intel_dp->panel_power_down_delay = get_delay(t10);
5239 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5242 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5243 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5244 intel_dp->panel_power_cycle_delay);
5246 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5247 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
/* Program the sanitized PPS delays from intel_dp->pps_delays back into
 * the per-platform panel power sequencer registers, including the pp
 * clock divisor and (pre-HSW) the port select bits.
 * Caller must hold pps_mutex. */
5251 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5252 struct intel_dp *intel_dp)
5254 struct drm_i915_private *dev_priv = dev->dev_private;
5255 u32 pp_on, pp_off, pp_div, port_sel = 0;
5256 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5257 i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
5258 enum port port = dp_to_dig_port(intel_dp)->port;
5259 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5261 lockdep_assert_held(&dev_priv->pps_mutex);
/* Per-platform PPS register bank (mirrors the init function above). */
5263 if (IS_BROXTON(dev)) {
5265 * TODO: BXT has 2 sets of PPS registers.
5266 * Correct Register for Broxton need to be identified
5267 * using VBT. hardcoding for now
5269 pp_ctrl_reg = BXT_PP_CONTROL(0);
5270 pp_on_reg = BXT_PP_ON_DELAYS(0);
5271 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5273 } else if (HAS_PCH_SPLIT(dev)) {
5274 pp_on_reg = PCH_PP_ON_DELAYS;
5275 pp_off_reg = PCH_PP_OFF_DELAYS;
5276 pp_div_reg = PCH_PP_DIVISOR;
5278 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5280 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5281 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5282 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5286 * And finally store the new values in the power sequencer. The
5287 * backlight delays are set to 1 because we do manual waits on them. For
5288 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5289 * we'll end up waiting for the backlight off delay twice: once when we
5290 * do the manual sleep, and once when we disable the panel and wait for
5291 * the PP_STATUS bit to become zero.
5293 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5294 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5295 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5296 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5297 /* Compute the divisor for the pp clock, simply match the Bspec
5299 if (IS_BROXTON(dev)) {
/* BXT keeps the power-cycle delay in PP_CONTROL rather than a divisor reg. */
5300 pp_div = I915_READ(pp_ctrl_reg);
5301 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5302 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5303 << BXT_POWER_CYCLE_DELAY_SHIFT);
5305 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5306 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5307 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5310 /* Haswell doesn't have any port selection bits for the panel
5311 * power sequencer any more. */
5312 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5313 port_sel = PANEL_PORT_SELECT_VLV(port);
5314 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5316 port_sel = PANEL_PORT_SELECT_DPA;
5318 port_sel = PANEL_PORT_SELECT_DPD;
5323 I915_WRITE(pp_on_reg, pp_on);
5324 I915_WRITE(pp_off_reg, pp_off);
5325 if (IS_BROXTON(dev))
5326 I915_WRITE(pp_ctrl_reg, pp_div);
5328 I915_WRITE(pp_div_reg, pp_div);
/* Read back for the debug dump so we log what the HW actually latched. */
5330 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5331 I915_READ(pp_on_reg),
5332 I915_READ(pp_off_reg),
5334 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5335 I915_READ(pp_div_reg));
5339 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5341 * @refresh_rate: RR to be programmed
5343 * This function gets called when refresh rate (RR) has to be changed from
5344 * one frequency to another. Switches can be between high and low RR
5345 * supported by the panel or to any other RR based on media playback (in
5346 * this case, RR value needs to be passed from user space).
5348 * The caller of this function needs to take a lock on dev_priv->drrs.
5350 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5352 struct drm_i915_private *dev_priv = dev->dev_private;
5353 struct intel_encoder *encoder;
5354 struct intel_digital_port *dig_port = NULL;
5355 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5356 struct intel_crtc_state *config = NULL;
5357 struct intel_crtc *intel_crtc = NULL;
5358 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5360 if (refresh_rate <= 0) {
5361 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n")
5365 if (intel_dp == NULL) {
5366 DRM_DEBUG_KMS("DRRS not supported.\n");
5371 * FIXME: This needs proper synchronization with psr state for some
5372 * platforms that cannot have PSR and DRRS enabled at the same time.
5375 dig_port = dp_to_dig_port(intel_dp);
5376 encoder = &dig_port->base;
5377 intel_crtc = to_intel_crtc(encoder->base.crtc);
5380 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5384 config = intel_crtc->config;
/* Only seamless (no-modeset) switching is done here. */
5386 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5387 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
/* Requested rate matching the downclock mode selects the low-RR set. */
5391 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5393 index = DRRS_LOW_RR;
5395 if (index == dev_priv->drrs.refresh_rate_type) {
5397 "DRRS requested for previously set RR...ignoring\n");
5401 if (!intel_crtc->active) {
5402 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
/* Gen8+ (except CHV): switch between the M1/N1 and M2/N2 link values. */
5406 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5409 intel_dp_set_m_n(intel_crtc, M1_N1);
5412 intel_dp_set_m_n(intel_crtc, M2_N2);
5416 DRM_ERROR("Unsupported refreshrate type\n");
/* Gen7/VLV/CHV: toggle the RR-mode-switch bit in PIPECONF instead. */
5418 } else if (INTEL_INFO(dev)->gen > 6) {
5419 i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5422 val = I915_READ(reg);
5423 if (index > DRRS_HIGH_RR) {
5424 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5425 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5427 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5429 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5430 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5432 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5434 I915_WRITE(reg, val);
5437 dev_priv->drrs.refresh_rate_type = index;
5439 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5443 * intel_edp_drrs_enable - init drrs struct if supported
5444 * @intel_dp: DP struct
5446 * Initializes frontbuffer_bits and drrs.dp
5448 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5450 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5451 struct drm_i915_private *dev_priv = dev->dev_private;
5452 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5453 struct drm_crtc *crtc = dig_port->base.base.crtc;
5454 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5456 if (!intel_crtc->config->has_drrs) {
5457 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5461 mutex_lock(&dev_priv->drrs.mutex);
/* Only one DRRS-capable panel can be tracked at a time. */
5462 if (WARN_ON(dev_priv->drrs.dp)) {
5463 DRM_ERROR("DRRS already enabled\n");
5467 dev_priv->drrs.busy_frontbuffer_bits = 0;
5469 dev_priv->drrs.dp = intel_dp;
5472 mutex_unlock(&dev_priv->drrs.mutex);
5476 * intel_edp_drrs_disable - Disable DRRS
5477 * @intel_dp: DP struct
5480 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5482 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5483 struct drm_i915_private *dev_priv = dev->dev_private;
5484 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5485 struct drm_crtc *crtc = dig_port->base.base.crtc;
5486 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5488 if (!intel_crtc->config->has_drrs)
5491 mutex_lock(&dev_priv->drrs.mutex);
/* Nothing to do if DRRS was never enabled. */
5492 if (!dev_priv->drrs.dp) {
5493 mutex_unlock(&dev_priv->drrs.mutex);
/* Restore the high refresh rate before tearing down tracking. */
5497 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5498 intel_dp_set_drrs_state(dev_priv->dev,
5499 intel_dp->attached_connector->panel.
5500 fixed_mode->vrefresh);
5502 dev_priv->drrs.dp = NULL;
5503 mutex_unlock(&dev_priv->drrs.mutex);
/* Flush the pending downclock work after dropping the mutex. */
5505 cancel_delayed_work_sync(&dev_priv->drrs.work);
/* Delayed-work handler: after the idleness timeout, downclock to the
 * low refresh rate if no frontbuffer activity is pending. */
5508 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5510 struct drm_i915_private *dev_priv =
5511 container_of(work, typeof(*dev_priv), drrs.work.work);
5512 struct intel_dp *intel_dp;
5514 mutex_lock(&dev_priv->drrs.mutex);
5516 intel_dp = dev_priv->drrs.dp;
5522 * The delayed work can race with an invalidate hence we need to
/* Still-busy frontbuffers veto the downclock. */
5526 if (dev_priv->drrs.busy_frontbuffer_bits)
5529 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5530 intel_dp_set_drrs_state(dev_priv->dev,
5531 intel_dp->attached_connector->panel.
5532 downclock_mode->vrefresh);
5535 mutex_unlock(&dev_priv->drrs.mutex);
5539 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5541 * @frontbuffer_bits: frontbuffer plane tracking bits
5543 * This function gets called every time rendering on the given planes start.
5544 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5546 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5548 void intel_edp_drrs_invalidate(struct drm_device *dev,
5549 unsigned frontbuffer_bits)
5551 struct drm_i915_private *dev_priv = dev->dev_private;
5552 struct drm_crtc *crtc;
5555 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
/* Rendering started: stop any pending downclock. */
5558 cancel_delayed_work(&dev_priv->drrs.work);
5560 mutex_lock(&dev_priv->drrs.mutex);
5561 if (!dev_priv->drrs.dp) {
5562 mutex_unlock(&dev_priv->drrs.mutex);
5566 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5567 pipe = to_intel_crtc(crtc)->pipe;
/* Only bits belonging to the DRRS pipe are relevant. */
5569 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5570 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5572 /* invalidate means busy screen hence upclock */
5573 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5574 intel_dp_set_drrs_state(dev_priv->dev,
5575 dev_priv->drrs.dp->attached_connector->panel.
5576 fixed_mode->vrefresh);
5578 mutex_unlock(&dev_priv->drrs.mutex);
5582 * intel_edp_drrs_flush - Restart Idleness DRRS
5584 * @frontbuffer_bits: frontbuffer plane tracking bits
5586 * This function gets called every time rendering on the given planes has
5587 * completed or flip on a crtc is completed. So DRRS should be upclocked
5588 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5589 * if no other planes are dirty.
5591 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5593 void intel_edp_drrs_flush(struct drm_device *dev,
5594 unsigned frontbuffer_bits)
5596 struct drm_i915_private *dev_priv = dev->dev_private;
5597 struct drm_crtc *crtc;
5600 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5603 cancel_delayed_work(&dev_priv->drrs.work);
5605 mutex_lock(&dev_priv->drrs.mutex);
5606 if (!dev_priv->drrs.dp) {
5607 mutex_unlock(&dev_priv->drrs.mutex);
5611 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5612 pipe = to_intel_crtc(crtc)->pipe;
/* Clear the completed planes from the busy set for the DRRS pipe. */
5614 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5615 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5617 /* flush means busy screen hence upclock */
5618 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5619 intel_dp_set_drrs_state(dev_priv->dev,
5620 dev_priv->drrs.dp->attached_connector->panel.
5621 fixed_mode->vrefresh);
5624 * flush also means no more activity hence schedule downclock, if all
5625 * other fbs are quiescent too
5627 if (!dev_priv->drrs.busy_frontbuffer_bits)
5628 schedule_delayed_work(&dev_priv->drrs.work,
5629 msecs_to_jiffies(1000));
5630 mutex_unlock(&dev_priv->drrs.mutex);
5634 * DOC: Display Refresh Rate Switching (DRRS)
5636 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5637 * which enables switching between low and high refresh rates,
5638 * dynamically, based on the usage scenario. This feature is applicable
5639 * for internal panels.
5641 * Indication that the panel supports DRRS is given by the panel EDID, which
5642 * would list multiple refresh rates for one resolution.
5644 * DRRS is of 2 types - static and seamless.
5645 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5646 * (may appear as a blink on screen) and is used in dock-undock scenario.
5647 * Seamless DRRS involves changing RR without any visual effect to the user
5648 * and can be used during normal system usage. This is done by programming
5649 * certain registers.
5651 * Support for static/seamless DRRS may be indicated in the VBT based on
5652 * inputs from the panel spec.
5654 * DRRS saves power by switching to low RR based on usage scenarios.
5657 * The implementation is based on frontbuffer tracking implementation.
5658 * When there is a disturbance on the screen triggered by user activity or a
5659 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5660 * When there is no movement on screen, after a timeout of 1 second, a switch
5661 * to low RR is made.
5662 * For integration with frontbuffer tracking code,
5663 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5665 * DRRS can be further extended to support other internal panels and also
5666 * the scenario of video playback wherein RR is set based on the rate
5667 * requested by userspace.
5671 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5672 * @intel_connector: eDP connector
5673 * @fixed_mode: preferred mode of panel
5675 * This function is called only once at driver load to initialize basic
5679 * Downclock mode if panel supports it, else return NULL.
5680 * DRRS support is determined by the presence of downclock mode (apart
5681 * from VBT setting).
5683 static struct drm_display_mode *
5684 intel_dp_drrs_init(struct intel_connector *intel_connector,
5685 struct drm_display_mode *fixed_mode)
5687 struct drm_connector *connector = &intel_connector->base;
5688 struct drm_device *dev = connector->dev;
5689 struct drm_i915_private *dev_priv = dev->dev_private;
5690 struct drm_display_mode *downclock_mode = NULL;
5692 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5693 mutex_init(&dev_priv->drrs.mutex);
5695 if (INTEL_INFO(dev)->gen <= 6) {
5696 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5700 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5701 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
/* A lower-refresh variant of the fixed mode must exist in the EDID. */
5705 downclock_mode = intel_find_panel_downclock
5706 (dev, fixed_mode, connector);
5708 if (!downclock_mode) {
5709 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5713 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5715 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5716 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5717 return downclock_mode;
/* One-time eDP connector setup: sanitize VDD, read and cache DPCD/EDID
 * (a failed DPCD read means a "ghost" panel and aborts), program the
 * panel power sequencer, pick the fixed/downclock modes, and initialize
 * panel/backlight state. Returns false if eDP init should be abandoned. */
5720 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5721 struct intel_connector *intel_connector)
5723 struct drm_connector *connector = &intel_connector->base;
5724 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5725 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5726 struct drm_device *dev = intel_encoder->base.dev;
5727 struct drm_i915_private *dev_priv = dev->dev_private;
5728 struct drm_display_mode *fixed_mode = NULL;
5729 struct drm_display_mode *downclock_mode = NULL;
5731 struct drm_display_mode *scan;
5733 enum pipe pipe = INVALID_PIPE;
/* Non-eDP connectors need none of this. */
5735 if (!is_edp(intel_dp))
5739 intel_edp_panel_vdd_sanitize(intel_dp);
5740 pps_unlock(intel_dp);
5742 /* Cache DPCD and EDID for edp. */
5743 has_dpcd = intel_dp_get_dpcd(intel_dp);
5746 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5747 dev_priv->no_aux_handshake =
5748 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5749 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5751 /* if this fails, presume the device is a ghost */
5752 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5756 /* We now know it's not a ghost, init power sequence regs. */
5758 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5759 pps_unlock(intel_dp);
5761 mutex_lock(&dev->mode_config.mutex);
5762 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5764 if (drm_add_edid_modes(connector, edid)) {
5765 drm_mode_connector_update_edid_property(connector,
5767 drm_edid_to_eld(connector, edid);
/* Keep a sentinel so later code can tell "bad EDID" from "no EDID". */
5770 edid = ERR_PTR(-EINVAL);
5773 edid = ERR_PTR(-ENOENT);
5775 intel_connector->edid = edid;
5777 /* prefer fixed mode from EDID if available */
5778 list_for_each_entry(scan, &connector->probed_modes, head) {
5779 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5780 fixed_mode = drm_mode_duplicate(dev, scan);
5781 downclock_mode = intel_dp_drrs_init(
5782 intel_connector, fixed_mode);
5787 /* fallback to VBT if available for eDP */
5788 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5789 fixed_mode = drm_mode_duplicate(dev,
5790 dev_priv->vbt.lfp_lvds_vbt_mode);
5792 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5794 mutex_unlock(&dev->mode_config.mutex);
5796 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5797 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5798 register_reboot_notifier(&intel_dp->edp_notifier);
5801 * Figure out the current pipe for the initial backlight setup.
5802 * If the current pipe isn't valid, try the PPS pipe, and if that
5803 * fails just assume pipe A.
5805 if (IS_CHERRYVIEW(dev))
5806 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5808 pipe = PORT_TO_PIPE(intel_dp->DP);
5810 if (pipe != PIPE_A && pipe != PIPE_B)
5811 pipe = intel_dp->pps_pipe;
5813 if (pipe != PIPE_A && pipe != PIPE_B)
5816 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5820 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5821 intel_connector->panel.backlight.power = intel_edp_backlight_power;
5822 intel_panel_setup_backlight(connector, pipe);
/*
 * Set up the DRM connector for a DP or eDP digital port: select the
 * per-platform AUX vfuncs, register the connector, pick the hotplug
 * pin, initialise the eDP panel power sequencer and (on ports that
 * support it) MST.  The visible failure path for eDP unregisters and
 * cleans up the connector again.
 */
5828 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5829 struct intel_connector *intel_connector)
5831 struct drm_connector *connector = &intel_connector->base;
5832 struct intel_dp *intel_dp = &intel_dig_port->dp;
5833 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5834 struct drm_device *dev = intel_encoder->base.dev;
5835 struct drm_i915_private *dev_priv = dev->dev_private;
5836 enum port port = intel_dig_port->port;
/* DP needs at least one lane; warn and bail on bogus port setup. */
5839 if (WARN(intel_dig_port->max_lanes < 1,
5840 "Not enough lanes (%d) for DP on port %c\n",
5841 intel_dig_port->max_lanes, port_name(port)))
/* No panel power sequencer pipe claimed yet. */
5844 intel_dp->pps_pipe = INVALID_PIPE;
5846 /* intel_dp vfuncs */
/* AUX clock divider is platform specific: SKL+/gen9, VLV/CHV, HSW/BDW, PCH split, then legacy gmch. */
5847 if (INTEL_INFO(dev)->gen >= 9)
5848 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5849 else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5850 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5851 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5852 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5853 else if (HAS_PCH_SPLIT(dev))
5854 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5856 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
/* The AUX_CTL register layout changed on gen9; pick the matching send-ctl helper. */
5858 if (INTEL_INFO(dev)->gen >= 9)
5859 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5861 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5864 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5866 /* Preserve the current hw state. */
5867 intel_dp->DP = I915_READ(intel_dp->output_reg);
5868 intel_dp->attached_connector = intel_connector;
/* VBT/strap decide whether this port drives an embedded panel. */
5870 if (intel_dp_is_edp(dev, port))
5871 type = DRM_MODE_CONNECTOR_eDP;
5873 type = DRM_MODE_CONNECTOR_DisplayPort;
5876 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5877 * for DP the encoder type can be set by the caller to
5878 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5880 if (type == DRM_MODE_CONNECTOR_eDP)
5881 intel_encoder->type = INTEL_OUTPUT_EDP;
5883 /* eDP only on port B and/or C on vlv/chv */
5884 if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
5885 is_edp(intel_dp) && port != PORT_B && port != PORT_C))
5888 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5889 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5892 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5893 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5895 connector->interlace_allowed = true;
5896 connector->doublescan_allowed = 0;
/* Deferred work that drops the eDP panel VDD force after AUX traffic quiesces. */
5898 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5899 edp_panel_vdd_work);
5901 intel_connector_attach_encoder(intel_connector, intel_encoder);
5902 drm_connector_register(connector);
/* DDI platforms read connector state differently from gmch/PCH ones. */
5905 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5907 intel_connector->get_hw_state = intel_connector_get_hw_state;
5908 intel_connector->unregister = intel_dp_connector_unregister;
5910 /* Set up the hotplug pin. */
5913 intel_encoder->hpd_pin = HPD_PORT_A;
5916 intel_encoder->hpd_pin = HPD_PORT_B;
/* BXT A1 hardware quirk: port B uses the port A HPD pin — TODO confirm against bspec. */
5917 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
5918 intel_encoder->hpd_pin = HPD_PORT_A;
5921 intel_encoder->hpd_pin = HPD_PORT_C;
5924 intel_encoder->hpd_pin = HPD_PORT_D;
5927 intel_encoder->hpd_pin = HPD_PORT_E;
/* eDP panel power sequencing must be set up (under pps_lock) before any AUX use. */
5933 if (is_edp(intel_dp)) {
5935 intel_dp_init_panel_power_timestamps(intel_dp);
5936 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5937 vlv_initial_power_sequencer_setup(intel_dp);
5939 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5940 pps_unlock(intel_dp);
5943 ret = intel_dp_aux_init(intel_dp, intel_connector);
5947 /* init MST on ports that can support it */
5948 if (HAS_DP_MST(dev) &&
5949 (port == PORT_B || port == PORT_C || port == PORT_D))
5950 intel_dp_mst_encoder_init(intel_dig_port,
5951 intel_connector->base.base.id);
/* eDP panel probe failed: undo AUX and MST setup before the fail path below. */
5953 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5954 intel_dp_aux_fini(intel_dp);
5955 intel_dp_mst_encoder_cleanup(intel_dig_port);
5959 intel_dp_add_properties(intel_dp, connector);
5961 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5962 * 0xd. Failure to do so will result in spurious interrupts being
5963 * generated on the port when a cable is not attached.
5965 if (IS_G4X(dev) && !IS_GM45(dev)) {
5966 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5967 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5970 i915_debugfs_connector_add(connector);
/*
 * Failure teardown: make sure any outstanding delayed VDD-off has run
 * and VDD really is off before unregistering the connector.
 */
5975 if (is_edp(intel_dp)) {
5976 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5978 * vdd might still be enabled do to the delayed vdd off.
5979 * Make sure vdd is actually turned off here.
5982 edp_panel_vdd_off_sync(intel_dp);
5983 pps_unlock(intel_dp);
5985 drm_connector_unregister(connector);
5986 drm_connector_cleanup(connector);
/*
 * Allocate and register a DP digital port encoder on @output_reg/@port:
 * allocate the digital port and connector, init the DRM encoder, wire
 * up the per-platform enable/disable vfuncs, then hand off to
 * intel_dp_init_connector().  Errors unwind via the goto labels at the
 * bottom (encoder cleanup, connector free, port free).
 */
5992 intel_dp_init(struct drm_device *dev,
5993 i915_reg_t output_reg, enum port port)
5995 struct drm_i915_private *dev_priv = dev->dev_private;
5996 struct intel_digital_port *intel_dig_port;
5997 struct intel_encoder *intel_encoder;
5998 struct drm_encoder *encoder;
5999 struct intel_connector *intel_connector;
6001 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6002 if (!intel_dig_port)
6005 intel_connector = intel_connector_alloc();
6006 if (!intel_connector)
6007 goto err_connector_alloc;
6009 intel_encoder = &intel_dig_port->base;
6010 encoder = &intel_encoder->base;
6012 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6013 DRM_MODE_ENCODER_TMDS, NULL))
6014 goto err_encoder_init;
6016 intel_encoder->compute_config = intel_dp_compute_config;
6017 intel_encoder->disable = intel_disable_dp;
6018 intel_encoder->get_hw_state = intel_dp_get_hw_state;
6019 intel_encoder->get_config = intel_dp_get_config;
6020 intel_encoder->suspend = intel_dp_encoder_suspend;
/* Enable/disable sequencing differs per platform: CHV, VLV, then g4x/ilk+. */
6021 if (IS_CHERRYVIEW(dev)) {
6022 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6023 intel_encoder->pre_enable = chv_pre_enable_dp;
6024 intel_encoder->enable = vlv_enable_dp;
6025 intel_encoder->post_disable = chv_post_disable_dp;
6026 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6027 } else if (IS_VALLEYVIEW(dev)) {
6028 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6029 intel_encoder->pre_enable = vlv_pre_enable_dp;
6030 intel_encoder->enable = vlv_enable_dp;
6031 intel_encoder->post_disable = vlv_post_disable_dp;
6033 intel_encoder->pre_enable = g4x_pre_enable_dp;
6034 intel_encoder->enable = g4x_enable_dp;
6035 if (INTEL_INFO(dev)->gen >= 5)
6036 intel_encoder->post_disable = ilk_post_disable_dp;
6039 intel_dig_port->port = port;
6040 dev_priv->dig_port_map[port] = intel_encoder;
6041 intel_dig_port->dp.output_reg = output_reg;
6042 intel_dig_port->max_lanes = 4;
6044 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
/* Which pipes may feed this encoder; CHV splits by port, older gens allow pipes A/B. */
6045 if (IS_CHERRYVIEW(dev)) {
6047 intel_encoder->crtc_mask = 1 << 2;
6049 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6051 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6053 intel_encoder->cloneable = 0;
/* Route long/short HPD pulses for this port to the DP handler. */
6055 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6056 dev_priv->hotplug.irq_port[port] = intel_dig_port;
6058 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6059 goto err_init_connector;
/* Error unwind: undo allocations in reverse order of acquisition. */
6064 drm_encoder_cleanup(encoder);
6066 kfree(intel_connector);
6067 err_connector_alloc:
6068 kfree(intel_dig_port);
/*
 * System-suspend hook: walk every hotplug-capable digital port and
 * suspend the MST topology manager on DP ports that have an active
 * MST session (can_mst set and is_mst currently true).
 */
6073 void intel_dp_mst_suspend(struct drm_device *dev)
6075 struct drm_i915_private *dev_priv = dev->dev_private;
6079 for (i = 0; i < I915_MAX_PORTS; i++) {
6080 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
/* Ports without a registered digital port are skipped. */
6081 if (!intel_dig_port)
6084 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6085 if (!intel_dig_port->dp.can_mst)
6087 if (intel_dig_port->dp.is_mst)
6088 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6093 void intel_dp_mst_resume(struct drm_device *dev)
6095 struct drm_i915_private *dev_priv = dev->dev_private;
6098 for (i = 0; i < I915_MAX_PORTS; i++) {
6099 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6100 if (!intel_dig_port)
6102 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6105 if (!intel_dig_port->dp.can_mst)
6108 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6110 intel_dp_check_mst_status(&intel_dig_port->dp);