/* drivers/gpu/drm/i915/intel_dp.c */
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
42 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
43
44 /* Compliance test status bits  */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK  0
46 #define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
/*
 * Associates a DP link rate with the DPLL divider settings needed to
 * generate that link clock on a given platform.
 */
struct dp_link_dpll {
	int clock;		/* link rate in kHz (e.g. 162000, 270000) */
	struct dpll dpll;	/* platform-specific PLL divider values */
};
54
/* DP link rate -> DPLL settings for gen4-class platforms. */
static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
61
/* DP link rate -> DPLL settings for PCH-split platforms. */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
68
/* DP link rate -> DPLL settings for Valleyview. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
75
76 /*
77  * CHV supports eDP 1.4 that have  more link rates.
78  * Below only provides the fixed rate but exclude variable rate.
79  */
80 static const struct dp_link_dpll chv_dpll[] = {
81         /*
82          * CHV requires to program fractional division for m2.
83          * m2 is stored in fixed point format using formula below
84          * (m2_int << 22) | m2_fraction
85          */
86         { 162000,       /* m2_int = 32, m2_fraction = 1677722 */
87                 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88         { 270000,       /* m2_int = 27, m2_fraction = 0 */
89                 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90         { 540000,       /* m2_int = 27, m2_fraction = 0 */
91                 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
92 };
93
/* Source-side DP link rates in kHz, per platform. */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
/* Standard DP rates (RBR/HBR/HBR2) used on everything else. */
static const int default_rates[] = { 162000, 270000, 540000 };
99
100 /**
101  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102  * @intel_dp: DP struct
103  *
104  * If a CPU or PCH DP output is attached to an eDP panel, this function
105  * will return true, and false otherwise.
106  */
107 static bool is_edp(struct intel_dp *intel_dp)
108 {
109         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
110
111         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
112 }
113
114 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
115 {
116         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
117
118         return intel_dig_port->base.base.dev;
119 }
120
121 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
122 {
123         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
124 }
125
126 static void intel_dp_link_down(struct intel_dp *intel_dp);
127 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
128 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
129 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
130 static void vlv_steal_power_sequencer(struct drm_device *dev,
131                                       enum pipe pipe);
132
/*
 * Return a 4-bit mask of the DP lanes *not* used by a link of
 * @lane_count lanes; lanes are assigned from bit 0 upwards.
 */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return (0xf >> lane_count) << lane_count;
}
137
138 static int
139 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
140 {
141         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
142
143         switch (max_link_bw) {
144         case DP_LINK_BW_1_62:
145         case DP_LINK_BW_2_7:
146         case DP_LINK_BW_5_4:
147                 break;
148         default:
149                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
150                      max_link_bw);
151                 max_link_bw = DP_LINK_BW_1_62;
152                 break;
153         }
154         return max_link_bw;
155 }
156
157 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
158 {
159         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
160         u8 source_max, sink_max;
161
162         source_max = intel_dig_port->max_lanes;
163         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
164
165         return min(source_max, sink_max);
166 }
167
168 /*
169  * The units on the numbers in the next two are... bizarre.  Examples will
170  * make it clearer; this one parallels an example in the eDP spec.
171  *
172  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
173  *
174  *     270000 * 1 * 8 / 10 == 216000
175  *
176  * The actual data capacity of that configuration is 2.16Gbit/s, so the
177  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
178  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
179  * 119000.  At 18bpp that's 2142000 kilobits per second.
180  *
181  * Thus the strange-looking division by 10 in intel_dp_link_required, to
182  * get the result in decakilobits instead of kilobits.
183  */
184
/*
 * Convert a pixel clock (kHz) and bpp into the required link
 * bandwidth in decakilobits/s, rounding up (see units comment above).
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kbps = pixel_clock * bpp;

	return kbps / 10 + (kbps % 10 != 0);
}
190
/*
 * Total data bandwidth of the link in decakilobits/s: with 8b/10b
 * channel coding only 8 of every 10 link bits carry data.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int link_rate = max_link_clock * max_lanes;

	return link_rate * 8 / 10;
}
196
197 static enum drm_mode_status
198 intel_dp_mode_valid(struct drm_connector *connector,
199                     struct drm_display_mode *mode)
200 {
201         struct intel_dp *intel_dp = intel_attached_dp(connector);
202         struct intel_connector *intel_connector = to_intel_connector(connector);
203         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
204         int target_clock = mode->clock;
205         int max_rate, mode_rate, max_lanes, max_link_clock;
206
207         if (is_edp(intel_dp) && fixed_mode) {
208                 if (mode->hdisplay > fixed_mode->hdisplay)
209                         return MODE_PANEL;
210
211                 if (mode->vdisplay > fixed_mode->vdisplay)
212                         return MODE_PANEL;
213
214                 target_clock = fixed_mode->clock;
215         }
216
217         max_link_clock = intel_dp_max_link_rate(intel_dp);
218         max_lanes = intel_dp_max_lane_count(intel_dp);
219
220         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
221         mode_rate = intel_dp_link_required(target_clock, 18);
222
223         if (mode_rate > max_rate)
224                 return MODE_CLOCK_HIGH;
225
226         if (mode->clock < 10000)
227                 return MODE_CLOCK_LOW;
228
229         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
230                 return MODE_H_ILLEGAL;
231
232         return MODE_OK;
233 }
234
/*
 * Pack up to 4 message bytes into a big-endian 32-bit word as expected
 * by the AUX channel data registers.  Bytes beyond the fourth are
 * ignored; missing bytes leave zeroes in the low-order positions.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i, n = src_bytes > 4 ? 4 : src_bytes;
	uint32_t v = 0;

	for (i = 0; i < n; i++)
		v |= (uint32_t)src[i] << (24 - i * 8);

	return v;
}
246
/*
 * Inverse of intel_dp_pack_aux(): split a big-endian 32-bit AUX data
 * word into up to 4 bytes, most significant byte first.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i, n = dst_bytes > 4 ? 4 : dst_bytes;

	for (i = 0; i < n; i++)
		dst[i] = (uint8_t)(src >> (24 - i * 8));
}
255
256 static void
257 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
258                                     struct intel_dp *intel_dp);
259 static void
260 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
261                                               struct intel_dp *intel_dp);
262
/*
 * Take the panel power sequencer mutex.  Must be paired with
 * pps_unlock().  The port's AUX power domain reference is taken
 * *before* grabbing pps_mutex to avoid a lock inversion with the
 * power domain mutex; see vlv_power_sequencer_reset().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
280
/*
 * Release the panel power sequencer mutex and drop the AUX power
 * domain reference taken by pps_lock(), in the reverse order.
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	/* Power domain put must happen outside pps_mutex. */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
294
/*
 * VLV/CHV: make the power sequencer assigned to intel_dp->pps_pipe
 * "lock onto" this port by briefly enabling and disabling the DP port
 * with a minimal single-lane configuration.  Until the sequencer has
 * locked on, even the VDD force bit has no effect.  The pipe's DPLL
 * (and on CHV the PHY lane) is force-enabled temporarily if needed.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	/* Kicking an actively driven port would disturb the live link. */
	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	/* Route the port to the pipe whose sequencer we're kicking. */
	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		/* On CHV the PHY channel may need powering up first. */
		release_cl_override = IS_CHERRYVIEW(dev) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		/*
		 * NOTE(review): if vlv_force_pll_on() fails we return
		 * without undoing the cl override taken above — confirm
		 * whether that leak is intentional.
		 */
		if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	/* Undo the temporary PLL force-enable and PHY override. */
	if (!pll_enabled) {
		vlv_force_pll_off(dev, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
368
/*
 * Return the pipe whose panel power sequencer is assigned to this eDP
 * port, picking, stealing and initializing a free one on first use.
 * Must be called with pps_mutex held.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	/* Already assigned? Nothing more to do. */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	/* Take the sequencer away from whatever port held it before. */
	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
431
432 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
433                                enum pipe pipe);
434
/* vlv_pipe_check: does the sequencer on @pipe have panel power on? */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}
440
/* vlv_pipe_check: does the sequencer on @pipe have VDD forced on? */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}
446
/* vlv_pipe_check that accepts any pipe; used as the last resort. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
452
453 static enum pipe
454 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
455                      enum port port,
456                      vlv_pipe_check pipe_check)
457 {
458         enum pipe pipe;
459
460         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
461                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
462                         PANEL_PORT_SELECT_MASK;
463
464                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
465                         continue;
466
467                 if (!pipe_check(dev_priv, pipe))
468                         continue;
469
470                 return pipe;
471         }
472
473         return INVALID_PIPE;
474 }
475
/*
 * At driver init, figure out which pipe's power sequencer (if any) the
 * BIOS left driving this eDP port: prefer one with panel power already
 * on, then one with VDD on, then any with a matching port select.  If
 * none is found, leave pps_pipe invalid and let
 * vlv_power_sequencer_pipe() pick one later.  Must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
512
/**
 * vlv_power_sequencer_reset - forget all power sequencer assignments
 * @dev_priv: i915 device
 *
 * Invalidate every eDP port's cached pps_pipe so the next user
 * re-picks and re-initializes a power sequencer.  VLV/CHV only.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
541
542 static i915_reg_t
543 _pp_ctrl_reg(struct intel_dp *intel_dp)
544 {
545         struct drm_device *dev = intel_dp_to_dev(intel_dp);
546
547         if (IS_BROXTON(dev))
548                 return BXT_PP_CONTROL(0);
549         else if (HAS_PCH_SPLIT(dev))
550                 return PCH_PP_CONTROL;
551         else
552                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
553 }
554
555 static i915_reg_t
556 _pp_stat_reg(struct intel_dp *intel_dp)
557 {
558         struct drm_device *dev = intel_dp_to_dev(intel_dp);
559
560         if (IS_BROXTON(dev))
561                 return BXT_PP_STATUS(0);
562         else if (HAS_PCH_SPLIT(dev))
563                 return PCH_PP_STATUS;
564         else
565                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
566 }
567
/*
 * Reboot notifier handler: shut down panel power so the T12 (power
 * cycle) timing is guaranteed across the reboot.  Only applicable when
 * panel PM state is not otherwise tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Only act on restarts of eDP-equipped devices. */
	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		/* Force the panel off and wait out the power cycle delay. */
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
603
/*
 * Does the panel power sequencer report panel power (PP_ON)?
 * Caller must hold pps_mutex.
 */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* On VLV/CHV no assigned sequencer means power can't be on. */
	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
617
/*
 * Does the panel power sequencer have the VDD force bit set?
 * Caller must hold pps_mutex.
 */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* On VLV/CHV no assigned sequencer means VDD can't be forced. */
	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
631
/*
 * Sanity check before starting an AUX transfer on eDP: warn if the
 * panel has neither panel power nor VDD force on, since the transfer
 * is then expected to fail.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
648
/*
 * Wait for the in-flight AUX transaction to finish (SEND_BUSY to
 * clear): sleep on the gmbus wait queue when the AUX interrupt is
 * available, otherwise poll atomically.  Returns the last value read
 * from the AUX channel control register.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* C re-reads the control register each time the condition is evaluated. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
672
673 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
674 {
675         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
676         struct drm_device *dev = intel_dig_port->base.base.dev;
677
678         /*
679          * The clock divider is based off the hrawclk, and would like to run at
680          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
681          */
682         return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
683 }
684
685 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
686 {
687         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
688         struct drm_device *dev = intel_dig_port->base.base.dev;
689         struct drm_i915_private *dev_priv = dev->dev_private;
690
691         if (index)
692                 return 0;
693
694         if (intel_dig_port->port == PORT_A) {
695                 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
696
697         } else {
698                 return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
699         }
700 }
701
/*
 * HSW/BDW AUX clock divider: port A divides cdclk_freq by 2000;
 * LPT-H (non-ULT) PCH ports use fixed workaround values and allow a
 * second retry divider; other ports divide the PCH raw clock by 2.
 */
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else  {
		return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
	}
}
723
/* VLV uses a fixed AUX clock divider of 100; only one attempt. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 100;
}
728
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware
	 * will derive the clock from CDCLK automatically).  We still
	 * implement this vfunc, returning a dummy non-zero divider for
	 * the first attempt so the common retry loop works unchanged.
	 */
	if (index)
		return 0;

	return 1;
}
738
739 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
740                                       bool has_aux_irq,
741                                       int send_bytes,
742                                       uint32_t aux_clock_divider)
743 {
744         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
745         struct drm_device *dev = intel_dig_port->base.base.dev;
746         uint32_t precharge, timeout;
747
748         if (IS_GEN6(dev))
749                 precharge = 3;
750         else
751                 precharge = 5;
752
753         if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
754                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
755         else
756                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
757
758         return DP_AUX_CH_CTL_SEND_BUSY |
759                DP_AUX_CH_CTL_DONE |
760                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
761                DP_AUX_CH_CTL_TIME_OUT_ERROR |
762                timeout |
763                DP_AUX_CH_CTL_RECEIVE_ERROR |
764                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
765                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
766                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
767 }
768
/*
 * Build the AUX_CTL value for SKL+: like i9xx_get_aux_send_ctl() but
 * with a fixed 1600us timeout and a 32-pulse sync count, and no clock
 * divider or precharge fields (the divider arg is unused).
 */
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
783
/*
 * Perform one raw transaction on the DP AUX channel.
 *
 * @send/@send_bytes: request bytes to transmit (at most 20 — the hardware
 *      only has 5 32-bit data registers).
 * @recv/@recv_size: buffer for the reply bytes.
 *
 * Returns the number of bytes received on success, or a negative error
 * code: -EBUSY (channel busy / forbidden reply size), -E2BIG (message
 * too large), -EIO (receive error), -ETIMEDOUT (no sink response).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
                const uint8_t *send, int send_bytes,
                uint8_t *recv, int recv_size)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t aux_clock_divider;
        int i, ret, recv_bytes;
        uint32_t status;
        int try, clock = 0;
        bool has_aux_irq = HAS_AUX_IRQ(dev);
        bool vdd;

        pps_lock(intel_dp);

        /*
         * We will be called with VDD already enabled for dpcd/edid/oui reads.
         * In such cases we want to leave VDD enabled and it's up to upper layers
         * to turn it off. But for eg. i2c-dev access we need to turn it on/off
         * ourselves.
         */
        vdd = edp_panel_vdd_on(intel_dp);

        /* dp aux is extremely sensitive to irq latency, hence request the
         * lowest possible wakeup latency and so prevent the cpu from going into
         * deep sleep states.
         */
        pm_qos_update_request(&dev_priv->pm_qos, 0);

        intel_dp_check_edp(intel_dp);

        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
                status = I915_READ_NOTRACE(ch_ctl);
                if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
                        break;
                msleep(1);
        }

        if (try == 3) {
                /* Rate-limit: only WARN when the stuck status value changes,
                 * so a persistently wedged channel doesn't spam the log. */
                static u32 last_status = -1;
                const u32 status = I915_READ(ch_ctl);

                if (status != last_status) {
                        WARN(1, "dp_aux_ch not started status 0x%08x\n",
                             status);
                        last_status = status;
                }

                ret = -EBUSY;
                goto out;
        }

        /* Only 5 data registers! */
        if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
                ret = -E2BIG;
                goto out;
        }

        /* Try each AUX clock divider the platform provides (the hook
         * returns 0 when the list is exhausted). */
        while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
                u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
                                                          has_aux_irq,
                                                          send_bytes,
                                                          aux_clock_divider);

                /* Must try at least 3 times according to DP spec */
                for (try = 0; try < 5; try++) {
                        /* Load the send data into the aux channel data registers */
                        for (i = 0; i < send_bytes; i += 4)
                                I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
                                           intel_dp_pack_aux(send + i,
                                                             send_bytes - i));

                        /* Send the command and wait for it to complete */
                        I915_WRITE(ch_ctl, send_ctl);

                        status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

                        /* Clear done status and any errors */
                        I915_WRITE(ch_ctl,
                                   status |
                                   DP_AUX_CH_CTL_DONE |
                                   DP_AUX_CH_CTL_TIME_OUT_ERROR |
                                   DP_AUX_CH_CTL_RECEIVE_ERROR);

                        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
                                continue;

                        /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
                         *   400us delay required for errors and timeouts
                         *   Timeout errors from the HW already meet this
                         *   requirement so skip to next iteration
                         */
                        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                                usleep_range(400, 500);
                                continue;
                        }
                        if (status & DP_AUX_CH_CTL_DONE)
                                goto done;
                }
        }

        if ((status & DP_AUX_CH_CTL_DONE) == 0) {
                DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
                ret = -EBUSY;
                goto out;
        }

done:
        /* Check for timeout or receive error.
         * Timeouts occur when the sink is not connected
         */
        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
                ret = -EIO;
                goto out;
        }

        /* Timeouts occur when the device isn't connected, so they're
         * "normal" -- don't fill the kernel log with these */
        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
                DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
                ret = -ETIMEDOUT;
                goto out;
        }

        /* Unload any bytes sent back from the other side */
        recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
                      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

        /*
         * By BSpec: "Message sizes of 0 or >20 are not allowed."
         * We have no idea of what happened so we return -EBUSY so
         * drm layer takes care for the necessary retries.
         */
        if (recv_bytes == 0 || recv_bytes > 20) {
                DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
                              recv_bytes);
                /*
                 * FIXME: This patch was created on top of a series that
                 * organize the retries at drm level. There EBUSY should
                 * also take care for 1ms wait before retrying.
                 * That aux retries re-org is still needed and after that is
                 * merged we remove this sleep from here.
                 */
                usleep_range(1000, 1500);
                ret = -EBUSY;
                goto out;
        }

        /* Don't overflow the caller's buffer. */
        if (recv_bytes > recv_size)
                recv_bytes = recv_size;

        for (i = 0; i < recv_bytes; i += 4)
                intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
                                    recv + i, recv_bytes - i);

        ret = recv_bytes;
out:
        /* Undo everything in reverse order of acquisition. */
        pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

        if (vdd)
                edp_panel_vdd_off(intel_dp, false);

        pps_unlock(intel_dp);

        return ret;
}
955
956 #define BARE_ADDRESS_SIZE       3
957 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux.transfer() hook: marshal a drm_dp_aux_msg into the raw
 * byte stream intel_dp_aux_ch() expects and decode the reply.
 *
 * The 4-byte header packs the request type and 20-bit address into
 * txbuf[0..2] and the length-minus-one into txbuf[3]; address-only
 * messages (msg->size == 0) transmit just the BARE_ADDRESS_SIZE bytes.
 *
 * Returns the payload size transferred, or a negative error code
 * propagated from intel_dp_aux_ch() (or -EINVAL/-E2BIG for bad input).
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
        struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
        uint8_t txbuf[20], rxbuf[20];
        size_t txsize, rxsize;
        int ret;

        txbuf[0] = (msg->request << 4) |
                ((msg->address >> 16) & 0xf);
        txbuf[1] = (msg->address >> 8) & 0xff;
        txbuf[2] = msg->address & 0xff;
        txbuf[3] = msg->size - 1;

        switch (msg->request & ~DP_AUX_I2C_MOT) {
        case DP_AUX_NATIVE_WRITE:
        case DP_AUX_I2C_WRITE:
        case DP_AUX_I2C_WRITE_STATUS_UPDATE:
                txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
                rxsize = 2; /* 0 or 1 data bytes */

                if (WARN_ON(txsize > 20))
                        return -E2BIG;

                if (msg->buffer)
                        memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
                else
                        WARN_ON(msg->size);

                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
                        /* rxbuf[0] high nibble carries the sink's reply code. */
                        msg->reply = rxbuf[0] >> 4;

                        if (ret > 1) {
                                /* Number of bytes written in a short write. */
                                ret = clamp_t(int, rxbuf[1], 0, msg->size);
                        } else {
                                /* Return payload size. */
                                ret = msg->size;
                        }
                }
                break;

        case DP_AUX_NATIVE_READ:
        case DP_AUX_I2C_READ:
                txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
                /* Reply is one header byte plus up to msg->size payload bytes. */
                rxsize = msg->size + 1;

                if (WARN_ON(rxsize > 20))
                        return -E2BIG;

                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
                        msg->reply = rxbuf[0] >> 4;
                        /*
                         * Assume happy day, and copy the data. The caller is
                         * expected to check msg->reply before touching it.
                         *
                         * Return payload size.
                         */
                        ret--;
                        memcpy(msg->buffer, rxbuf + 1, ret);
                }
                break;

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
1030
1031 static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1032                                        enum port port)
1033 {
1034         switch (port) {
1035         case PORT_B:
1036         case PORT_C:
1037         case PORT_D:
1038                 return DP_AUX_CH_CTL(port);
1039         default:
1040                 MISSING_CASE(port);
1041                 return DP_AUX_CH_CTL(PORT_B);
1042         }
1043 }
1044
1045 static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1046                                         enum port port, int index)
1047 {
1048         switch (port) {
1049         case PORT_B:
1050         case PORT_C:
1051         case PORT_D:
1052                 return DP_AUX_CH_DATA(port, index);
1053         default:
1054                 MISSING_CASE(port);
1055                 return DP_AUX_CH_DATA(PORT_B, index);
1056         }
1057 }
1058
1059 static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1060                                        enum port port)
1061 {
1062         switch (port) {
1063         case PORT_A:
1064                 return DP_AUX_CH_CTL(port);
1065         case PORT_B:
1066         case PORT_C:
1067         case PORT_D:
1068                 return PCH_DP_AUX_CH_CTL(port);
1069         default:
1070                 MISSING_CASE(port);
1071                 return DP_AUX_CH_CTL(PORT_A);
1072         }
1073 }
1074
1075 static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1076                                         enum port port, int index)
1077 {
1078         switch (port) {
1079         case PORT_A:
1080                 return DP_AUX_CH_DATA(port, index);
1081         case PORT_B:
1082         case PORT_C:
1083         case PORT_D:
1084                 return PCH_DP_AUX_CH_DATA(port, index);
1085         default:
1086                 MISSING_CASE(port);
1087                 return DP_AUX_CH_DATA(PORT_A, index);
1088         }
1089 }
1090
1091 /*
1092  * On SKL we don't have Aux for port E so we rely
1093  * on VBT to set a proper alternate aux channel.
1094  */
1095 static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1096 {
1097         const struct ddi_vbt_port_info *info =
1098                 &dev_priv->vbt.ddi_port_info[PORT_E];
1099
1100         switch (info->alternate_aux_channel) {
1101         case DP_AUX_A:
1102                 return PORT_A;
1103         case DP_AUX_B:
1104                 return PORT_B;
1105         case DP_AUX_C:
1106                 return PORT_C;
1107         case DP_AUX_D:
1108                 return PORT_D;
1109         default:
1110                 MISSING_CASE(info->alternate_aux_channel);
1111                 return PORT_A;
1112         }
1113 }
1114
1115 static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1116                                        enum port port)
1117 {
1118         if (port == PORT_E)
1119                 port = skl_porte_aux_port(dev_priv);
1120
1121         switch (port) {
1122         case PORT_A:
1123         case PORT_B:
1124         case PORT_C:
1125         case PORT_D:
1126                 return DP_AUX_CH_CTL(port);
1127         default:
1128                 MISSING_CASE(port);
1129                 return DP_AUX_CH_CTL(PORT_A);
1130         }
1131 }
1132
1133 static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1134                                         enum port port, int index)
1135 {
1136         if (port == PORT_E)
1137                 port = skl_porte_aux_port(dev_priv);
1138
1139         switch (port) {
1140         case PORT_A:
1141         case PORT_B:
1142         case PORT_C:
1143         case PORT_D:
1144                 return DP_AUX_CH_DATA(port, index);
1145         default:
1146                 MISSING_CASE(port);
1147                 return DP_AUX_CH_DATA(PORT_A, index);
1148         }
1149 }
1150
1151 static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1152                                          enum port port)
1153 {
1154         if (INTEL_INFO(dev_priv)->gen >= 9)
1155                 return skl_aux_ctl_reg(dev_priv, port);
1156         else if (HAS_PCH_SPLIT(dev_priv))
1157                 return ilk_aux_ctl_reg(dev_priv, port);
1158         else
1159                 return g4x_aux_ctl_reg(dev_priv, port);
1160 }
1161
1162 static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1163                                           enum port port, int index)
1164 {
1165         if (INTEL_INFO(dev_priv)->gen >= 9)
1166                 return skl_aux_data_reg(dev_priv, port, index);
1167         else if (HAS_PCH_SPLIT(dev_priv))
1168                 return ilk_aux_data_reg(dev_priv, port, index);
1169         else
1170                 return g4x_aux_data_reg(dev_priv, port, index);
1171 }
1172
1173 static void intel_aux_reg_init(struct intel_dp *intel_dp)
1174 {
1175         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1176         enum port port = dp_to_dig_port(intel_dp)->port;
1177         int i;
1178
1179         intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1180         for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1181                 intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1182 }
1183
/* Tear down the AUX bus: unregister from the drm dp helper core, then
 * free the name string allocated in intel_dp_aux_init(). */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
        drm_dp_aux_unregister(&intel_dp->aux);
        kfree(intel_dp->aux.name);
}
1190
1191 static int
1192 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1193 {
1194         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1195         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1196         enum port port = intel_dig_port->port;
1197         int ret;
1198
1199         intel_aux_reg_init(intel_dp);
1200
1201         intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1202         if (!intel_dp->aux.name)
1203                 return -ENOMEM;
1204
1205         intel_dp->aux.dev = dev->dev;
1206         intel_dp->aux.transfer = intel_dp_aux_transfer;
1207
1208         DRM_DEBUG_KMS("registering %s bus for %s\n",
1209                       intel_dp->aux.name,
1210                       connector->base.kdev->kobj.name);
1211
1212         ret = drm_dp_aux_register(&intel_dp->aux);
1213         if (ret < 0) {
1214                 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1215                           intel_dp->aux.name, ret);
1216                 kfree(intel_dp->aux.name);
1217                 return ret;
1218         }
1219
1220         ret = sysfs_create_link(&connector->base.kdev->kobj,
1221                                 &intel_dp->aux.ddc.dev.kobj,
1222                                 intel_dp->aux.ddc.dev.kobj.name);
1223         if (ret < 0) {
1224                 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
1225                           intel_dp->aux.name, ret);
1226                 intel_dp_aux_fini(intel_dp);
1227                 return ret;
1228         }
1229
1230         return 0;
1231 }
1232
/* Connector unregister hook: remove the DDC sysfs link created by
 * intel_dp_aux_init() (MST connectors never got one), then run the
 * common connector teardown. */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
        struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

        if (!intel_connector->mst_port)
                sysfs_remove_link(&intel_connector->base.kdev->kobj,
                                  intel_dp->aux.ddc.dev.kobj.name);
        intel_connector_unregister(intel_connector);
}
1243
1244 static void
1245 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1246 {
1247         u32 ctrl1;
1248
1249         memset(&pipe_config->dpll_hw_state, 0,
1250                sizeof(pipe_config->dpll_hw_state));
1251
1252         pipe_config->ddi_pll_sel = SKL_DPLL0;
1253         pipe_config->dpll_hw_state.cfgcr1 = 0;
1254         pipe_config->dpll_hw_state.cfgcr2 = 0;
1255
1256         ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1257         switch (pipe_config->port_clock / 2) {
1258         case 81000:
1259                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1260                                               SKL_DPLL0);
1261                 break;
1262         case 135000:
1263                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1264                                               SKL_DPLL0);
1265                 break;
1266         case 270000:
1267                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1268                                               SKL_DPLL0);
1269                 break;
1270         case 162000:
1271                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1272                                               SKL_DPLL0);
1273                 break;
1274         /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1275         results in CDCLK change. Need to handle the change of CDCLK by
1276         disabling pipes and re-enabling them */
1277         case 108000:
1278                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1279                                               SKL_DPLL0);
1280                 break;
1281         case 216000:
1282                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1283                                               SKL_DPLL0);
1284                 break;
1285
1286         }
1287         pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1288 }
1289
1290 void
1291 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1292 {
1293         memset(&pipe_config->dpll_hw_state, 0,
1294                sizeof(pipe_config->dpll_hw_state));
1295
1296         switch (pipe_config->port_clock / 2) {
1297         case 81000:
1298                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1299                 break;
1300         case 135000:
1301                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1302                 break;
1303         case 270000:
1304                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1305                 break;
1306         }
1307 }
1308
1309 static int
1310 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1311 {
1312         if (intel_dp->num_sink_rates) {
1313                 *sink_rates = intel_dp->sink_rates;
1314                 return intel_dp->num_sink_rates;
1315         }
1316
1317         *sink_rates = default_rates;
1318
1319         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1320 }
1321
1322 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1323 {
1324         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1325         struct drm_device *dev = dig_port->base.base.dev;
1326
1327         /* WaDisableHBR2:skl */
1328         if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1329                 return false;
1330
1331         if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1332             (INTEL_INFO(dev)->gen >= 9))
1333                 return true;
1334         else
1335                 return false;
1336 }
1337
1338 static int
1339 intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
1340 {
1341         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1342         struct drm_device *dev = dig_port->base.base.dev;
1343         int size;
1344
1345         if (IS_BROXTON(dev)) {
1346                 *source_rates = bxt_rates;
1347                 size = ARRAY_SIZE(bxt_rates);
1348         } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1349                 *source_rates = skl_rates;
1350                 size = ARRAY_SIZE(skl_rates);
1351         } else {
1352                 *source_rates = default_rates;
1353                 size = ARRAY_SIZE(default_rates);
1354         }
1355
1356         /* This depends on the fact that 5.4 is last value in the array */
1357         if (!intel_dp_source_supports_hbr2(intel_dp))
1358                 size--;
1359
1360         return size;
1361 }
1362
1363 static void
1364 intel_dp_set_clock(struct intel_encoder *encoder,
1365                    struct intel_crtc_state *pipe_config)
1366 {
1367         struct drm_device *dev = encoder->base.dev;
1368         const struct dp_link_dpll *divisor = NULL;
1369         int i, count = 0;
1370
1371         if (IS_G4X(dev)) {
1372                 divisor = gen4_dpll;
1373                 count = ARRAY_SIZE(gen4_dpll);
1374         } else if (HAS_PCH_SPLIT(dev)) {
1375                 divisor = pch_dpll;
1376                 count = ARRAY_SIZE(pch_dpll);
1377         } else if (IS_CHERRYVIEW(dev)) {
1378                 divisor = chv_dpll;
1379                 count = ARRAY_SIZE(chv_dpll);
1380         } else if (IS_VALLEYVIEW(dev)) {
1381                 divisor = vlv_dpll;
1382                 count = ARRAY_SIZE(vlv_dpll);
1383         }
1384
1385         if (divisor && count) {
1386                 for (i = 0; i < count; i++) {
1387                         if (pipe_config->port_clock == divisor[i].clock) {
1388                                 pipe_config->dpll = divisor[i].dpll;
1389                                 pipe_config->clock_set = true;
1390                                 break;
1391                         }
1392                 }
1393         }
1394 }
1395
1396 static int intersect_rates(const int *source_rates, int source_len,
1397                            const int *sink_rates, int sink_len,
1398                            int *common_rates)
1399 {
1400         int i = 0, j = 0, k = 0;
1401
1402         while (i < source_len && j < sink_len) {
1403                 if (source_rates[i] == sink_rates[j]) {
1404                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1405                                 return k;
1406                         common_rates[k] = source_rates[i];
1407                         ++k;
1408                         ++i;
1409                         ++j;
1410                 } else if (source_rates[i] < sink_rates[j]) {
1411                         ++i;
1412                 } else {
1413                         ++j;
1414                 }
1415         }
1416         return k;
1417 }
1418
/* Fill @common_rates with the link rates supported by both the source
 * and the sink; returns the number of entries written. */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
                                 int *common_rates)
{
        const int *src_rates, *snk_rates;
        int num_src, num_snk;

        num_src = intel_dp_source_rates(intel_dp, &src_rates);
        num_snk = intel_dp_sink_rates(intel_dp, &snk_rates);

        return intersect_rates(src_rates, num_src,
                               snk_rates, num_snk,
                               common_rates);
}
1432
/*
 * Format @nelem ints from @array into @str as a comma-separated list,
 * e.g. "162000, 270000".  The output is always NUL-terminated; on
 * truncation (or an snprintf() output error) the list is cut short.
 */
static void snprintf_int_array(char *str, size_t len,
                               const int *array, int nelem)
{
        int i;

        str[0] = '\0';

        for (i = 0; i < nelem; i++) {
                int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

                /*
                 * Check for an output error explicitly: the old
                 * "r >= len" test only caught a negative return through
                 * the accident of signed-to-unsigned promotion.
                 */
                if (r < 0 || (size_t)r >= len)
                        return;
                str += r;
                len -= r;
        }
}
1448
1449 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1450 {
1451         const int *source_rates, *sink_rates;
1452         int source_len, sink_len, common_len;
1453         int common_rates[DP_MAX_SUPPORTED_RATES];
1454         char str[128]; /* FIXME: too big for stack? */
1455
1456         if ((drm_debug & DRM_UT_KMS) == 0)
1457                 return;
1458
1459         source_len = intel_dp_source_rates(intel_dp, &source_rates);
1460         snprintf_int_array(str, sizeof(str), source_rates, source_len);
1461         DRM_DEBUG_KMS("source rates: %s\n", str);
1462
1463         sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1464         snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1465         DRM_DEBUG_KMS("sink rates: %s\n", str);
1466
1467         common_len = intel_dp_common_rates(intel_dp, common_rates);
1468         snprintf_int_array(str, sizeof(str), common_rates, common_len);
1469         DRM_DEBUG_KMS("common rates: %s\n", str);
1470 }
1471
1472 static int rate_to_index(int find, const int *rates)
1473 {
1474         int i = 0;
1475
1476         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1477                 if (find == rates[i])
1478                         break;
1479
1480         return i;
1481 }
1482
/*
 * Highest link rate supported by both source and sink.
 *
 * Returns 162000 as a conservative fallback if (contrary to
 * expectation) there is no common rate at all.
 */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
        int rates[DP_MAX_SUPPORTED_RATES] = {};
        int len;

        len = intel_dp_common_rates(intel_dp, rates);
        if (WARN_ON(len <= 0))
                return 162000;

        /* rates[] was zero-initialized, so rate_to_index(0, rates)
         * yields the number of valid entries; the last entry is the
         * maximum (intersect_rates() emits them in ascending order). */
        return rates[rate_to_index(0, rates) - 1];
}
1495
/* Map @rate to its index in the sink's rate table; this index is what
 * callers program as the rate select value (see intel_dp_compute_rate()). */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
        return rate_to_index(rate, intel_dp->sink_rates);
}
1500
1501 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1502                            uint8_t *link_bw, uint8_t *rate_select)
1503 {
1504         if (intel_dp->num_sink_rates) {
1505                 *link_bw = 0;
1506                 *rate_select =
1507                         intel_dp_rate_select(intel_dp, port_clock);
1508         } else {
1509                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1510                 *rate_select = 0;
1511         }
1512 }
1513
1514 bool
1515 intel_dp_compute_config(struct intel_encoder *encoder,
1516                         struct intel_crtc_state *pipe_config)
1517 {
1518         struct drm_device *dev = encoder->base.dev;
1519         struct drm_i915_private *dev_priv = dev->dev_private;
1520         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1521         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1522         enum port port = dp_to_dig_port(intel_dp)->port;
1523         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1524         struct intel_connector *intel_connector = intel_dp->attached_connector;
1525         int lane_count, clock;
1526         int min_lane_count = 1;
1527         int max_lane_count = intel_dp_max_lane_count(intel_dp);
1528         /* Conveniently, the link BW constants become indices with a shift...*/
1529         int min_clock = 0;
1530         int max_clock;
1531         int bpp, mode_rate;
1532         int link_avail, link_clock;
1533         int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1534         int common_len;
1535         uint8_t link_bw, rate_select;
1536
1537         common_len = intel_dp_common_rates(intel_dp, common_rates);
1538
1539         /* No common link rates between source and sink */
1540         WARN_ON(common_len <= 0);
1541
1542         max_clock = common_len - 1;
1543
1544         if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1545                 pipe_config->has_pch_encoder = true;
1546
1547         pipe_config->has_dp_encoder = true;
1548         pipe_config->has_drrs = false;
1549         pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1550
1551         if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1552                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1553                                        adjusted_mode);
1554
1555                 if (INTEL_INFO(dev)->gen >= 9) {
1556                         int ret;
1557                         ret = skl_update_scaler_crtc(pipe_config);
1558                         if (ret)
1559                                 return ret;
1560                 }
1561
1562                 if (HAS_GMCH_DISPLAY(dev))
1563                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
1564                                                  intel_connector->panel.fitting_mode);
1565                 else
1566                         intel_pch_panel_fitting(intel_crtc, pipe_config,
1567                                                 intel_connector->panel.fitting_mode);
1568         }
1569
1570         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1571                 return false;
1572
1573         DRM_DEBUG_KMS("DP link computation with max lane count %i "
1574                       "max bw %d pixel clock %iKHz\n",
1575                       max_lane_count, common_rates[max_clock],
1576                       adjusted_mode->crtc_clock);
1577
1578         /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1579          * bpc in between. */
1580         bpp = pipe_config->pipe_bpp;
1581         if (is_edp(intel_dp)) {
1582
1583                 /* Get bpp from vbt only for panels that dont have bpp in edid */
1584                 if (intel_connector->base.display_info.bpc == 0 &&
1585                         (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1586                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1587                                       dev_priv->vbt.edp_bpp);
1588                         bpp = dev_priv->vbt.edp_bpp;
1589                 }
1590
1591                 /*
1592                  * Use the maximum clock and number of lanes the eDP panel
1593                  * advertizes being capable of. The panels are generally
1594                  * designed to support only a single clock and lane
1595                  * configuration, and typically these values correspond to the
1596                  * native resolution of the panel.
1597                  */
1598                 min_lane_count = max_lane_count;
1599                 min_clock = max_clock;
1600         }
1601
1602         for (; bpp >= 6*3; bpp -= 2*3) {
1603                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1604                                                    bpp);
1605
1606                 for (clock = min_clock; clock <= max_clock; clock++) {
1607                         for (lane_count = min_lane_count;
1608                                 lane_count <= max_lane_count;
1609                                 lane_count <<= 1) {
1610
1611                                 link_clock = common_rates[clock];
1612                                 link_avail = intel_dp_max_data_rate(link_clock,
1613                                                                     lane_count);
1614
1615                                 if (mode_rate <= link_avail) {
1616                                         goto found;
1617                                 }
1618                         }
1619                 }
1620         }
1621
1622         return false;
1623
1624 found:
1625         if (intel_dp->color_range_auto) {
1626                 /*
1627                  * See:
1628                  * CEA-861-E - 5.1 Default Encoding Parameters
1629                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1630                  */
1631                 pipe_config->limited_color_range =
1632                         bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1633         } else {
1634                 pipe_config->limited_color_range =
1635                         intel_dp->limited_color_range;
1636         }
1637
1638         pipe_config->lane_count = lane_count;
1639
1640         pipe_config->pipe_bpp = bpp;
1641         pipe_config->port_clock = common_rates[clock];
1642
1643         intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1644                               &link_bw, &rate_select);
1645
1646         DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1647                       link_bw, rate_select, pipe_config->lane_count,
1648                       pipe_config->port_clock, bpp);
1649         DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1650                       mode_rate, link_avail);
1651
1652         intel_link_compute_m_n(bpp, lane_count,
1653                                adjusted_mode->crtc_clock,
1654                                pipe_config->port_clock,
1655                                &pipe_config->dp_m_n);
1656
1657         if (intel_connector->panel.downclock_mode != NULL &&
1658                 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1659                         pipe_config->has_drrs = true;
1660                         intel_link_compute_m_n(bpp, lane_count,
1661                                 intel_connector->panel.downclock_mode->clock,
1662                                 pipe_config->port_clock,
1663                                 &pipe_config->dp_m2_n2);
1664         }
1665
1666         if ((IS_SKYLAKE(dev)  || IS_KABYLAKE(dev)) && is_edp(intel_dp))
1667                 skl_edp_set_pll_config(pipe_config);
1668         else if (IS_BROXTON(dev))
1669                 /* handled in ddi */;
1670         else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1671                 hsw_dp_set_ddi_pll_sel(pipe_config);
1672         else
1673                 intel_dp_set_clock(encoder, pipe_config);
1674
1675         return true;
1676 }
1677
1678 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1679                               const struct intel_crtc_state *pipe_config)
1680 {
1681         intel_dp->link_rate = pipe_config->port_clock;
1682         intel_dp->lane_count = pipe_config->lane_count;
1683 }
1684
/*
 * Assemble the DP port register value (cached in intel_dp->DP) for the
 * mode being set. Nothing except TRANS_DP_CTL (on CPT) is written to
 * the port register itself here; the cached value is programmed later
 * during the enable sequence.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, crtc->config);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		/*
		 * Gen7 CPU eDP (port A): sync polarity in the port register,
		 * CPT-style link training bits, pipe select at bit 29.
		 */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp;

		/*
		 * CPT PCH ports: only the training bits live in the port
		 * register; enhanced framing is mirrored into TRANS_DP_CTL
		 * and written to the hardware right away.
		 */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX / VLV / CHV style: everything in the port register. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
		    !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		/* CHV has a three-pipe select field; others use a single bit. */
		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1766
/*
 * Mask/value pairs for polling PP_STATUS via wait_panel_status():
 * panel fully on (sequencer idle), panel fully off, and power-cycle
 * delay complete. The literal 0 terms keep the columns aligned with
 * the bits each state does not care about.
 */
#define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1775
1776 static void wait_panel_status(struct intel_dp *intel_dp,
1777                                        u32 mask,
1778                                        u32 value)
1779 {
1780         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1781         struct drm_i915_private *dev_priv = dev->dev_private;
1782         i915_reg_t pp_stat_reg, pp_ctrl_reg;
1783
1784         lockdep_assert_held(&dev_priv->pps_mutex);
1785
1786         pp_stat_reg = _pp_stat_reg(intel_dp);
1787         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1788
1789         DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1790                         mask, value,
1791                         I915_READ(pp_stat_reg),
1792                         I915_READ(pp_ctrl_reg));
1793
1794         if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1795                 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1796                                 I915_READ(pp_stat_reg),
1797                                 I915_READ(pp_ctrl_reg));
1798         }
1799
1800         DRM_DEBUG_KMS("Wait complete\n");
1801 }
1802
/* Block until the panel power sequencer reports the panel fully on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1808
/* Block until the panel power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1814
/*
 * Block until the panel power-cycle delay has elapsed: first the
 * software-tracked delay since the last power cycle, then whatever the
 * hardware sequencer still reports as pending.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1826
/* Honor the panel's power-on -> backlight-on delay (see _intel_edp_backlight_on). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1832
/* Honor the panel's backlight-off delay before further power sequencing. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1838
1839 /* Read the current pp_control value, unlocking the register if it
1840  * is locked
1841  */
1842
1843 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1844 {
1845         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1846         struct drm_i915_private *dev_priv = dev->dev_private;
1847         u32 control;
1848
1849         lockdep_assert_held(&dev_priv->pps_mutex);
1850
1851         control = I915_READ(_pp_ctrl_reg(intel_dp));
1852         if (!IS_BROXTON(dev)) {
1853                 control &= ~PANEL_UNLOCK_MASK;
1854                 control |= PANEL_UNLOCK_REGS;
1855         }
1856         return control;
1857 }
1858
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Forces panel VDD on so the AUX channel can be used before full panel
 * power-up. Returns true if this call actually turned VDD on (i.e. the
 * caller owes a matching disable), false if it was already requested.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	/* True iff nobody had requested VDD before this call. */
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	/* A pending deferred VDD-off must not race with this request. */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	/* VDD already forced on in hardware; nothing more to program. */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	/* Respect the power-cycle delay before re-applying power. */
	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
1916
1917 /*
1918  * Must be paired with intel_edp_panel_vdd_off() or
1919  * intel_edp_panel_off().
1920  * Nested calls to these functions are not allowed since
1921  * we drop the lock. Caller must use some higher level
1922  * locking to prevent nested calls from other threads.
1923  */
1924 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1925 {
1926         bool vdd;
1927
1928         if (!is_edp(intel_dp))
1929                 return;
1930
1931         pps_lock(intel_dp);
1932         vdd = edp_panel_vdd_on(intel_dp);
1933         pps_unlock(intel_dp);
1934
1935         I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1936              port_name(dp_to_dig_port(intel_dp)->port));
1937 }
1938
/*
 * Immediately drop the forced panel VDD and release the AUX power
 * domain reference taken in edp_panel_vdd_on(). Must only run once no
 * one wants VDD anymore (see WARN below). Caller must hold pps_mutex.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	/* Nothing to do if hardware already has VDD off. */
	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* Dropping VDD with the panel off counts as a power cycle. */
	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->last_power_cycle = jiffies;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
1979
1980 static void edp_panel_vdd_work(struct work_struct *__work)
1981 {
1982         struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1983                                                  struct intel_dp, panel_vdd_work);
1984
1985         pps_lock(intel_dp);
1986         if (!intel_dp->want_panel_vdd)
1987                 edp_panel_vdd_off_sync(intel_dp);
1988         pps_unlock(intel_dp);
1989 }
1990
1991 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1992 {
1993         unsigned long delay;
1994
1995         /*
1996          * Queue the timer to fire a long time from now (relative to the power
1997          * down delay) to keep the panel power up across a sequence of
1998          * operations.
1999          */
2000         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2001         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2002 }
2003
2004 /*
2005  * Must be paired with edp_panel_vdd_on().
2006  * Must hold pps_mutex around the whole on/off sequence.
2007  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2008  */
2009 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2010 {
2011         struct drm_i915_private *dev_priv =
2012                 intel_dp_to_dev(intel_dp)->dev_private;
2013
2014         lockdep_assert_held(&dev_priv->pps_mutex);
2015
2016         if (!is_edp(intel_dp))
2017                 return;
2018
2019         I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2020              port_name(dp_to_dig_port(intel_dp)->port));
2021
2022         intel_dp->want_panel_vdd = false;
2023
2024         if (sync)
2025                 edp_panel_vdd_off_sync(intel_dp);
2026         else
2027                 edp_panel_vdd_schedule_off(intel_dp);
2028 }
2029
/*
 * Turn the eDP panel power on via the panel power sequencer and wait
 * for it to settle. Includes the gen5 (Ironlake) workaround of
 * dropping PANEL_POWER_RESET around the sequence. Caller must hold
 * pps_mutex.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	/* Double enable indicates a sequencing bug in the caller. */
	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp used by wait_backlight_on() for the backlight delay. */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2077
/* Locked wrapper around edp_panel_on(); no-op on non-eDP ports. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
2087
2088
/*
 * Turn the eDP panel power off, clearing the forced-VDD and backlight
 * bits in the same write, then wait for the sequencer and release the
 * AUX power domain reference taken when VDD was enabled. Caller must
 * hold pps_mutex and must have VDD forced on (see WARN below).
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Timestamp used by wait_panel_power_cycle() for the cycle delay. */
	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2130
/* Locked wrapper around edp_panel_off(); no-op on non-eDP ports. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
2140
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	/* Read-modify-write: only flip the backlight enable bit. */
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
2170
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* PWM first, then the panel power control backlight bit. */
	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}
2182
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	/* Read-modify-write: only clear the backlight enable bit. */
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	/* Record the off time, then honor the backlight-off delay. */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
2209
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* Reverse order of intel_edp_backlight_on(): PP bit first, then PWM. */
	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}
2221
2222 /*
2223  * Hook for controlling the panel power control backlight through the bl_power
2224  * sysfs attribute. Take care to handle multiple calls.
2225  */
2226 static void intel_edp_backlight_power(struct intel_connector *connector,
2227                                       bool enable)
2228 {
2229         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2230         bool is_enabled;
2231
2232         pps_lock(intel_dp);
2233         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2234         pps_unlock(intel_dp);
2235
2236         if (is_enabled == enable)
2237                 return;
2238
2239         DRM_DEBUG_KMS("panel power control backlight %s\n",
2240                       enable ? "enable" : "disable");
2241
2242         if (enable)
2243                 _intel_edp_backlight_on(intel_dp);
2244         else
2245                 _intel_edp_backlight_off(intel_dp);
2246 }
2247
2248 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2249 {
2250         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2251         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2252         bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2253
2254         I915_STATE_WARN(cur_state != state,
2255                         "DP port %c state assertion failure (expected %s, current %s)\n",
2256                         port_name(dig_port->port),
2257                         onoff(state), onoff(cur_state));
2258 }
2259 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2260
/* Warn if the eDP PLL enable bit in DP_A does not match the expected state. */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2271
/*
 * Enable the CPU eDP PLL in DP_A: program the frequency select first,
 * then set the enable bit, with fixed delays after each write. Both
 * the pipe and the DP port must still be disabled.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
		      crtc->config->port_clock);

	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	/* Only 162 MHz and 270 MHz link rates are supported here. */
	if (crtc->config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

	intel_dp->DP |= DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2302
/*
 * Disable the CPU eDP PLL in DP_A. The pipe and DP port must already
 * be disabled, and the PLL must currently be enabled.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	DRM_DEBUG_KMS("disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2321
2322 /* If the sink supports it, try to set the power state appropriately */
2323 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2324 {
2325         int ret, i;
2326
2327         /* Should have a valid DPCD by this point */
2328         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2329                 return;
2330
2331         if (mode != DRM_MODE_DPMS_ON) {
2332                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2333                                          DP_SET_POWER_D3);
2334         } else {
2335                 /*
2336                  * When turning on, we need to retry for 1ms to give the sink
2337                  * time to wake up.
2338                  */
2339                 for (i = 0; i < 3; i++) {
2340                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2341                                                  DP_SET_POWER_D0);
2342                         if (ret == 1)
2343                                 break;
2344                         msleep(1);
2345                 }
2346         }
2347
2348         if (ret != 1)
2349                 DRM_DEBUG_KMS("failed to %s sink power state\n",
2350                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2351 }
2352
/*
 * Read back whether this DP encoder is enabled in hardware, and if so
 * which pipe drives it. The pipe is encoded differently per platform
 * (port bits on gen7 port A / CHV / others, TRANS_DP_CTL on CPT).
 * Returns false if the power domain or the port enable bit is off.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/* Can't read the register if its power domain is off. */
	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_is_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (IS_GEN7(dev) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		enum pipe p;

		/* On CPT the pipe<->port mapping lives in TRANS_DP_CTL. */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				return true;
			}
		}

		/* Port enabled but no transcoder claims it; *pipe not set. */
		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      i915_mmio_reg_offset(intel_dp->output_reg));
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	return true;
}
2395
/*
 * Reconstruct the crtc state (sync flags, color range, lane count,
 * m/n values, port clock, dotclock) from the DP hardware registers,
 * for state readout/verification. Also applies an eDP VBT bpp fixup
 * based on what the BIOS actually programmed.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* Sync polarities come from TRANS_DP_CTL on CPT, the port reg elsewhere. */
	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	/* Mirrors the platform check used when setting the bit in intel_dp_prepare(). */
	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* CPU eDP (port A): link rate is encoded in the DP_A PLL freq bits. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
2483
/*
 * Common DP disable path: tear down audio and (non-DDI) PSR, power the
 * panel down in the required order, and on g4x take the link down before
 * the pipe is disabled.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        /* Audio must go before the port/pipe is shut down. */
        if (crtc->config->has_audio)
                intel_audio_codec_disable(encoder);

        /* On non-DDI platforms PSR is torn down explicitly here. */
        if (HAS_PSR(dev) && !HAS_DDI(dev))
                intel_psr_disable(intel_dp);

        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
        intel_edp_panel_vdd_on(intel_dp);
        intel_edp_backlight_off(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        intel_edp_panel_off(intel_dp);

        /* disable the port before the pipe on g4x */
        if (INTEL_INFO(dev)->gen < 5)
                intel_dp_link_down(intel_dp);
}
2507
2508 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2509 {
2510         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2511         enum port port = dp_to_dig_port(intel_dp)->port;
2512
2513         intel_dp_link_down(intel_dp);
2514
2515         /* Only ilk+ has port A */
2516         if (port == PORT_A)
2517                 ironlake_edp_pll_off(intel_dp);
2518 }
2519
2520 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2521 {
2522         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2523
2524         intel_dp_link_down(intel_dp);
2525 }
2526
/*
 * Assert (reset=true) or deassert (reset=false) the CHV DPIO data lane
 * soft reset: the TX lane reset and clock soft-reset bits are updated on
 * the first PCS group, and on the second group as well when more than
 * two lanes are in use. All callers in this file hold sb_lock around
 * this function.
 */
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
                                     bool reset)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        enum pipe pipe = crtc->pipe;
        uint32_t val;

        /* Lane resets: clearing the TX lane reset bits asserts reset. */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
        if (reset)
                val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        else
                val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

        if (crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
                if (reset)
                        val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
                else
                        val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
        }

        /* Clock soft reset; REQ_SOFTRESET_EN is set in both directions. */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        if (reset)
                val &= ~DPIO_PCS_CLK_SOFT_RESET;
        else
                val |= DPIO_PCS_CLK_SOFT_RESET;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

        if (crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
                val |= CHV_PCS_REQ_SOFTRESET_EN;
                if (reset)
                        val &= ~DPIO_PCS_CLK_SOFT_RESET;
                else
                        val |= DPIO_PCS_CLK_SOFT_RESET;
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
        }
}
2570
2571 static void chv_post_disable_dp(struct intel_encoder *encoder)
2572 {
2573         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2574         struct drm_device *dev = encoder->base.dev;
2575         struct drm_i915_private *dev_priv = dev->dev_private;
2576
2577         intel_dp_link_down(intel_dp);
2578
2579         mutex_lock(&dev_priv->sb_lock);
2580
2581         /* Assert data lane reset */
2582         chv_data_lane_soft_reset(encoder, true);
2583
2584         mutex_unlock(&dev_priv->sb_lock);
2585 }
2586
/*
 * Translate a DP training pattern request (dp_train_pat) into the
 * platform's register layout. On DDI platforms DP_TP_CTL is written
 * directly; on the other paths only the caller-owned *DP value is
 * updated and the caller is responsible for writing it to the port
 * register.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
                         uint32_t *DP,
                         uint8_t dp_train_pat)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        if (HAS_DDI(dev)) {
                uint32_t temp = I915_READ(DP_TP_CTL(port));

                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
                else
                        temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

                        break;
                case DP_TRAINING_PATTERN_1:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
                        break;
                }
                I915_WRITE(DP_TP_CTL(port), temp);

        } else if ((IS_GEN7(dev) && port == PORT_A) ||
                   (HAS_PCH_CPT(dev) && port != PORT_A)) {
                /* CPT-style register layout (also used by gen7 port A). */
                *DP &= ~DP_LINK_TRAIN_MASK_CPT;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF_CPT;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1_CPT;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* Pattern 3 unsupported here; fall back to pattern 2. */
                        DRM_ERROR("DP training pattern 3 not supported\n");
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }

        } else {
                if (IS_CHERRYVIEW(dev))
                        *DP &= ~DP_LINK_TRAIN_MASK_CHV;
                else
                        *DP &= ~DP_LINK_TRAIN_MASK;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* Only CHV supports pattern 3 in this layout. */
                        if (IS_CHERRYVIEW(dev)) {
                                *DP |= DP_LINK_TRAIN_PAT_3_CHV;
                        } else {
                                DRM_ERROR("DP training pattern 3 not supported\n");
                                *DP |= DP_LINK_TRAIN_PAT_2;
                        }
                        break;
                }
        }
}
2670
/*
 * Enable the DP port with training pattern 1 pre-selected, using the
 * two-step write sequence required for VLV/CHV (see comment below).
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc =
                to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);

        /* enable with pattern 1 (as per spec) */
        _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                 DP_TRAINING_PATTERN_1);

        /* First write: full configuration, without DP_PORT_EN (set below). */
        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);

        /*
         * Magic for VLV/CHV. We _must_ first set up the register
         * without actually enabling the port, and then do another
         * write to enable the port. Otherwise link training will
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;
        if (crtc->config->has_audio)
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

        /* Second write: actually enable the port. */
        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
}
2698
/*
 * Shared DP enable sequence: set up the VLV/CHV panel power sequencer,
 * enable the port, power up the (e)DP panel under pps_lock, then wake
 * the sink, run link training and enable audio.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);
        enum port port = dp_to_dig_port(intel_dp)->port;
        enum pipe pipe = crtc->pipe;

        /* The port must not already be enabled at this point. */
        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
                vlv_init_panel_power_sequencer(intel_dp);

        /*
         * We get an occasional spurious underrun between the port
         * enable and vdd enable, when enabling port A eDP.
         *
         * FIXME: Not sure if this applies to (PCH) port D eDP as well
         */
        if (port == PORT_A)
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        intel_dp_enable_port(intel_dp);

        if (port == PORT_A && IS_GEN5(dev_priv)) {
                /*
                 * Underrun reporting for the other pipe was disabled in
                 * g4x_pre_enable_dp(). The eDP PLL and port have now been
                 * enabled, so it's now safe to re-enable underrun reporting.
                 */
                intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
                intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
                intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
        }

        /* Power up the panel, holding vdd only for the duration. */
        edp_panel_vdd_on(intel_dp);
        edp_panel_on(intel_dp);
        edp_panel_vdd_off(intel_dp, true);

        if (port == PORT_A)
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        pps_unlock(intel_dp);

        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                unsigned int lane_mask = 0x0;

                /* CHV passes the mask of unused lanes; VLV passes 0. */
                if (IS_CHERRYVIEW(dev))
                        lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);

                vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
                                    lane_mask);
        }

        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);

        if (crtc->config->has_audio) {
                DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                                 pipe_name(pipe));
                intel_audio_codec_enable(encoder);
        }
}
2768
2769 static void g4x_enable_dp(struct intel_encoder *encoder)
2770 {
2771         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2772
2773         intel_enable_dp(encoder);
2774         intel_edp_backlight_on(intel_dp);
2775 }
2776
2777 static void vlv_enable_dp(struct intel_encoder *encoder)
2778 {
2779         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2780
2781         intel_edp_backlight_on(intel_dp);
2782         intel_psr_enable(intel_dp);
2783 }
2784
/*
 * g4x/ilk pre-enable: program the port registers, apply the ilk FIFO
 * underrun workaround for the other pipe, and turn on the eDP PLL for
 * port A.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;

        intel_dp_prepare(encoder);

        if (port == PORT_A && IS_GEN5(dev_priv)) {
                /*
                 * We get FIFO underruns on the other pipe when
                 * enabling the CPU eDP PLL, and when enabling CPU
                 * eDP port. We could potentially avoid the PLL
                 * underrun with a vblank wait just prior to enabling
                 * the PLL, but that doesn't appear to help the port
                 * enable case. Just sweep it all under the rug.
                 */
                intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
                intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
        }

        /* Only ilk+ has port A */
        if (port == PORT_A)
                ironlake_edp_pll_on(intel_dp);
}
2811
/*
 * Logically disconnect this eDP port from the power sequencer of pipe
 * intel_dp->pps_pipe: sync vdd off, clear the sequencer's port select
 * register, and mark pps_pipe invalid. Both callers run with pps_mutex
 * held.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
        enum pipe pipe = intel_dp->pps_pipe;
        i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

        edp_panel_vdd_off_sync(intel_dp);

        /*
         * VLV seems to get confused when multiple power sequencers
         * have the same port selected (even if only one has power/vdd
         * enabled). The failure manifests as vlv_wait_port_ready() failing.
         * CHV on the other hand doesn't seem to mind having the same port
         * selected in multiple power sequencers, but let's clear the
         * port select always when logically disconnecting a power sequencer
         * from a port.
         */
        DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));
        I915_WRITE(pp_on_reg, 0);
        POSTING_READ(pp_on_reg);

        intel_dp->pps_pipe = INVALID_PIPE;
}
2837
/*
 * Detach any eDP encoder currently bound to the power sequencer of
 * @pipe so the sequencer can be taken over by a new port. Must be
 * called with pps_mutex held (asserted below).
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* Only pipes A and B have power sequencers on VLV/CHV. */
        if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
                return;

        for_each_intel_encoder(dev, encoder) {
                struct intel_dp *intel_dp;
                enum port port;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                port = dp_to_dig_port(intel_dp)->port;

                if (intel_dp->pps_pipe != pipe)
                        continue;

                DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
                              pipe_name(pipe), port_name(port));

                /* Stealing from an active port indicates a driver bug. */
                WARN(encoder->base.crtc,
                     "stealing pipe %c power sequencer from active eDP port %c\n",
                     pipe_name(pipe), port_name(port));

                /* make sure vdd is off before we steal it */
                vlv_detach_power_sequencer(intel_dp);
        }
}
2873
/*
 * Bind the power sequencer of the crtc's pipe to this eDP port: first
 * release any sequencer the port used before, then steal the target
 * pipe's sequencer from other ports if needed, and finally (re)program
 * it for this port. Runs with pps_mutex held (asserted below).
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        /* Nothing to do if we already own the right sequencer. */
        if (intel_dp->pps_pipe == crtc->pipe)
                return;

        /*
         * If another power sequencer was being used on this
         * port previously make sure to turn off vdd there while
         * we still have control of it.
         */
        if (intel_dp->pps_pipe != INVALID_PIPE)
                vlv_detach_power_sequencer(intel_dp);

        /*
         * We may be stealing the power
         * sequencer from another port.
         */
        vlv_steal_power_sequencer(dev, crtc->pipe);

        /* now it's all ours */
        intel_dp->pps_pipe = crtc->pipe;

        DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2914
/*
 * VLV pre-enable: program the DPIO PCS registers for this port under
 * the sideband lock, then run the common DP enable sequence.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        u32 val;

        mutex_lock(&dev_priv->sb_lock);

        /*
         * NOTE(review): the value read here is immediately discarded by
         * the "val = 0" below, and the read register (VLV_PCS01_DW8)
         * differs from the one written (VLV_PCS_DW8). Presumably a full
         * register rewrite is intended — confirm before cleaning up.
         */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
        val = 0;
        if (pipe)
                val |= (1<<21);
        else
                val &= ~(1<<21);
        val |= 0x001000c4;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

        mutex_unlock(&dev_priv->sb_lock);

        intel_enable_dp(encoder);
}
2943
/*
 * VLV pre-PLL enable: prepare the port registers, then program default
 * Tx lane resets and the inter-pair skew fixup via DPIO.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;

        intel_dp_prepare(encoder);

        /* Program Tx lane resets to default */
        mutex_lock(&dev_priv->sb_lock);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
                         DPIO_PCS_TX_LANE2_RESET |
                         DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
                         DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
                         DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
                         (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
                                 DPIO_PCS_CLK_SOFT_RESET);

        /* Fix up inter-pair skew failure */
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
        mutex_unlock(&dev_priv->sb_lock);
}
2973
/*
 * CHV pre-enable: program the DPIO PHY (TX FIFO reset source, per-lane
 * latency/upar, data lane stagger) under the sideband lock, deassert
 * the data lane reset, then run the common DP enable sequence and drop
 * the temporary second-common-lane override if one was taken in
 * chv_dp_pre_pll_enable().
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        int data, i, stagger;
        u32 val;

        mutex_lock(&dev_priv->sb_lock);

        /* allow hardware to manage TX FIFO reset source */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

        if (intel_crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
                val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
        }

        /* Program Tx lane latency optimal setting */
        for (i = 0; i < intel_crtc->config->lane_count; i++) {
                /* Set the upar bit */
                if (intel_crtc->config->lane_count == 1)
                        data = 0x0;
                else
                        data = (i == 1) ? 0x0 : 0x1;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
                                data << DPIO_UPAR_SHIFT);
        }

        /* Data lane stagger programming: value scales with port clock. */
        if (intel_crtc->config->port_clock > 270000)
                stagger = 0x18;
        else if (intel_crtc->config->port_clock > 135000)
                stagger = 0xd;
        else if (intel_crtc->config->port_clock > 67500)
                stagger = 0x7;
        else if (intel_crtc->config->port_clock > 33750)
                stagger = 0x4;
        else
                stagger = 0x2;

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
        val |= DPIO_TX2_STAGGER_MASK(0x1f);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

        if (intel_crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
                val |= DPIO_TX2_STAGGER_MASK(0x1f);
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
        }

        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
                       DPIO_LANESTAGGER_STRAP(stagger) |
                       DPIO_LANESTAGGER_STRAP_OVRD |
                       DPIO_TX1_STAGGER_MASK(0x1f) |
                       DPIO_TX1_STAGGER_MULT(6) |
                       DPIO_TX2_STAGGER_MULT(0));

        if (intel_crtc->config->lane_count > 2) {
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
                               DPIO_LANESTAGGER_STRAP(stagger) |
                               DPIO_LANESTAGGER_STRAP_OVRD |
                               DPIO_TX1_STAGGER_MASK(0x1f) |
                               DPIO_TX1_STAGGER_MULT(7) |
                               DPIO_TX2_STAGGER_MULT(5));
        }

        /* Deassert data lane reset */
        chv_data_lane_soft_reset(encoder, false);

        mutex_unlock(&dev_priv->sb_lock);

        intel_enable_dp(encoder);

        /* Second common lane will stay alive on its own now */
        if (dport->release_cl2_override) {
                chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
                dport->release_cl2_override = false;
        }
}
3062
/*
 * CHV pre-PLL enable: power up the PHY lanes (forcing the second common
 * lane alive if needed so the PLL is even accessible), assert the data
 * lane reset, and program clock distribution and clock channel usage
 * via DPIO.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum pipe pipe = intel_crtc->pipe;
        unsigned int lane_mask =
                intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
        u32 val;

        intel_dp_prepare(encoder);

        /*
         * Must trick the second common lane into life.
         * Otherwise we can't even access the PLL.
         */
        if (ch == DPIO_CH0 && pipe == PIPE_B)
                dport->release_cl2_override =
                        !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

        chv_phy_powergate_lanes(encoder, true, lane_mask);

        mutex_lock(&dev_priv->sb_lock);

        /* Assert data lane reset */
        chv_data_lane_soft_reset(encoder, true);

        /* program left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
                val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA1_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA1_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
        } else {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
                val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA2_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA2_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
        }

        /* program clock channel usage */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
        val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
        if (pipe != PIPE_B)
                val &= ~CHV_PCS_USEDCLKCHANNEL;
        else
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

        if (intel_crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
                val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
                if (pipe != PIPE_B)
                        val &= ~CHV_PCS_USEDCLKCHANNEL;
                else
                        val |= CHV_PCS_USEDCLKCHANNEL;
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
        }

        /*
         * This is a bit weird since generally CL
         * matches the pipe, but here we need to
         * pick the CL based on the port.
         */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
        if (pipe != PIPE_B)
                val &= ~CHV_CMN_USEDCLKCHANNEL;
        else
                val |= CHV_CMN_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

        mutex_unlock(&dev_priv->sb_lock);
}
3145
/*
 * CHV post-PLL disable: stop forcing left/right clock distribution and
 * release the PHY lane overrides taken in chv_dp_pre_pll_enable().
 */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
        u32 val;

        mutex_lock(&dev_priv->sb_lock);

        /* disable left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
                val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
        } else {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
                val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
        }

        mutex_unlock(&dev_priv->sb_lock);

        /*
         * Leave the power down bit cleared for at least one
         * lane so that chv_powergate_phy_ch() will power
         * on something when the channel is otherwise unused.
         * When the port is off and the override is removed
         * the lanes power down anyway, so otherwise it doesn't
         * really matter what the state of power down bits is
         * after this.
         */
        chv_phy_powergate_lanes(encoder, false, 0x0);
}
3178
3179 /*
3180  * Native read with retry for link status and receiver capability reads for
3181  * cases where the sink may still be asleep.
3182  *
3183  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3184  * supposed to retry 3 times per the spec.
3185  */
3186 static ssize_t
3187 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3188                         void *buffer, size_t size)
3189 {
3190         ssize_t ret;
3191         int i;
3192
3193         /*
3194          * Sometime we just get the same incorrect byte repeated
3195          * over the entire buffer. Doing just one throw away read
3196          * initially seems to "solve" it.
3197          */
3198         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3199
3200         for (i = 0; i < 3; i++) {
3201                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3202                 if (ret == size)
3203                         return ret;
3204                 msleep(1);
3205         }
3206
3207         return ret;
3208 }
3209
3210 /*
3211  * Fetch AUX CH registers 0x202 - 0x207 which contain
3212  * link status information
3213  */
3214 bool
3215 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3216 {
3217         return intel_dp_dpcd_read_wake(&intel_dp->aux,
3218                                        DP_LANE0_1_STATUS,
3219                                        link_status,
3220                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3221 }
3222
3223 /* These are source-specific values. */
3224 uint8_t
3225 intel_dp_voltage_max(struct intel_dp *intel_dp)
3226 {
3227         struct drm_device *dev = intel_dp_to_dev(intel_dp);
3228         struct drm_i915_private *dev_priv = dev->dev_private;
3229         enum port port = dp_to_dig_port(intel_dp)->port;
3230
3231         if (IS_BROXTON(dev))
3232                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3233         else if (INTEL_INFO(dev)->gen >= 9) {
3234                 if (dev_priv->edp_low_vswing && port == PORT_A)
3235                         return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3236                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3237         } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
3238                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3239         else if (IS_GEN7(dev) && port == PORT_A)
3240                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3241         else if (HAS_PCH_CPT(dev) && port != PORT_A)
3242                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3243         else
3244                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3245 }
3246
/*
 * Return the highest pre-emphasis level the source can drive for the
 * given voltage swing on this platform/port. Link training clamps the
 * sink's requested pre-emphasis against this. The per-platform switch
 * tables below encode the hardware limits; higher swing levels leave
 * progressively less pre-emphasis headroom.
 */
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		/* Default table for older platforms / PCH ports. */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3314
/*
 * Program the VLV DPIO PHY swing and pre-emphasis registers for the
 * current link training values (intel_dp->train_set[0]).
 *
 * The nested switches translate the (pre-emphasis, voltage swing)
 * request into the demph, pre-emphasis and unique-transcale register
 * values; unsupported combinations return 0 without touching the
 * hardware. Always returns 0: on VLV the levels live entirely in DPIO,
 * so the caller merges no bits into the DP port register (its mask
 * stays 0).
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* DPIO accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3414
3415 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3416 {
3417         return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3418                 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3419 }
3420
/*
 * Program the CHV DPIO PHY for the requested voltage swing and
 * pre-emphasis (intel_dp->train_set[0]).
 *
 * Sequence (all under sb_lock): clear the swing calc init bits and
 * reset the deemph/margin state, program the per-lane swing deemph and
 * margin values, toggle the unique transition scale as needed, then
 * restart swing calculation.
 *
 * Unsupported swing/pre-emphasis combinations return 0 without touching
 * the hardware. Always returns 0: on CHV the levels live entirely in
 * DPIO, so the caller merges no bits into the DP port register.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	/* Translate (pre-emphasis, vswing) into deemph/margin values. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	/* PCS23 only carries lanes 2/3; skip it for 1/2-lane configs. */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
	}

	/* Program swing deemph */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;

		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		if (chv_need_uniq_trans_scale(train_set))
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3588
3589 static uint32_t
3590 gen4_signal_levels(uint8_t train_set)
3591 {
3592         uint32_t        signal_levels = 0;
3593
3594         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3595         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3596         default:
3597                 signal_levels |= DP_VOLTAGE_0_4;
3598                 break;
3599         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3600                 signal_levels |= DP_VOLTAGE_0_6;
3601                 break;
3602         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3603                 signal_levels |= DP_VOLTAGE_0_8;
3604                 break;
3605         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3606                 signal_levels |= DP_VOLTAGE_1_2;
3607                 break;
3608         }
3609         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3610         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3611         default:
3612                 signal_levels |= DP_PRE_EMPHASIS_0;
3613                 break;
3614         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3615                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3616                 break;
3617         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3618                 signal_levels |= DP_PRE_EMPHASIS_6;
3619                 break;
3620         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3621                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3622                 break;
3623         }
3624         return signal_levels;
3625 }
3626
3627 /* Gen6's DP voltage swing and pre-emphasis control */
3628 static uint32_t
3629 gen6_edp_signal_levels(uint8_t train_set)
3630 {
3631         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3632                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3633         switch (signal_levels) {
3634         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3635         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3636                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3637         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3638                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3639         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3640         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3641                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3642         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3643         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3644                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3645         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3646         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3647                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3648         default:
3649                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3650                               "0x%x\n", signal_levels);
3651                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3652         }
3653 }
3654
3655 /* Gen7's DP voltage swing and pre-emphasis control */
3656 static uint32_t
3657 gen7_edp_signal_levels(uint8_t train_set)
3658 {
3659         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3660                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3661         switch (signal_levels) {
3662         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3663                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3664         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3665                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3666         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3667                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3668
3669         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3670                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3671         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3672                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3673
3674         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3675                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3676         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3677                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3678
3679         default:
3680                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3681                               "0x%x\n", signal_levels);
3682                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3683         }
3684 }
3685
/*
 * Translate the current train_set[0] voltage swing / pre-emphasis into
 * platform-specific bits and latch them into the port register.
 *
 * On VLV/CHV (and Broxton DDI) the actual levels are programmed through
 * DPIO / the DDI buffer translation tables, so mask stays 0 and the
 * write below leaves intel_dp->DP unchanged; on the other platforms the
 * masked bits of intel_dp->DP are replaced with signal_levels.
 */
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t signal_levels, mask = 0;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev)) {
		signal_levels = ddi_signal_levels(intel_dp);

		/* BXT programs levels elsewhere; keep the register untouched. */
		if (IS_BROXTON(dev))
			signal_levels = 0;
		else
			mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	/* Only log the raw bits when they actually go into the register. */
	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3732
3733 void
3734 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3735                                        uint8_t dp_train_pat)
3736 {
3737         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3738         struct drm_i915_private *dev_priv =
3739                 to_i915(intel_dig_port->base.base.dev);
3740
3741         _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3742
3743         I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3744         POSTING_READ(intel_dp->output_reg);
3745 }
3746
/*
 * Switch a DDI port's DP_TP_CTL into idle-pattern transmission and,
 * except on port A, wait for the hardware to report idle done.
 * No-op on non-DDI platforms.
 */
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	/* Give the hardware 1ms to signal idle-pattern completion. */
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3777
/*
 * Turn the DP port off on non-DDI platforms: put the link into the
 * idle training pattern, then clear the port-enable and audio bits.
 * Includes a hardware workaround that briefly re-enables IBX ports on
 * transcoder A. WARNs and bails if called on DDI or when the port is
 * already disabled.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Select the idle pattern bits appropriate for this port's layout. */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Then drop port enable and audio output. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	/* Honor the panel power-down delay before the port can be re-enabled. */
	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;
}
3846
3847 static bool
3848 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3849 {
3850         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3851         struct drm_device *dev = dig_port->base.base.dev;
3852         struct drm_i915_private *dev_priv = dev->dev_private;
3853         uint8_t rev;
3854
3855         if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3856                                     sizeof(intel_dp->dpcd)) < 0)
3857                 return false; /* aux transfer failed */
3858
3859         DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3860
3861         if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3862                 return false; /* DPCD not present */
3863
3864         /* Check if the panel supports PSR */
3865         memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3866         if (is_edp(intel_dp)) {
3867                 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3868                                         intel_dp->psr_dpcd,
3869                                         sizeof(intel_dp->psr_dpcd));
3870                 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3871                         dev_priv->psr.sink_support = true;
3872                         DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3873                 }
3874
3875                 if (INTEL_INFO(dev)->gen >= 9 &&
3876                         (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3877                         uint8_t frame_sync_cap;
3878
3879                         dev_priv->psr.sink_support = true;
3880                         intel_dp_dpcd_read_wake(&intel_dp->aux,
3881                                         DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3882                                         &frame_sync_cap, 1);
3883                         dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3884                         /* PSR2 needs frame sync as well */
3885                         dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3886                         DRM_DEBUG_KMS("PSR2 %s on sink",
3887                                 dev_priv->psr.psr2_support ? "supported" : "not supported");
3888                 }
3889         }
3890
3891         DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3892                       yesno(intel_dp_source_supports_hbr2(intel_dp)),
3893                       yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3894
3895         /* Intermediate frequency support */
3896         if (is_edp(intel_dp) &&
3897             (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3898             (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3899             (rev >= 0x03)) { /* eDp v1.4 or higher */
3900                 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3901                 int i;
3902
3903                 intel_dp_dpcd_read_wake(&intel_dp->aux,
3904                                 DP_SUPPORTED_LINK_RATES,
3905                                 sink_rates,
3906                                 sizeof(sink_rates));
3907
3908                 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3909                         int val = le16_to_cpu(sink_rates[i]);
3910
3911                         if (val == 0)
3912                                 break;
3913
3914                         /* Value read is in kHz while drm clock is saved in deca-kHz */
3915                         intel_dp->sink_rates[i] = (val * 200) / 10;
3916                 }
3917                 intel_dp->num_sink_rates = i;
3918         }
3919
3920         intel_dp_print_rates(intel_dp);
3921
3922         if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3923               DP_DWN_STRM_PORT_PRESENT))
3924                 return true; /* native DP sink */
3925
3926         if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3927                 return true; /* no per-port downstream info */
3928
3929         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3930                                     intel_dp->downstream_ports,
3931                                     DP_MAX_DOWNSTREAM_PORTS) < 0)
3932                 return false; /* downstream port status fetch failed */
3933
3934         return true;
3935 }
3936
3937 static void
3938 intel_dp_probe_oui(struct intel_dp *intel_dp)
3939 {
3940         u8 buf[3];
3941
3942         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3943                 return;
3944
3945         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3946                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3947                               buf[0], buf[1], buf[2]);
3948
3949         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3950                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3951                               buf[0], buf[1], buf[2]);
3952 }
3953
3954 static bool
3955 intel_dp_probe_mst(struct intel_dp *intel_dp)
3956 {
3957         u8 buf[1];
3958
3959         if (!intel_dp->can_mst)
3960                 return false;
3961
3962         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3963                 return false;
3964
3965         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3966                 if (buf[0] & DP_MST_CAP) {
3967                         DRM_DEBUG_KMS("Sink is MST capable\n");
3968                         intel_dp->is_mst = true;
3969                 } else {
3970                         DRM_DEBUG_KMS("Sink is not MST capable\n");
3971                         intel_dp->is_mst = false;
3972                 }
3973         }
3974
3975         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3976         return intel_dp->is_mst;
3977 }
3978
/*
 * intel_dp_sink_crc_stop - stop sink CRC calculation.
 *
 * Clears DP_TEST_SINK_START on the sink and polls (once per vblank, up
 * to 10 attempts) until the sink's TEST_SINK_MISC count drains to zero.
 * Re-enables IPS on all exit paths, mirroring the disable performed in
 * intel_dp_sink_crc_start().
 *
 * Returns 0 on success, -EIO on AUX failure, -ETIMEDOUT if the counter
 * never reached zero.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;
	int count = 0;
	int attempts = 10;

	/* Read-modify-write TEST_SINK so only the START bit is cleared. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Wait a vblank between polls so the sink has time to settle. */
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto out;
		}
		count = buf & DP_TEST_COUNT_MASK;
	} while (--attempts && count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
		ret = -ETIMEDOUT;
	}

 out:
	/* NOTE(review): IPS is re-enabled even on paths where the caller may
	 * not have disabled it (e.g. when invoked from crc_start to stop a
	 * stale run) — presumably harmless, but worth confirming. */
	hsw_enable_ips(intel_crtc);
	return ret;
}
4022
/*
 * intel_dp_sink_crc_start - start sink CRC calculation.
 *
 * Verifies the sink supports CRC, stops any calculation already in
 * progress, disables IPS (which would otherwise perturb the pipe
 * output being checksummed) and sets DP_TEST_SINK_START.
 *
 * Returns 0 on success, -ENOTTY if the sink lacks CRC support, or a
 * negative errno on AUX/stop failure.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	if (buf & DP_TEST_SINK_START) {
		/* A previous CRC run is still active; stop it first. */
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		/* Undo the IPS disable on failure. */
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	/* Let one frame go by so the first CRC reflects the new state. */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return 0;
}
4057
/*
 * intel_dp_sink_crc - fetch one frame CRC from the sink (debugfs/tests).
 * @crc: output buffer, 6 bytes (three 16-bit per-component CRCs).
 *
 * Starts sink CRC calculation, waits up to 6 vblanks for the sink to
 * report a non-zero CRC count, reads the CRC registers and stops the
 * calculation again.  Returns 0 or a negative errno.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	/* Poll once per vblank until the sink reports a computed CRC. */
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

	} while (--attempts && count == 0);

	if (attempts == 0) {
		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto stop;
	}

	/* 6 bytes starting at DP_TEST_CRC_R_CR cover all three components. */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto stop;
	}

stop:
	/* Best effort: the stop result does not override an earlier error. */
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
4098
4099 static bool
4100 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4101 {
4102         return intel_dp_dpcd_read_wake(&intel_dp->aux,
4103                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
4104                                        sink_irq_vector, 1) == 1;
4105 }
4106
4107 static bool
4108 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4109 {
4110         int ret;
4111
4112         ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4113                                              DP_SINK_COUNT_ESI,
4114                                              sink_irq_vector, 14);
4115         if (ret != 14)
4116                 return false;
4117
4118         return true;
4119 }
4120
4121 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4122 {
4123         uint8_t test_result = DP_TEST_ACK;
4124         return test_result;
4125 }
4126
4127 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4128 {
4129         uint8_t test_result = DP_TEST_NAK;
4130         return test_result;
4131 }
4132
4133 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4134 {
4135         uint8_t test_result = DP_TEST_NAK;
4136         struct intel_connector *intel_connector = intel_dp->attached_connector;
4137         struct drm_connector *connector = &intel_connector->base;
4138
4139         if (intel_connector->detect_edid == NULL ||
4140             connector->edid_corrupt ||
4141             intel_dp->aux.i2c_defer_count > 6) {
4142                 /* Check EDID read for NACKs, DEFERs and corruption
4143                  * (DP CTS 1.2 Core r1.1)
4144                  *    4.2.2.4 : Failed EDID read, I2C_NAK
4145                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
4146                  *    4.2.2.6 : EDID corruption detected
4147                  * Use failsafe mode for all cases
4148                  */
4149                 if (intel_dp->aux.i2c_nack_count > 0 ||
4150                         intel_dp->aux.i2c_defer_count > 0)
4151                         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4152                                       intel_dp->aux.i2c_nack_count,
4153                                       intel_dp->aux.i2c_defer_count);
4154                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4155         } else {
4156                 struct edid *block = intel_connector->detect_edid;
4157
4158                 /* We have to write the checksum
4159                  * of the last block read
4160                  */
4161                 block += intel_connector->detect_edid->extensions;
4162
4163                 if (!drm_dp_dpcd_write(&intel_dp->aux,
4164                                         DP_TEST_EDID_CHECKSUM,
4165                                         &block->checksum,
4166                                         1))
4167                         DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4168
4169                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4170                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4171         }
4172
4173         /* Set test active flag here so userspace doesn't interrupt things */
4174         intel_dp->compliance_test_active = 1;
4175
4176         return test_result;
4177 }
4178
4179 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4180 {
4181         uint8_t test_result = DP_TEST_NAK;
4182         return test_result;
4183 }
4184
4185 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4186 {
4187         uint8_t response = DP_TEST_NAK;
4188         uint8_t rxdata = 0;
4189         int status = 0;
4190
4191         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4192         if (status <= 0) {
4193                 DRM_DEBUG_KMS("Could not read test request from sink\n");
4194                 goto update_status;
4195         }
4196
4197         switch (rxdata) {
4198         case DP_TEST_LINK_TRAINING:
4199                 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4200                 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4201                 response = intel_dp_autotest_link_training(intel_dp);
4202                 break;
4203         case DP_TEST_LINK_VIDEO_PATTERN:
4204                 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4205                 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4206                 response = intel_dp_autotest_video_pattern(intel_dp);
4207                 break;
4208         case DP_TEST_LINK_EDID_READ:
4209                 DRM_DEBUG_KMS("EDID test requested\n");
4210                 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4211                 response = intel_dp_autotest_edid(intel_dp);
4212                 break;
4213         case DP_TEST_LINK_PHY_TEST_PATTERN:
4214                 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4215                 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4216                 response = intel_dp_autotest_phy_pattern(intel_dp);
4217                 break;
4218         default:
4219                 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4220                 break;
4221         }
4222
4223 update_status:
4224         status = drm_dp_dpcd_write(&intel_dp->aux,
4225                                    DP_TEST_RESPONSE,
4226                                    &response, 1);
4227         if (status <= 0)
4228                 DRM_DEBUG_KMS("Could not write test response to sink\n");
4229 }
4230
/*
 * intel_dp_check_mst_status - service MST sink IRQs on HPD.
 *
 * Reads the 14-byte ESI block, retrains the link if channel EQ dropped,
 * forwards the ESI to the MST topology manager and acks the handled IRQ
 * bits, re-reading the ESI until the sink has nothing further to report.
 * If the ESI cannot be read at all the device is assumed gone: MST is
 * torn down and a hotplug event is sent.
 *
 * Returns the topology manager's result, 0 if nothing was handled, or
 * -EINVAL when not in MST mode / the sink stopped responding.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced IRQ bits; retry the 3-byte
				 * write up to 3 times since AUX may be flaky. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* The sink may have raised new IRQs while we
				 * were servicing these; loop until quiet. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4287
4288 /*
4289  * According to DP spec
4290  * 5.1.2:
4291  *  1. Read DPCD
4292  *  2. Configure link according to Receiver Capabilities
4293  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4294  *  4. Check link status on receipt of hot-plug interrupt
4295  */
/*
 * Validate the link on a short HPD pulse: read link status and DPCD,
 * ack any sink service IRQs, and retrain when channel EQ is no longer
 * ok (or when a compliance test explicitly requested link training).
 * Caller must hold connection_mutex.
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	intel_dp->compliance_test_active = 0;
	intel_dp->compliance_test_type = 0;
	intel_dp->compliance_test_data = 0;

	/* Nothing to check unless the encoder drives an active pipe. */
	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* if link training is requested we should perform it always */
	/* NOTE(review): compliance_test_type was cleared above and is not
	 * set again on this path, so the first operand looks always false
	 * here — confirm whether the intent was to check it before the
	 * clear. */
	if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
		(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
4353
/* XXX this is probably wrong for multiple downstream ports */
/*
 * intel_dp_detect_dpcd - derive connector status from the DPCD.
 *
 * Native sinks are connected once the DPCD reads back; branch devices
 * are judged by SINK_COUNT (when HPD-aware), a gentle DDC probe, and
 * finally the downstream port type, in that order of trust.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		/* Zero sinks behind the branch device means disconnected. */
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		/* DPCD 1.1+: per-port downstream info is available. */
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD 1.0: only the coarse type in DOWNSTREAMPORT_PRESENT. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4403
4404 static enum drm_connector_status
4405 edp_detect(struct intel_dp *intel_dp)
4406 {
4407         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4408         enum drm_connector_status status;
4409
4410         status = intel_panel_detect(dev);
4411         if (status == connector_status_unknown)
4412                 status = connector_status_connected;
4413
4414         return status;
4415 }
4416
4417 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4418                                        struct intel_digital_port *port)
4419 {
4420         u32 bit;
4421
4422         switch (port->port) {
4423         case PORT_A:
4424                 return true;
4425         case PORT_B:
4426                 bit = SDE_PORTB_HOTPLUG;
4427                 break;
4428         case PORT_C:
4429                 bit = SDE_PORTC_HOTPLUG;
4430                 break;
4431         case PORT_D:
4432                 bit = SDE_PORTD_HOTPLUG;
4433                 break;
4434         default:
4435                 MISSING_CASE(port->port);
4436                 return false;
4437         }
4438
4439         return I915_READ(SDEISR) & bit;
4440 }
4441
4442 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4443                                        struct intel_digital_port *port)
4444 {
4445         u32 bit;
4446
4447         switch (port->port) {
4448         case PORT_A:
4449                 return true;
4450         case PORT_B:
4451                 bit = SDE_PORTB_HOTPLUG_CPT;
4452                 break;
4453         case PORT_C:
4454                 bit = SDE_PORTC_HOTPLUG_CPT;
4455                 break;
4456         case PORT_D:
4457                 bit = SDE_PORTD_HOTPLUG_CPT;
4458                 break;
4459         case PORT_E:
4460                 bit = SDE_PORTE_HOTPLUG_SPT;
4461                 break;
4462         default:
4463                 MISSING_CASE(port->port);
4464                 return false;
4465         }
4466
4467         return I915_READ(SDEISR) & bit;
4468 }
4469
4470 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4471                                        struct intel_digital_port *port)
4472 {
4473         u32 bit;
4474
4475         switch (port->port) {
4476         case PORT_B:
4477                 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4478                 break;
4479         case PORT_C:
4480                 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4481                 break;
4482         case PORT_D:
4483                 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4484                 break;
4485         default:
4486                 MISSING_CASE(port->port);
4487                 return false;
4488         }
4489
4490         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4491 }
4492
4493 static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4494                                        struct intel_digital_port *port)
4495 {
4496         u32 bit;
4497
4498         switch (port->port) {
4499         case PORT_B:
4500                 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4501                 break;
4502         case PORT_C:
4503                 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4504                 break;
4505         case PORT_D:
4506                 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4507                 break;
4508         default:
4509                 MISSING_CASE(port->port);
4510                 return false;
4511         }
4512
4513         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4514 }
4515
/*
 * bxt_digital_port_connected - live hotplug state on Broxton.
 *
 * Broxton reports live status in GEN8_DE_PORT_ISR; the port is derived
 * from the encoder's HPD pin rather than intel_dig_port->port.
 */
static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *intel_dig_port)
{
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum port port;
	u32 bit;

	/* NOTE(review): the return value of intel_hpd_pin_to_port() is
	 * ignored; if the hpd_pin were ever invalid, 'port' could be used
	 * uninitialized below — confirm all callers set a valid hpd_pin. */
	intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
	switch (port) {
	case PORT_A:
		bit = BXT_DE_PORT_HP_DDIA;
		break;
	case PORT_B:
		bit = BXT_DE_PORT_HP_DDIB;
		break;
	case PORT_C:
		bit = BXT_DE_PORT_HP_DDIC;
		break;
	default:
		MISSING_CASE(port);
		return false;
	}

	return I915_READ(GEN8_DE_PORT_ISR) & bit;
}
4541
4542 /*
4543  * intel_digital_port_connected - is the specified port connected?
4544  * @dev_priv: i915 private structure
4545  * @port: the port to test
4546  *
4547  * Return %true if @port is connected, %false otherwise.
4548  */
4549 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4550                                          struct intel_digital_port *port)
4551 {
4552         if (HAS_PCH_IBX(dev_priv))
4553                 return ibx_digital_port_connected(dev_priv, port);
4554         if (HAS_PCH_SPLIT(dev_priv))
4555                 return cpt_digital_port_connected(dev_priv, port);
4556         else if (IS_BROXTON(dev_priv))
4557                 return bxt_digital_port_connected(dev_priv, port);
4558         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4559                 return vlv_digital_port_connected(dev_priv, port);
4560         else
4561                 return g4x_digital_port_connected(dev_priv, port);
4562 }
4563
4564 static struct edid *
4565 intel_dp_get_edid(struct intel_dp *intel_dp)
4566 {
4567         struct intel_connector *intel_connector = intel_dp->attached_connector;
4568
4569         /* use cached edid if we have one */
4570         if (intel_connector->edid) {
4571                 /* invalid edid */
4572                 if (IS_ERR(intel_connector->edid))
4573                         return NULL;
4574
4575                 return drm_edid_duplicate(intel_connector->edid);
4576         } else
4577                 return drm_get_edid(&intel_connector->base,
4578                                     &intel_dp->aux.ddc);
4579 }
4580
4581 static void
4582 intel_dp_set_edid(struct intel_dp *intel_dp)
4583 {
4584         struct intel_connector *intel_connector = intel_dp->attached_connector;
4585         struct edid *edid;
4586
4587         edid = intel_dp_get_edid(intel_dp);
4588         intel_connector->detect_edid = edid;
4589
4590         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4591                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4592         else
4593                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4594 }
4595
4596 static void
4597 intel_dp_unset_edid(struct intel_dp *intel_dp)
4598 {
4599         struct intel_connector *intel_connector = intel_dp->attached_connector;
4600
4601         kfree(intel_connector->detect_edid);
4602         intel_connector->detect_edid = NULL;
4603
4604         intel_dp->has_audio = false;
4605 }
4606
/*
 * intel_dp_detect - drm_connector .detect hook for DP/eDP.
 *
 * Determines connector status (taking the AUX power domain for the
 * duration), probes OUI and MST capability, refreshes the cached EDID,
 * and services any pending sink IRQs / compliance test requests.
 * MST-managed connectors always report disconnected here since their
 * monitors are exposed through the MST topology instead.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	/* AUX access below requires the port's AUX power domain; the
	 * matching put is at the 'out' label. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(to_i915(dev), power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(to_i915(dev),
					      dp_to_dig_port(intel_dp)))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status != connector_status_connected) {
		/* Reset compliance test state for the next test request. */
		intel_dp->compliance_test_active = 0;
		intel_dp->compliance_test_type = 0;
		intel_dp->compliance_test_data = 0;

		goto out;
	}

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_display_power_put(to_i915(dev), power_domain);
	return status;
}
4694
/*
 * drm_connector_funcs.force hook: refresh the cached EDID when the user
 * forces the connector state. Drops any previously cached EDID first; if
 * the connector is forced to a non-connected state, nothing more to do.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	/* EDID is read over the AUX channel, which needs its power domain. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
4720
4721 static int intel_dp_get_modes(struct drm_connector *connector)
4722 {
4723         struct intel_connector *intel_connector = to_intel_connector(connector);
4724         struct edid *edid;
4725
4726         edid = intel_connector->detect_edid;
4727         if (edid) {
4728                 int ret = intel_connector_update_modes(connector, edid);
4729                 if (ret)
4730                         return ret;
4731         }
4732
4733         /* if eDP has no EDID, fall back to fixed mode */
4734         if (is_edp(intel_attached_dp(connector)) &&
4735             intel_connector->panel.fixed_mode) {
4736                 struct drm_display_mode *mode;
4737
4738                 mode = drm_mode_duplicate(connector->dev,
4739                                           intel_connector->panel.fixed_mode);
4740                 if (mode) {
4741                         drm_mode_probed_add(connector, mode);
4742                         return 1;
4743                 }
4744         }
4745
4746         return 0;
4747 }
4748
4749 static bool
4750 intel_dp_detect_audio(struct drm_connector *connector)
4751 {
4752         bool has_audio = false;
4753         struct edid *edid;
4754
4755         edid = to_intel_connector(connector)->detect_edid;
4756         if (edid)
4757                 has_audio = drm_detect_monitor_audio(edid);
4758
4759         return has_audio;
4760 }
4761
/*
 * drm_connector_funcs.set_property hook. Handles the force-audio,
 * broadcast-RGB and (eDP only) panel scaling properties. On any actual
 * state change we jump to done: and restore the mode on the active crtc
 * so the new setting takes effect; no-op changes return 0 early.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the value on the property object itself first. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO re-probes the EDID; ON/OFF force the state. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		/* Only restore the mode if something actually changed. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4849
/*
 * Free everything hanging off the connector and the connector itself.
 * Paired with the connector allocation/init done at DP init time.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* kfree(NULL) is a no-op, so no NULL check needed here. */
	kfree(intel_connector->detect_edid);

	/* edid may hold an ERR_PTR from a failed read, which must not be freed. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4868
/*
 * Tear down the DP encoder: AUX channel, MST state, and for eDP make
 * sure the delayed VDD-off work cannot run after the port is freed.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	intel_dp_aux_fini(intel_dp);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4894
/*
 * Suspend hook: for eDP, flush the pending delayed VDD-off work and
 * force VDD off synchronously so we don't suspend with VDD enabled.
 * Non-eDP ports have nothing to do here.
 */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4911
/*
 * Reconcile our VDD state tracking with what the BIOS left behind.
 * Must be called with pps_mutex held (asserted below). No-op if the
 * hardware does not currently have panel VDD enabled.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4936
/*
 * drm_encoder_funcs.reset hook, called on driver load/resume. Only eDP
 * needs work here: re-read the power sequencer pipe assignment (VLV/CHV)
 * and sanitize any VDD state the BIOS left enabled.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4959
/* Connector vtable: atomic-helper based, with DP-specific detect/force/property. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4971
/* Probe helpers: mode enumeration/validation and encoder selection. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4977
/* Encoder vtable: reset re-sanitizes eDP PPS state; destroy tears the port down. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4982
/*
 * Handle a hot-plug detect pulse on a digital port.
 *
 * Long pulses mean connect/disconnect: re-probe DPCD/OUI and decide
 * between MST and SST operation. Short pulses are sink IRQs: service
 * MST messages or re-check SST link status. On any probe failure we
 * fall through to mst_fail, which tears down MST mode if it was active.
 * The AUX power domain is held across all sink accesses.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	/* Anything that isn't eDP or HDMI is treated as a DP port from here on. */
	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* indicate that we need to restart link training */
		intel_dp->train_set_valid = false;

		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
			goto mst_fail;

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		/* Sink is SST: check the link now, then leave MST mode if set. */
		if (!intel_dp_probe_mst(intel_dp)) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
			goto mst_fail;
		}
	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
5063
5064 /* check the VBT to see whether the eDP is on another port */
5065 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5066 {
5067         struct drm_i915_private *dev_priv = dev->dev_private;
5068         union child_device_config *p_child;
5069         int i;
5070         static const short port_mapping[] = {
5071                 [PORT_B] = DVO_PORT_DPB,
5072                 [PORT_C] = DVO_PORT_DPC,
5073                 [PORT_D] = DVO_PORT_DPD,
5074                 [PORT_E] = DVO_PORT_DPE,
5075         };
5076
5077         /*
5078          * eDP not supported on g4x. so bail out early just
5079          * for a bit extra safety in case the VBT is bonkers.
5080          */
5081         if (INTEL_INFO(dev)->gen < 5)
5082                 return false;
5083
5084         if (port == PORT_A)
5085                 return true;
5086
5087         if (!dev_priv->vbt.child_dev_num)
5088                 return false;
5089
5090         for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5091                 p_child = dev_priv->vbt.child_dev + i;
5092
5093                 if (p_child->common.dvo_port == port_mapping[port] &&
5094                     (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5095                     (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5096                         return true;
5097         }
5098         return false;
5099 }
5100
/*
 * Attach the standard DP connector properties (force-audio and
 * broadcast-RGB) and, for eDP, the panel scaling-mode property with
 * aspect-ratio scaling as the default.
 */
void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}
5119
/*
 * Seed the panel power event timestamps with the current time so the
 * PPS delay waits don't stall on uninitialized (zero) timestamps.
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->last_power_cycle = jiffies;
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
5126
/*
 * Compute the eDP panel power sequencing delays (T1/T3, T8, T9, T10,
 * T11/T12) by taking the max of the current hardware register values and
 * the VBT, falling back to the eDP spec limits when both are unset.
 * Results are cached in intel_dp->pps_delays and the derived millisecond
 * delays. Must be called with pps_mutex held (asserted below).
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	/* Pick the per-platform PPS register set. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);
	} else if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	/* BXT has no divisor register; the cycle delay lives in PP_CONTROL. */
	if (!IS_BROXTON(dev)) {
		I915_WRITE(pp_ctrl_reg, pp_ctl);
		pp_div = I915_READ(pp_div_reg);
	}

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	if (IS_BROXTON(dev)) {
		/* BXT stores T11/T12 in 100ms units, 1-based; convert to 100us. */
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			cur.t11_t12 = (tmp - 1) * 1000;
		else
			cur.t11_t12 = 0;
	} else {
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100us hardware units to milliseconds, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5249
/*
 * Program the previously computed PPS delays into the panel power
 * sequencer registers, including the reference-clock divisor and the
 * port select bits where the platform has them. Must be called with
 * pps_mutex held (asserted below).
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Pick the per-platform PPS register set. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);

	} else if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev)) {
		/* BXT keeps the power cycle delay in PP_CONTROL, not a divisor reg. */
		pp_div = I915_READ(pp_ctrl_reg);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	if (IS_BROXTON(dev))
		I915_WRITE(pp_ctrl_reg, pp_div);
	else
		I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      IS_BROXTON(dev) ?
		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(pp_div_reg));
}
5337
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/*
	 * NOTE(review): downclock_mode is dereferenced without a NULL check;
	 * presumably drrs.dp is only set when a downclock mode exists — verify
	 * against the enable path.
	 */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	/* Gen8+ (except CHV) switch RR via the M/N values; older gens via PIPECONF. */
	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		u32 val;

		val = I915_READ(reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5441
5442 /**
5443  * intel_edp_drrs_enable - init drrs struct if supported
5444  * @intel_dp: DP struct
5445  *
5446  * Initializes frontbuffer_bits and drrs.dp
5447  */
5448 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5449 {
5450         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5451         struct drm_i915_private *dev_priv = dev->dev_private;
5452         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5453         struct drm_crtc *crtc = dig_port->base.base.crtc;
5454         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5455
5456         if (!intel_crtc->config->has_drrs) {
5457                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5458                 return;
5459         }
5460
5461         mutex_lock(&dev_priv->drrs.mutex);
5462         if (WARN_ON(dev_priv->drrs.dp)) {
5463                 DRM_ERROR("DRRS already enabled\n");
5464                 goto unlock;
5465         }
5466
5467         dev_priv->drrs.busy_frontbuffer_bits = 0;
5468
5469         dev_priv->drrs.dp = intel_dp;
5470
5471 unlock:
5472         mutex_unlock(&dev_priv->drrs.mutex);
5473 }
5474
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 *
 * Restores the panel's fixed (high) refresh rate if we are currently in
 * low-RR mode, detaches @intel_dp from the DRRS state and cancels any
 * pending downclock work.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		/* Nothing registered (never enabled, or already disabled). */
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	/* Switch back to the panel's fixed refresh rate before detaching. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	/*
	 * Must cancel outside the mutex: the downclock work itself takes
	 * drrs.mutex, so a _sync cancel under the lock could deadlock.
	 */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5507
5508 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5509 {
5510         struct drm_i915_private *dev_priv =
5511                 container_of(work, typeof(*dev_priv), drrs.work.work);
5512         struct intel_dp *intel_dp;
5513
5514         mutex_lock(&dev_priv->drrs.mutex);
5515
5516         intel_dp = dev_priv->drrs.dp;
5517
5518         if (!intel_dp)
5519                 goto unlock;
5520
5521         /*
5522          * The delayed work can race with an invalidate hence we need to
5523          * recheck.
5524          */
5525
5526         if (dev_priv->drrs.busy_frontbuffer_bits)
5527                 goto unlock;
5528
5529         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5530                 intel_dp_set_drrs_state(dev_priv->dev,
5531                         intel_dp->attached_connector->panel.
5532                         downclock_mode->vrefresh);
5533
5534 unlock:
5535         mutex_unlock(&dev_priv->drrs.mutex);
5536 }
5537
5538 /**
5539  * intel_edp_drrs_invalidate - Disable Idleness DRRS
5540  * @dev: DRM device
5541  * @frontbuffer_bits: frontbuffer plane tracking bits
5542  *
5543  * This function gets called everytime rendering on the given planes start.
5544  * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5545  *
5546  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5547  */
5548 void intel_edp_drrs_invalidate(struct drm_device *dev,
5549                 unsigned frontbuffer_bits)
5550 {
5551         struct drm_i915_private *dev_priv = dev->dev_private;
5552         struct drm_crtc *crtc;
5553         enum pipe pipe;
5554
5555         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5556                 return;
5557
5558         cancel_delayed_work(&dev_priv->drrs.work);
5559
5560         mutex_lock(&dev_priv->drrs.mutex);
5561         if (!dev_priv->drrs.dp) {
5562                 mutex_unlock(&dev_priv->drrs.mutex);
5563                 return;
5564         }
5565
5566         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5567         pipe = to_intel_crtc(crtc)->pipe;
5568
5569         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5570         dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5571
5572         /* invalidate means busy screen hence upclock */
5573         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5574                 intel_dp_set_drrs_state(dev_priv->dev,
5575                                 dev_priv->drrs.dp->attached_connector->panel.
5576                                 fixed_mode->vrefresh);
5577
5578         mutex_unlock(&dev_priv->drrs.mutex);
5579 }
5580
/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only bits on the DRRS pipe are relevant; clear them from busy. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/*
	 * A flush still indicates recent screen activity, so upclock
	 * (LOW_RR -> HIGH_RR) before the idleness timer is restarted.
	 */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
5632
5633 /**
5634  * DOC: Display Refresh Rate Switching (DRRS)
5635  *
5636  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5638  * dynamically, based on the usage scenario. This feature is applicable
5639  * for internal panels.
5640  *
5641  * Indication that the panel supports DRRS is given by the panel EDID, which
5642  * would list multiple refresh rates for one resolution.
5643  *
5644  * DRRS is of 2 types - static and seamless.
5645  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5646  * (may appear as a blink on screen) and is used in dock-undock scenario.
5647  * Seamless DRRS involves changing RR without any visual effect to the user
5648  * and can be used during normal system usage. This is done by programming
5649  * certain registers.
5650  *
5651  * Support for static/seamless DRRS may be indicated in the VBT based on
5652  * inputs from the panel spec.
5653  *
5654  * DRRS saves power by switching to low RR based on usage scenarios.
5655  *
5656  * eDP DRRS:-
5657  *        The implementation is based on frontbuffer tracking implementation.
5658  * When there is a disturbance on the screen triggered by user activity or a
5659  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5660  * When there is no movement on screen, after a timeout of 1 second, a switch
5661  * to low RR is made.
5662  *        For integration with frontbuffer tracking code,
5663  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5664  *
5665  * DRRS can be further extended to support other internal panels and also
5666  * the scenario of video playback wherein RR is set based on the rate
5667  * requested by userspace.
5668  */
5669
5670 /**
5671  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5672  * @intel_connector: eDP connector
5673  * @fixed_mode: preferred mode of panel
5674  *
5675  * This function is  called only once at driver load to initialize basic
5676  * DRRS stuff.
5677  *
5678  * Returns:
5679  * Downclock mode if panel supports it, else return NULL.
5680  * DRRS support is determined by the presence of downclock mode (apart
5681  * from VBT setting).
5682  */
5683 static struct drm_display_mode *
5684 intel_dp_drrs_init(struct intel_connector *intel_connector,
5685                 struct drm_display_mode *fixed_mode)
5686 {
5687         struct drm_connector *connector = &intel_connector->base;
5688         struct drm_device *dev = connector->dev;
5689         struct drm_i915_private *dev_priv = dev->dev_private;
5690         struct drm_display_mode *downclock_mode = NULL;
5691
5692         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5693         mutex_init(&dev_priv->drrs.mutex);
5694
5695         if (INTEL_INFO(dev)->gen <= 6) {
5696                 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5697                 return NULL;
5698         }
5699
5700         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5701                 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5702                 return NULL;
5703         }
5704
5705         downclock_mode = intel_find_panel_downclock
5706                                         (dev, fixed_mode, connector);
5707
5708         if (!downclock_mode) {
5709                 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5710                 return NULL;
5711         }
5712
5713         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5714
5715         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5716         DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5717         return downclock_mode;
5718 }
5719
/*
 * Finish connector setup for eDP: sanitize VDD state, cache DPCD/EDID,
 * pick the panel's fixed (and optional downclock) mode and set up the
 * backlight. Returns false if the panel looks like a "ghost" (no DPCD),
 * in which case the caller tears the connector down.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	/* Nothing eDP-specific to do for external DP; report success. */
	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	/*
	 * Read and cache the EDID once. intel_connector->edid stores
	 * either a valid EDID or an ERR_PTR error code.
	 */
	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			/* DRRS needs the fixed mode to find a downclock mode */
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5826
/*
 * Set up the DRM connector for a DP/eDP digital port: install per-platform
 * AUX vfuncs, register the connector, configure hotplug, panel power
 * sequencing, AUX channel, MST and (for eDP) the panel itself. Returns
 * false on failure, after undoing registration and (for eDP) forcing VDD
 * off.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type, ret;

	if (WARN(intel_dig_port->max_lanes < 1,
		 "Not enough lanes (%d) for DP on port %c\n",
		 intel_dig_port->max_lanes, port_name(port)))
		return false;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs: pick the AUX clock divider per platform */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	if (HAS_DDI(dev))
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
		    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		/* BXT A-stepping routes port B's HPD through pin A */
		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	case PORT_E:
		intel_encoder->hpd_pin = HPD_PORT_E;
		break;
	default:
		BUG();
	}

	/* Initialize the panel power sequencer before any AUX traffic. */
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	ret = intel_dp_aux_init(intel_dp, intel_connector);
	if (ret)
		goto fail;

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	/* eDP-specific init; failure means a ghost panel, so unwind. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;

fail:
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled do to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);
	}
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);

	return false;
}
5990
/*
 * Allocate and wire up a DP digital port at @output_reg on @port:
 * encoder, per-platform enable/disable vfuncs, hotplug routing and the
 * connector. On failure all partially-constructed state is torn down
 * via the goto unwind chain below (reverse order of construction).
 */
void
intel_dp_init(struct drm_device *dev,
	      i915_reg_t output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			     DRM_MODE_ENCODER_TMDS, NULL))
		goto err_encoder_init;

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/* Per-platform enable/disable sequencing. */
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	dev_priv->dig_port_map[port] = intel_encoder;
	intel_dig_port->dp.output_reg = output_reg;
	intel_dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	/* On CHV, port D is driven only by pipe C; B/C by pipes A and B. */
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);

	return;
}
6072
6073 void intel_dp_mst_suspend(struct drm_device *dev)
6074 {
6075         struct drm_i915_private *dev_priv = dev->dev_private;
6076         int i;
6077
6078         /* disable MST */
6079         for (i = 0; i < I915_MAX_PORTS; i++) {
6080                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6081                 if (!intel_dig_port)
6082                         continue;
6083
6084                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6085                         if (!intel_dig_port->dp.can_mst)
6086                                 continue;
6087                         if (intel_dig_port->dp.is_mst)
6088                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6089                 }
6090         }
6091 }
6092
6093 void intel_dp_mst_resume(struct drm_device *dev)
6094 {
6095         struct drm_i915_private *dev_priv = dev->dev_private;
6096         int i;
6097
6098         for (i = 0; i < I915_MAX_PORTS; i++) {
6099                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6100                 if (!intel_dig_port)
6101                         continue;
6102                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6103                         int ret;
6104
6105                         if (!intel_dig_port->dp.can_mst)
6106                                 continue;
6107
6108                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6109                         if (ret != 0) {
6110                                 intel_dp_check_mst_status(&intel_dig_port->dp);
6111                         }
6112                 }
6113         }
6114 }