/*
 * drivers/gpu/drm/i915/intel_dp.c
 * (gitweb navigation header removed from this scraped copy; the original
 * license and copyright block follows below.)
 */
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
42 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
43
44 /* Compliance test status bits  */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK  0
46 #define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
/* Pairing of a DP link rate with the DPLL divider values that produce it. */
struct dp_link_dpll {
        int clock;              /* link rate in kHz (e.g. 162000, 270000) */
        struct dpll dpll;       /* divider settings generating that rate */
};
54
/* Gen4 DPLL dividers for the two standard DP link rates. */
static const struct dp_link_dpll gen4_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
61
/* PCH-split platform DPLL dividers for the two standard DP link rates. */
static const struct dp_link_dpll pch_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
68
/* Valleyview DPLL dividers for the two standard DP link rates. */
static const struct dp_link_dpll vlv_dpll[] = {
        { 162000,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { 270000,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
75
/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below only provides the fixed rates; variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires programming a fractional division for m2.
         * m2 is stored in fixed point format using the formula below:
         * (m2_int << 22) | m2_fraction
         */
        { 162000,       /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { 270000,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
        { 540000,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
93
/*
 * Per-platform link rate tables (kHz).  BXT and SKL support intermediate
 * rates in addition to the three standard DP rates in default_rates.
 */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
                                  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
                                  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
99
100 /**
101  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102  * @intel_dp: DP struct
103  *
104  * If a CPU or PCH DP output is attached to an eDP panel, this function
105  * will return true, and false otherwise.
106  */
107 static bool is_edp(struct intel_dp *intel_dp)
108 {
109         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
110
111         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
112 }
113
114 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
115 {
116         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
117
118         return intel_dig_port->base.base.dev;
119 }
120
121 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
122 {
123         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
124 }
125
126 static void intel_dp_link_down(struct intel_dp *intel_dp);
127 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
128 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
129 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
130 static void vlv_steal_power_sequencer(struct drm_device *dev,
131                                       enum pipe pipe);
132
/*
 * Return a 4-bit mask (bits 0-3) of the lanes that remain unused when
 * driving @lane_count lanes, i.e. the complement of the active-lane mask.
 */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
        unsigned int active = (1 << lane_count) - 1;

        return 0xf & ~active;
}
137
138 static int
139 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
140 {
141         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
142
143         switch (max_link_bw) {
144         case DP_LINK_BW_1_62:
145         case DP_LINK_BW_2_7:
146         case DP_LINK_BW_5_4:
147                 break;
148         default:
149                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
150                      max_link_bw);
151                 max_link_bw = DP_LINK_BW_1_62;
152                 break;
153         }
154         return max_link_bw;
155 }
156
157 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
158 {
159         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
160         struct drm_device *dev = intel_dig_port->base.base.dev;
161         u8 source_max, sink_max;
162
163         source_max = 4;
164         if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
165             (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
166                 source_max = 2;
167
168         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
169
170         return min(source_max, sink_max);
171 }
172
173 /*
174  * The units on the numbers in the next two are... bizarre.  Examples will
175  * make it clearer; this one parallels an example in the eDP spec.
176  *
177  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
178  *
179  *     270000 * 1 * 8 / 10 == 216000
180  *
181  * The actual data capacity of that configuration is 2.16Gbit/s, so the
182  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
183  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
184  * 119000.  At 18bpp that's 2142000 kilobits per second.
185  *
186  * Thus the strange-looking division by 10 in intel_dp_link_required, to
187  * get the result in decakilobits instead of kilobits.
188  */
189
/*
 * Bandwidth needed by @pixel_clock (kHz) at @bpp bits per pixel, in
 * decakilobits/s: divide by 10 rounding up (see the units discussion
 * above).
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
        int kilobits = pixel_clock * bpp;

        return (kilobits + 9) / 10;
}
195
/*
 * Data capacity of a link in decakilobits/s: @max_link_clock (kHz)
 * times @max_lanes, scaled by the 8b/10b coding efficiency.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        int raw = max_link_clock * max_lanes;

        return raw * 8 / 10;
}
201
202 static enum drm_mode_status
203 intel_dp_mode_valid(struct drm_connector *connector,
204                     struct drm_display_mode *mode)
205 {
206         struct intel_dp *intel_dp = intel_attached_dp(connector);
207         struct intel_connector *intel_connector = to_intel_connector(connector);
208         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
209         int target_clock = mode->clock;
210         int max_rate, mode_rate, max_lanes, max_link_clock;
211
212         if (is_edp(intel_dp) && fixed_mode) {
213                 if (mode->hdisplay > fixed_mode->hdisplay)
214                         return MODE_PANEL;
215
216                 if (mode->vdisplay > fixed_mode->vdisplay)
217                         return MODE_PANEL;
218
219                 target_clock = fixed_mode->clock;
220         }
221
222         max_link_clock = intel_dp_max_link_rate(intel_dp);
223         max_lanes = intel_dp_max_lane_count(intel_dp);
224
225         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
226         mode_rate = intel_dp_link_required(target_clock, 18);
227
228         if (mode_rate > max_rate)
229                 return MODE_CLOCK_HIGH;
230
231         if (mode->clock < 10000)
232                 return MODE_CLOCK_LOW;
233
234         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
235                 return MODE_H_ILLEGAL;
236
237         return MODE_OK;
238 }
239
/*
 * Pack up to four bytes of an AUX message into a 32-bit word, first byte
 * in the most significant position, as the AUX channel data registers
 * expect.  Source bytes beyond the fourth are ignored.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
        uint32_t v = 0;
        int i, n = src_bytes > 4 ? 4 : src_bytes;

        for (i = 0; i < n; i++)
                v |= (uint32_t)src[i] << (24 - i * 8);
        return v;
}
251
/*
 * Unpack a 32-bit AUX data word back into up to four bytes, most
 * significant byte first — the inverse of intel_dp_pack_aux().
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
        int i, n = dst_bytes > 4 ? 4 : dst_bytes;

        for (i = 0; i < n; i++)
                dst[i] = (uint8_t)(src >> (24 - i * 8));
}
260
261 static void
262 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
263                                     struct intel_dp *intel_dp);
264 static void
265 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
266                                               struct intel_dp *intel_dp);
267
/*
 * Acquire the panel power sequencer lock.  The power domain reference
 * must be taken while NOT holding pps_mutex (see the comment in
 * vlv_power_sequencer_reset()), hence the fixed ordering here: get the
 * domain first, then lock the mutex.  Pair with pps_unlock().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        /*
         * See vlv_power_sequencer_reset() why we need
         * a power domain reference here.
         */
        power_domain = intel_display_port_aux_power_domain(encoder);
        intel_display_power_get(dev_priv, power_domain);

        mutex_lock(&dev_priv->pps_mutex);
}
285
/*
 * Release the panel power sequencer lock: drop pps_mutex first, then
 * put the power domain reference taken in pps_lock() — the exact
 * reverse of the acquisition order.
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        mutex_unlock(&dev_priv->pps_mutex);

        power_domain = intel_display_port_aux_power_domain(encoder);
        intel_display_power_put(dev_priv, power_domain);
}
299
300 static void
301 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
302 {
303         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
304         struct drm_device *dev = intel_dig_port->base.base.dev;
305         struct drm_i915_private *dev_priv = dev->dev_private;
306         enum pipe pipe = intel_dp->pps_pipe;
307         bool pll_enabled, release_cl_override = false;
308         enum dpio_phy phy = DPIO_PHY(pipe);
309         enum dpio_channel ch = vlv_pipe_to_channel(pipe);
310         uint32_t DP;
311
312         if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
313                  "skipping pipe %c power seqeuncer kick due to port %c being active\n",
314                  pipe_name(pipe), port_name(intel_dig_port->port)))
315                 return;
316
317         DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
318                       pipe_name(pipe), port_name(intel_dig_port->port));
319
320         /* Preserve the BIOS-computed detected bit. This is
321          * supposed to be read-only.
322          */
323         DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
324         DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
325         DP |= DP_PORT_WIDTH(1);
326         DP |= DP_LINK_TRAIN_PAT_1;
327
328         if (IS_CHERRYVIEW(dev))
329                 DP |= DP_PIPE_SELECT_CHV(pipe);
330         else if (pipe == PIPE_B)
331                 DP |= DP_PIPEB_SELECT;
332
333         pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
334
335         /*
336          * The DPLL for the pipe must be enabled for this to work.
337          * So enable temporarily it if it's not already enabled.
338          */
339         if (!pll_enabled) {
340                 release_cl_override = IS_CHERRYVIEW(dev) &&
341                         !chv_phy_powergate_ch(dev_priv, phy, ch, true);
342
343                 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
344                                  &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
345         }
346
347         /*
348          * Similar magic as in intel_dp_enable_port().
349          * We _must_ do this port enable + disable trick
350          * to make this power seqeuencer lock onto the port.
351          * Otherwise even VDD force bit won't work.
352          */
353         I915_WRITE(intel_dp->output_reg, DP);
354         POSTING_READ(intel_dp->output_reg);
355
356         I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
357         POSTING_READ(intel_dp->output_reg);
358
359         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
360         POSTING_READ(intel_dp->output_reg);
361
362         if (!pll_enabled) {
363                 vlv_force_pll_off(dev, pipe);
364
365                 if (release_cl_override)
366                         chv_phy_powergate_ch(dev_priv, phy, ch, false);
367         }
368 }
369
/*
 * Return the pipe whose panel power sequencer serves this eDP port,
 * assigning one if none is bound yet: pick a pipe not used by any other
 * eDP port, steal it from its current user, program it for this port,
 * and "kick" it so the hardware locks onto the port.  Caller must hold
 * pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
        enum pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!is_edp(intel_dp));

        /* Already bound to a power sequencer? Reuse it. */
        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        /*
         * We don't have power sequencer currently.
         * Pick one that's not used by other ports.
         */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *tmp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                tmp = enc_to_intel_dp(&encoder->base);

                if (tmp->pps_pipe != INVALID_PIPE)
                        pipes &= ~(1 << tmp->pps_pipe);
        }

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipes == 0))
                pipe = PIPE_A;
        else
                pipe = ffs(pipes) - 1;

        vlv_steal_power_sequencer(dev, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

        /*
         * Even vdd force doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}
433
/* Predicate over (dev_priv, pipe) used to filter PPS pipe candidates. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
                               enum pipe pipe);

/* Is the power sequencer on @pipe currently asserting panel power? */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
                               enum pipe pipe)
{
        return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}
442
/* Is the power sequencer on @pipe currently forcing VDD on? */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
                                enum pipe pipe)
{
        return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}
448
/* Pipe check that accepts any pipe — the last-resort filter. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum pipe pipe)
{
        return true;
}
454
/*
 * Scan pipes A and B for a power sequencer whose port-select field
 * matches @port and which also satisfies @pipe_check.  Returns the
 * first matching pipe, or INVALID_PIPE if none qualifies.
 */
static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
                     enum port port,
                     vlv_pipe_check pipe_check)
{
        enum pipe pipe;

        for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
                u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
                        PANEL_PORT_SELECT_MASK;

                if (port_sel != PANEL_PORT_SELECT_VLV(port))
                        continue;

                if (!pipe_check(dev_priv, pipe))
                        continue;

                return pipe;
        }

        return INVALID_PIPE;
}
477
/*
 * At init time, bind this eDP port to whichever power sequencer the
 * BIOS left serving it.  Preference order: a PPS with panel power on,
 * then one with VDD forced on, then any PPS selecting this port.  If
 * none matches, leave pps_pipe invalid and let
 * vlv_power_sequencer_pipe() assign one on first use.  Caller must
 * hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* try to find a pipe with this port selected */
        /* first pick one where the panel is on */
        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                  vlv_pipe_has_pp_on);
        /* didn't find one? pick one where vdd is on */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_has_vdd_on);
        /* didn't find one? pick one with just the correct port */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_any);

        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
                              port_name(port));
                return;
        }

        DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
                      port_name(port), pipe_name(intel_dp->pps_pipe));

        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
514
/*
 * Invalidate the cached power sequencer assignment of every eDP port,
 * forcing vlv_power_sequencer_pipe() to re-pick and re-program one on
 * next use.  VLV-only.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct intel_encoder *encoder;

        if (WARN_ON(!IS_VALLEYVIEW(dev)))
                return;

        /*
         * We can't grab pps_mutex here due to deadlock with power_domain
         * mutex when power_domain functions are called while holding pps_mutex.
         * That also means that in order to use pps_pipe the code needs to
         * hold both a power domain reference and pps_mutex, and the power domain
         * reference get/put must be done while _not_ holding pps_mutex.
         * pps_{lock,unlock}() do these steps in the correct order, so one
         * should use them always.
         */

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
                struct intel_dp *intel_dp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                intel_dp->pps_pipe = INVALID_PIPE;
        }
}
543
/*
 * Panel power control register for this eDP port: a fixed register on
 * BXT and PCH-split platforms, per-PPS-pipe on VLV/CHV (where looking
 * it up may assign a power sequencer to the port).
 */
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);

        if (IS_BROXTON(dev))
                return BXT_PP_CONTROL(0);
        else if (HAS_PCH_SPLIT(dev))
                return PCH_PP_CONTROL;
        else
                return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}
556
/*
 * Panel power status register for this eDP port; same per-platform
 * selection logic as _pp_ctrl_reg().
 */
static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);

        if (IS_BROXTON(dev))
                return BXT_PP_STATUS(0);
        else if (HAS_PCH_SPLIT(dev))
                return PCH_PP_STATUS;
        else
                return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
569
/*
 * Reboot notifier handler: on SYS_RESTART, shut panel power off and max
 * out the power-cycle delay so the panel's T12 timing is guaranteed
 * across the reboot.  Only applicable when panel PM state is not
 * otherwise tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
                                                 edp_notifier);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
                i915_reg_t pp_ctrl_reg, pp_div_reg;
                u32 pp_div;

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
                pp_div = I915_READ(pp_div_reg);
                pp_div &= PP_REFERENCE_DIVIDER_MASK;

                /* 0x1F write to PP_DIV_REG sets max cycle delay */
                I915_WRITE(pp_div_reg, pp_div | 0x1F);
                I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
                msleep(intel_dp->panel_power_cycle_delay);
        }

        pps_unlock(intel_dp);

        return 0;
}
605
/*
 * Is panel power (PP_ON) currently asserted for this port?  Caller must
 * hold pps_mutex.  On VLV a port with no power sequencer assigned
 * cannot have panel power.
 */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (IS_VALLEYVIEW(dev) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
619
/*
 * Is VDD currently force-enabled (EDP_FORCE_VDD) for this port?  Caller
 * must hold pps_mutex.  On VLV a port with no power sequencer assigned
 * cannot have VDD on.
 */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (IS_VALLEYVIEW(dev) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
633
/*
 * Sanity check before AUX traffic: warn if an eDP panel has neither
 * panel power nor forced VDD enabled, since AUX transactions would fail
 * in that state.  No-op for non-eDP ports.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!is_edp(intel_dp))
                return;

        if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
                WARN(1, "eDP powered off while attempting aux channel communication.\n");
                DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
                              I915_READ(_pp_stat_reg(intel_dp)),
                              I915_READ(_pp_ctrl_reg(intel_dp)));
        }
}
650
/*
 * Wait for the AUX channel to go idle (DP_AUX_CH_CTL_SEND_BUSY clear),
 * either woken by the AUX-done interrupt or by polling, with a ~10ms
 * timeout.  Returns the last value read from the AUX control register;
 * logs an error if the hardware never signalled completion.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;

/* C: re-read the control reg into 'status' and test the busy bit. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
#undef C

        return status;
}
674
675 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
676 {
677         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
678         struct drm_device *dev = intel_dig_port->base.base.dev;
679
680         /*
681          * The clock divider is based off the hrawclk, and would like to run at
682          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
683          */
684         return index ? 0 : intel_hrawclk(dev) / 2;
685 }
686
687 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
688 {
689         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
690         struct drm_device *dev = intel_dig_port->base.base.dev;
691         struct drm_i915_private *dev_priv = dev->dev_private;
692
693         if (index)
694                 return 0;
695
696         if (intel_dig_port->port == PORT_A) {
697                 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
698
699         } else {
700                 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
701         }
702 }
703
704 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
705 {
706         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
707         struct drm_device *dev = intel_dig_port->base.base.dev;
708         struct drm_i915_private *dev_priv = dev->dev_private;
709
710         if (intel_dig_port->port == PORT_A) {
711                 if (index)
712                         return 0;
713                 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
714         } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
715                 /* Workaround for non-ULT HSW */
716                 switch (index) {
717                 case 0: return 63;
718                 case 1: return 72;
719                 default: return 0;
720                 }
721         } else  {
722                 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
723         }
724 }
725
/* VLV uses a single fixed AUX clock divider of 100. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        if (index)
                return 0;

        return 100;
}
730
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        /*
         * SKL doesn't need us to program the AUX clock divider (Hardware
         * derives the clock from CDCLK automatically).  Return a single
         * dummy non-zero value so the retry loop in the AUX transfer
         * code runs exactly once; this merely plugs into the existing
         * get_aux_clock_divider vfunc scheme.
         */
        if (index)
                return 0;

        return 1;
}
740
/*
 * Compose the AUX_CH_CTL value for a transfer on pre-SKL hardware:
 * busy/done/error status bits, the timeout (600us for BDW port A,
 * otherwise 400us), message size, precharge time (3 on gen6, else 5)
 * and the 2x bit clock divider.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
                                      bool has_aux_irq,
                                      int send_bytes,
                                      uint32_t aux_clock_divider)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        uint32_t precharge, timeout;

        if (IS_GEN6(dev))
                precharge = 3;
        else
                precharge = 5;

        if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
                timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
        else
                timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               timeout |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
770
/*
 * Compose the AUX_CH_CTL value for SKL+: no clock divider field (the
 * hardware derives the AUX clock itself), a fixed 1600us timeout and a
 * 32-pulse sync sequence.  The divider argument is unused by design.
 */
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
                                      bool has_aux_irq,
                                      int send_bytes,
                                      uint32_t unused)
{
        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               DP_AUX_CH_CTL_TIME_OUT_1600us |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
785
/*
 * Perform one raw AUX channel transaction: send @send_bytes from @send,
 * then unload up to @recv_size reply bytes into @recv.
 *
 * Returns the number of bytes received on success, or a negative errno:
 * -EBUSY if the channel never went idle or never signalled DONE,
 * -E2BIG for buffers beyond the 5 data registers (20 bytes),
 * -EIO on a receive error, -ETIMEDOUT when the sink did not answer
 * (typically: nothing connected).
 *
 * Runs with the PPS mutex held and a VDD reference taken for eDP.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* Only WARN once per distinct stuck status value, so a
		 * permanently wedged channel doesn't spam the log. */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Retry the whole transfer at every divider the platform offers;
	 * get_aux_clock_divider() returns 0 when the list is exhausted. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
936
#define BARE_ADDRESS_SIZE       3
#define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux.transfer() hook: marshal a drm_dp_aux_msg into the 4-byte
 * AUX header (request nibble + 20-bit address + length-1), append the
 * payload for writes, hand it to intel_dp_aux_ch() and decode the reply.
 *
 * Returns the payload size on success (for a short write: the number of
 * bytes the sink reported as accepted), or a negative errno.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* Pack the common header: request, 20-bit address, size-1. */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* A zero-size message sends only the bare 3-byte address. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			/* First reply byte: reply code in high nibble. */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1008
1009 static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1010                                        enum port port)
1011 {
1012         switch (port) {
1013         case PORT_B:
1014         case PORT_C:
1015         case PORT_D:
1016                 return DP_AUX_CH_CTL(port);
1017         default:
1018                 MISSING_CASE(port);
1019                 return DP_AUX_CH_CTL(PORT_B);
1020         }
1021 }
1022
1023 static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1024                                         enum port port, int index)
1025 {
1026         switch (port) {
1027         case PORT_B:
1028         case PORT_C:
1029         case PORT_D:
1030                 return DP_AUX_CH_DATA(port, index);
1031         default:
1032                 MISSING_CASE(port);
1033                 return DP_AUX_CH_DATA(PORT_B, index);
1034         }
1035 }
1036
1037 static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1038                                        enum port port)
1039 {
1040         switch (port) {
1041         case PORT_A:
1042                 return DP_AUX_CH_CTL(port);
1043         case PORT_B:
1044         case PORT_C:
1045         case PORT_D:
1046                 return PCH_DP_AUX_CH_CTL(port);
1047         default:
1048                 MISSING_CASE(port);
1049                 return DP_AUX_CH_CTL(PORT_A);
1050         }
1051 }
1052
1053 static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1054                                         enum port port, int index)
1055 {
1056         switch (port) {
1057         case PORT_A:
1058                 return DP_AUX_CH_DATA(port, index);
1059         case PORT_B:
1060         case PORT_C:
1061         case PORT_D:
1062                 return PCH_DP_AUX_CH_DATA(port, index);
1063         default:
1064                 MISSING_CASE(port);
1065                 return DP_AUX_CH_DATA(PORT_A, index);
1066         }
1067 }
1068
1069 /*
1070  * On SKL we don't have Aux for port E so we rely
1071  * on VBT to set a proper alternate aux channel.
1072  */
1073 static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1074 {
1075         const struct ddi_vbt_port_info *info =
1076                 &dev_priv->vbt.ddi_port_info[PORT_E];
1077
1078         switch (info->alternate_aux_channel) {
1079         case DP_AUX_A:
1080                 return PORT_A;
1081         case DP_AUX_B:
1082                 return PORT_B;
1083         case DP_AUX_C:
1084                 return PORT_C;
1085         case DP_AUX_D:
1086                 return PORT_D;
1087         default:
1088                 MISSING_CASE(info->alternate_aux_channel);
1089                 return PORT_A;
1090         }
1091 }
1092
1093 static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1094                                        enum port port)
1095 {
1096         if (port == PORT_E)
1097                 port = skl_porte_aux_port(dev_priv);
1098
1099         switch (port) {
1100         case PORT_A:
1101         case PORT_B:
1102         case PORT_C:
1103         case PORT_D:
1104                 return DP_AUX_CH_CTL(port);
1105         default:
1106                 MISSING_CASE(port);
1107                 return DP_AUX_CH_CTL(PORT_A);
1108         }
1109 }
1110
1111 static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1112                                         enum port port, int index)
1113 {
1114         if (port == PORT_E)
1115                 port = skl_porte_aux_port(dev_priv);
1116
1117         switch (port) {
1118         case PORT_A:
1119         case PORT_B:
1120         case PORT_C:
1121         case PORT_D:
1122                 return DP_AUX_CH_DATA(port, index);
1123         default:
1124                 MISSING_CASE(port);
1125                 return DP_AUX_CH_DATA(PORT_A, index);
1126         }
1127 }
1128
1129 static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1130                                          enum port port)
1131 {
1132         if (INTEL_INFO(dev_priv)->gen >= 9)
1133                 return skl_aux_ctl_reg(dev_priv, port);
1134         else if (HAS_PCH_SPLIT(dev_priv))
1135                 return ilk_aux_ctl_reg(dev_priv, port);
1136         else
1137                 return g4x_aux_ctl_reg(dev_priv, port);
1138 }
1139
1140 static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1141                                           enum port port, int index)
1142 {
1143         if (INTEL_INFO(dev_priv)->gen >= 9)
1144                 return skl_aux_data_reg(dev_priv, port, index);
1145         else if (HAS_PCH_SPLIT(dev_priv))
1146                 return ilk_aux_data_reg(dev_priv, port, index);
1147         else
1148                 return g4x_aux_data_reg(dev_priv, port, index);
1149 }
1150
1151 static void intel_aux_reg_init(struct intel_dp *intel_dp)
1152 {
1153         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1154         enum port port = dp_to_dig_port(intel_dp)->port;
1155         int i;
1156
1157         intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1158         for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1159                 intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1160 }
1161
/*
 * Tear down the AUX channel: unregister from the DRM DP helper and free
 * the name string allocated in intel_dp_aux_init().
 */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	drm_dp_aux_unregister(&intel_dp->aux);
	kfree(intel_dp->aux.name);
}
1168
1169 static int
1170 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1171 {
1172         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1173         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1174         enum port port = intel_dig_port->port;
1175         int ret;
1176
1177         intel_aux_reg_init(intel_dp);
1178
1179         intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1180         if (!intel_dp->aux.name)
1181                 return -ENOMEM;
1182
1183         intel_dp->aux.dev = dev->dev;
1184         intel_dp->aux.transfer = intel_dp_aux_transfer;
1185
1186         DRM_DEBUG_KMS("registering %s bus for %s\n",
1187                       intel_dp->aux.name,
1188                       connector->base.kdev->kobj.name);
1189
1190         ret = drm_dp_aux_register(&intel_dp->aux);
1191         if (ret < 0) {
1192                 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1193                           intel_dp->aux.name, ret);
1194                 kfree(intel_dp->aux.name);
1195                 return ret;
1196         }
1197
1198         ret = sysfs_create_link(&connector->base.kdev->kobj,
1199                                 &intel_dp->aux.ddc.dev.kobj,
1200                                 intel_dp->aux.ddc.dev.kobj.name);
1201         if (ret < 0) {
1202                 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
1203                           intel_dp->aux.name, ret);
1204                 intel_dp_aux_fini(intel_dp);
1205                 return ret;
1206         }
1207
1208         return 0;
1209 }
1210
1211 static void
1212 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1213 {
1214         struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1215
1216         if (!intel_connector->mst_port)
1217                 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1218                                   intel_dp->aux.ddc.dev.kobj.name);
1219         intel_connector_unregister(intel_connector);
1220 }
1221
1222 static void
1223 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1224 {
1225         u32 ctrl1;
1226
1227         memset(&pipe_config->dpll_hw_state, 0,
1228                sizeof(pipe_config->dpll_hw_state));
1229
1230         pipe_config->ddi_pll_sel = SKL_DPLL0;
1231         pipe_config->dpll_hw_state.cfgcr1 = 0;
1232         pipe_config->dpll_hw_state.cfgcr2 = 0;
1233
1234         ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1235         switch (pipe_config->port_clock / 2) {
1236         case 81000:
1237                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1238                                               SKL_DPLL0);
1239                 break;
1240         case 135000:
1241                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1242                                               SKL_DPLL0);
1243                 break;
1244         case 270000:
1245                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1246                                               SKL_DPLL0);
1247                 break;
1248         case 162000:
1249                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1250                                               SKL_DPLL0);
1251                 break;
1252         /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1253         results in CDCLK change. Need to handle the change of CDCLK by
1254         disabling pipes and re-enabling them */
1255         case 108000:
1256                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1257                                               SKL_DPLL0);
1258                 break;
1259         case 216000:
1260                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1261                                               SKL_DPLL0);
1262                 break;
1263
1264         }
1265         pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1266 }
1267
1268 void
1269 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1270 {
1271         memset(&pipe_config->dpll_hw_state, 0,
1272                sizeof(pipe_config->dpll_hw_state));
1273
1274         switch (pipe_config->port_clock / 2) {
1275         case 81000:
1276                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1277                 break;
1278         case 135000:
1279                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1280                 break;
1281         case 270000:
1282                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1283                 break;
1284         }
1285 }
1286
1287 static int
1288 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1289 {
1290         if (intel_dp->num_sink_rates) {
1291                 *sink_rates = intel_dp->sink_rates;
1292                 return intel_dp->num_sink_rates;
1293         }
1294
1295         *sink_rates = default_rates;
1296
1297         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1298 }
1299
1300 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1301 {
1302         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1303         struct drm_device *dev = dig_port->base.base.dev;
1304
1305         /* WaDisableHBR2:skl */
1306         if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1307                 return false;
1308
1309         if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1310             (INTEL_INFO(dev)->gen >= 9))
1311                 return true;
1312         else
1313                 return false;
1314 }
1315
1316 static int
1317 intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
1318 {
1319         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1320         struct drm_device *dev = dig_port->base.base.dev;
1321         int size;
1322
1323         if (IS_BROXTON(dev)) {
1324                 *source_rates = bxt_rates;
1325                 size = ARRAY_SIZE(bxt_rates);
1326         } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1327                 *source_rates = skl_rates;
1328                 size = ARRAY_SIZE(skl_rates);
1329         } else {
1330                 *source_rates = default_rates;
1331                 size = ARRAY_SIZE(default_rates);
1332         }
1333
1334         /* This depends on the fact that 5.4 is last value in the array */
1335         if (!intel_dp_source_supports_hbr2(intel_dp))
1336                 size--;
1337
1338         return size;
1339 }
1340
1341 static void
1342 intel_dp_set_clock(struct intel_encoder *encoder,
1343                    struct intel_crtc_state *pipe_config)
1344 {
1345         struct drm_device *dev = encoder->base.dev;
1346         const struct dp_link_dpll *divisor = NULL;
1347         int i, count = 0;
1348
1349         if (IS_G4X(dev)) {
1350                 divisor = gen4_dpll;
1351                 count = ARRAY_SIZE(gen4_dpll);
1352         } else if (HAS_PCH_SPLIT(dev)) {
1353                 divisor = pch_dpll;
1354                 count = ARRAY_SIZE(pch_dpll);
1355         } else if (IS_CHERRYVIEW(dev)) {
1356                 divisor = chv_dpll;
1357                 count = ARRAY_SIZE(chv_dpll);
1358         } else if (IS_VALLEYVIEW(dev)) {
1359                 divisor = vlv_dpll;
1360                 count = ARRAY_SIZE(vlv_dpll);
1361         }
1362
1363         if (divisor && count) {
1364                 for (i = 0; i < count; i++) {
1365                         if (pipe_config->port_clock == divisor[i].clock) {
1366                                 pipe_config->dpll = divisor[i].dpll;
1367                                 pipe_config->clock_set = true;
1368                                 break;
1369                         }
1370                 }
1371         }
1372 }
1373
1374 static int intersect_rates(const int *source_rates, int source_len,
1375                            const int *sink_rates, int sink_len,
1376                            int *common_rates)
1377 {
1378         int i = 0, j = 0, k = 0;
1379
1380         while (i < source_len && j < sink_len) {
1381                 if (source_rates[i] == sink_rates[j]) {
1382                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1383                                 return k;
1384                         common_rates[k] = source_rates[i];
1385                         ++k;
1386                         ++i;
1387                         ++j;
1388                 } else if (source_rates[i] < sink_rates[j]) {
1389                         ++i;
1390                 } else {
1391                         ++j;
1392                 }
1393         }
1394         return k;
1395 }
1396
/*
 * Fill @common_rates with the rates both source and sink support and
 * return how many there are.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	source_len = intel_dp_source_rates(intel_dp, &source_rates);
	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}
1410
/*
 * Format @nelem integers from @array into @str as a comma-separated
 * list, truncating silently when @len is too small.
 *
 * Fix: the old truncation test compared a signed int against size_t
 * (`r >= len`); a negative snprintf error return only bailed out by
 * accident of the unsigned conversion.  Check for errors explicitly
 * and cast for the truncation comparison.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/* Stop on encoding error or once the output is truncated. */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1426
/*
 * Dump the source, sink and common link rate lists to the KMS debug
 * log.  Bails out early when KMS debugging is disabled so the string
 * formatting work is skipped entirely.
 */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(intel_dp, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1449
1450 static int rate_to_index(int find, const int *rates)
1451 {
1452         int i = 0;
1453
1454         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1455                 if (find == rates[i])
1456                         break;
1457
1458         return i;
1459 }
1460
1461 int
1462 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1463 {
1464         int rates[DP_MAX_SUPPORTED_RATES] = {};
1465         int len;
1466
1467         len = intel_dp_common_rates(intel_dp, rates);
1468         if (WARN_ON(len <= 0))
1469                 return 162000;
1470
1471         return rates[rate_to_index(0, rates) - 1];
1472 }
1473
/* Translate @rate into its index within the sink's advertised rate table. */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1478
1479 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1480                            uint8_t *link_bw, uint8_t *rate_select)
1481 {
1482         if (intel_dp->num_sink_rates) {
1483                 *link_bw = 0;
1484                 *rate_select =
1485                         intel_dp_rate_select(intel_dp, port_clock);
1486         } else {
1487                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1488                 *rate_select = 0;
1489         }
1490 }
1491
1492 bool
1493 intel_dp_compute_config(struct intel_encoder *encoder,
1494                         struct intel_crtc_state *pipe_config)
1495 {
1496         struct drm_device *dev = encoder->base.dev;
1497         struct drm_i915_private *dev_priv = dev->dev_private;
1498         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1499         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1500         enum port port = dp_to_dig_port(intel_dp)->port;
1501         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1502         struct intel_connector *intel_connector = intel_dp->attached_connector;
1503         int lane_count, clock;
1504         int min_lane_count = 1;
1505         int max_lane_count = intel_dp_max_lane_count(intel_dp);
1506         /* Conveniently, the link BW constants become indices with a shift...*/
1507         int min_clock = 0;
1508         int max_clock;
1509         int bpp, mode_rate;
1510         int link_avail, link_clock;
1511         int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1512         int common_len;
1513         uint8_t link_bw, rate_select;
1514
1515         common_len = intel_dp_common_rates(intel_dp, common_rates);
1516
1517         /* No common link rates between source and sink */
1518         WARN_ON(common_len <= 0);
1519
1520         max_clock = common_len - 1;
1521
1522         if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1523                 pipe_config->has_pch_encoder = true;
1524
1525         pipe_config->has_dp_encoder = true;
1526         pipe_config->has_drrs = false;
1527         pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1528
1529         if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1530                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1531                                        adjusted_mode);
1532
1533                 if (INTEL_INFO(dev)->gen >= 9) {
1534                         int ret;
1535                         ret = skl_update_scaler_crtc(pipe_config);
1536                         if (ret)
1537                                 return ret;
1538                 }
1539
1540                 if (HAS_GMCH_DISPLAY(dev))
1541                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
1542                                                  intel_connector->panel.fitting_mode);
1543                 else
1544                         intel_pch_panel_fitting(intel_crtc, pipe_config,
1545                                                 intel_connector->panel.fitting_mode);
1546         }
1547
1548         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1549                 return false;
1550
1551         DRM_DEBUG_KMS("DP link computation with max lane count %i "
1552                       "max bw %d pixel clock %iKHz\n",
1553                       max_lane_count, common_rates[max_clock],
1554                       adjusted_mode->crtc_clock);
1555
1556         /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1557          * bpc in between. */
1558         bpp = pipe_config->pipe_bpp;
1559         if (is_edp(intel_dp)) {
1560
1561                 /* Get bpp from vbt only for panels that dont have bpp in edid */
1562                 if (intel_connector->base.display_info.bpc == 0 &&
1563                         (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1564                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1565                                       dev_priv->vbt.edp_bpp);
1566                         bpp = dev_priv->vbt.edp_bpp;
1567                 }
1568
1569                 /*
1570                  * Use the maximum clock and number of lanes the eDP panel
1571                  * advertizes being capable of. The panels are generally
1572                  * designed to support only a single clock and lane
1573                  * configuration, and typically these values correspond to the
1574                  * native resolution of the panel.
1575                  */
1576                 min_lane_count = max_lane_count;
1577                 min_clock = max_clock;
1578         }
1579
1580         for (; bpp >= 6*3; bpp -= 2*3) {
1581                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1582                                                    bpp);
1583
1584                 for (clock = min_clock; clock <= max_clock; clock++) {
1585                         for (lane_count = min_lane_count;
1586                                 lane_count <= max_lane_count;
1587                                 lane_count <<= 1) {
1588
1589                                 link_clock = common_rates[clock];
1590                                 link_avail = intel_dp_max_data_rate(link_clock,
1591                                                                     lane_count);
1592
1593                                 if (mode_rate <= link_avail) {
1594                                         goto found;
1595                                 }
1596                         }
1597                 }
1598         }
1599
1600         return false;
1601
1602 found:
1603         if (intel_dp->color_range_auto) {
1604                 /*
1605                  * See:
1606                  * CEA-861-E - 5.1 Default Encoding Parameters
1607                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1608                  */
1609                 pipe_config->limited_color_range =
1610                         bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1611         } else {
1612                 pipe_config->limited_color_range =
1613                         intel_dp->limited_color_range;
1614         }
1615
1616         pipe_config->lane_count = lane_count;
1617
1618         pipe_config->pipe_bpp = bpp;
1619         pipe_config->port_clock = common_rates[clock];
1620
1621         intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1622                               &link_bw, &rate_select);
1623
1624         DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1625                       link_bw, rate_select, pipe_config->lane_count,
1626                       pipe_config->port_clock, bpp);
1627         DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1628                       mode_rate, link_avail);
1629
1630         intel_link_compute_m_n(bpp, lane_count,
1631                                adjusted_mode->crtc_clock,
1632                                pipe_config->port_clock,
1633                                &pipe_config->dp_m_n);
1634
1635         if (intel_connector->panel.downclock_mode != NULL &&
1636                 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1637                         pipe_config->has_drrs = true;
1638                         intel_link_compute_m_n(bpp, lane_count,
1639                                 intel_connector->panel.downclock_mode->clock,
1640                                 pipe_config->port_clock,
1641                                 &pipe_config->dp_m2_n2);
1642         }
1643
1644         if ((IS_SKYLAKE(dev)  || IS_KABYLAKE(dev)) && is_edp(intel_dp))
1645                 skl_edp_set_pll_config(pipe_config);
1646         else if (IS_BROXTON(dev))
1647                 /* handled in ddi */;
1648         else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1649                 hsw_dp_set_ddi_pll_sel(pipe_config);
1650         else
1651                 intel_dp_set_clock(encoder, pipe_config);
1652
1653         return true;
1654 }
1655
/*
 * Cache the link rate and lane count chosen in the computed crtc state
 * so that subsequent link training uses the same parameters.
 */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
                              const struct intel_crtc_state *pipe_config)
{
        intel_dp->link_rate = pipe_config->port_clock;
        intel_dp->lane_count = pipe_config->lane_count;
}
1662
/*
 * Build the DP port register value (intel_dp->DP) for the mode being
 * set, accounting for the per-platform register layout differences
 * described in the comment below. Nothing is enabled here; the value
 * is written to the hardware later in the enable sequence.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

        intel_dp_set_link_params(intel_dp, crtc->config);

        /*
         * There are four kinds of DP registers:
         *
         *      IBX PCH
         *      SNB CPU
         *      IVB CPU
         *      CPT PCH
         *
         * IBX PCH and CPU are the same for almost everything,
         * except that the CPU DP PLL is configured in this
         * register
         *
         * CPT PCH is quite different, having many bits moved
         * to the TRANS_DP_CTL register instead. That
         * configuration happens (oddly) in ironlake_pch_enable
         */

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

        /* Handle DP bits in common between all three register formats */
        intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

        /* Split out the IBX/CPU vs CPT settings */

        /* gen7 port A: sync polarity and pipe select (bits 29+) live in
         * the port register itself. */
        if (IS_GEN7(dev) && port == PORT_A) {
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                intel_dp->DP |= crtc->pipe << 29;
        } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
                /* CPT: enhanced framing is configured in TRANS_DP_CTL,
                 * not in the port register. */
                u32 trans_dp;

                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        trans_dp |= TRANS_DP_ENH_FRAMING;
                else
                        trans_dp &= ~TRANS_DP_ENH_FRAMING;
                I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
        } else {
                /* IBX PCH / CPU style: everything in the port register. */
                if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
                    crtc->config->limited_color_range)
                        intel_dp->DP |= DP_COLOR_RANGE_16_235;

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                if (IS_CHERRYVIEW(dev))
                        intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
                else if (crtc->pipe == PIPE_B)
                        intel_dp->DP |= DP_PIPEB_SELECT;
        }
}
1744
/*
 * Panel power sequencer (PP_STATUS) mask/value pairs used by
 * wait_panel_status() to wait for the on, off and power-cycle-idle
 * states. The explicit zero terms keep the columns aligned with the
 * register fields that each state ignores.
 */
#define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1753
/*
 * Poll the panel power sequencer status register until
 * (PP_STATUS & mask) == value, giving up after a 5 second timeout
 * (in which case only an error is logged). Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
                                       u32 mask,
                                       u32 value)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        i915_reg_t pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
                        mask, value,
                        I915_READ(pp_stat_reg),
                        I915_READ(pp_ctrl_reg));

        if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
                /* Timeout is not fatal here; later steps may still recover. */
                DRM_ERROR("Panel status timeout: status %08x control %08x\n",
                                I915_READ(pp_stat_reg),
                                I915_READ(pp_ctrl_reg));
        }

        DRM_DEBUG_KMS("Wait complete\n");
}
1780
/* Wait until the power sequencer reports the panel on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power on\n");
        wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1786
/* Wait until the power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power off time\n");
        wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1792
/*
 * Wait out the minimum off->on power cycle time (measured from the
 * last power-off in last_power_cycle) before the panel may be powered
 * up again, then wait for the sequencer to go idle.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power cycle\n");

        /* When we disable the VDD override bit last we have to do the manual
         * wait. */
        wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
                                       intel_dp->panel_power_cycle_delay);

        wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1804
/* Enforce the required delay between panel power on and backlight on. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
                                       intel_dp->backlight_on_delay);
}
1810
/* Wait out the backlight-off delay, measured from the last disable. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
                                       intel_dp->backlight_off_delay);
}
1816
1817 /* Read the current pp_control value, unlocking the register if it
1818  * is locked
1819  */
1820
1821 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1822 {
1823         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1824         struct drm_i915_private *dev_priv = dev->dev_private;
1825         u32 control;
1826
1827         lockdep_assert_held(&dev_priv->pps_mutex);
1828
1829         control = I915_READ(_pp_ctrl_reg(intel_dp));
1830         if (!IS_BROXTON(dev)) {
1831                 control &= ~PANEL_UNLOCK_MASK;
1832                 control |= PANEL_UNLOCK_REGS;
1833         }
1834         return control;
1835 }
1836
1837 /*
1838  * Must be paired with edp_panel_vdd_off().
1839  * Must hold pps_mutex around the whole on/off sequence.
1840  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1841  */
1842 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1843 {
1844         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1845         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1846         struct intel_encoder *intel_encoder = &intel_dig_port->base;
1847         struct drm_i915_private *dev_priv = dev->dev_private;
1848         enum intel_display_power_domain power_domain;
1849         u32 pp;
1850         i915_reg_t pp_stat_reg, pp_ctrl_reg;
1851         bool need_to_disable = !intel_dp->want_panel_vdd;
1852
1853         lockdep_assert_held(&dev_priv->pps_mutex);
1854
1855         if (!is_edp(intel_dp))
1856                 return false;
1857
1858         cancel_delayed_work(&intel_dp->panel_vdd_work);
1859         intel_dp->want_panel_vdd = true;
1860
1861         if (edp_have_panel_vdd(intel_dp))
1862                 return need_to_disable;
1863
1864         power_domain = intel_display_port_aux_power_domain(intel_encoder);
1865         intel_display_power_get(dev_priv, power_domain);
1866
1867         DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1868                       port_name(intel_dig_port->port));
1869
1870         if (!edp_have_panel_power(intel_dp))
1871                 wait_panel_power_cycle(intel_dp);
1872
1873         pp = ironlake_get_pp_control(intel_dp);
1874         pp |= EDP_FORCE_VDD;
1875
1876         pp_stat_reg = _pp_stat_reg(intel_dp);
1877         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1878
1879         I915_WRITE(pp_ctrl_reg, pp);
1880         POSTING_READ(pp_ctrl_reg);
1881         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1882                         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1883         /*
1884          * If the panel wasn't on, delay before accessing aux channel
1885          */
1886         if (!edp_have_panel_power(intel_dp)) {
1887                 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1888                               port_name(intel_dig_port->port));
1889                 msleep(intel_dp->panel_power_up_delay);
1890         }
1891
1892         return need_to_disable;
1893 }
1894
1895 /*
1896  * Must be paired with intel_edp_panel_vdd_off() or
1897  * intel_edp_panel_off().
1898  * Nested calls to these functions are not allowed since
1899  * we drop the lock. Caller must use some higher level
1900  * locking to prevent nested calls from other threads.
1901  */
1902 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1903 {
1904         bool vdd;
1905
1906         if (!is_edp(intel_dp))
1907                 return;
1908
1909         pps_lock(intel_dp);
1910         vdd = edp_panel_vdd_on(intel_dp);
1911         pps_unlock(intel_dp);
1912
1913         I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1914              port_name(dp_to_dig_port(intel_dp)->port));
1915 }
1916
/*
 * Actually turn panel VDD off in hardware. Caller must hold pps_mutex
 * and want_panel_vdd must already be false (WARNed below). Drops the
 * AUX power domain reference taken in edp_panel_vdd_on().
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *intel_dig_port =
                dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        enum intel_display_power_domain power_domain;
        u32 pp;
        i915_reg_t pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        WARN_ON(intel_dp->want_panel_vdd);

        if (!edp_have_panel_vdd(intel_dp))
                return;

        DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
                      port_name(intel_dig_port->port));

        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_FORCE_VDD;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp_stat_reg = _pp_stat_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Make sure sequencer is idle before allowing subsequent activity */
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

        /* Panel was fully off: start the power-cycle delay clock. */
        if ((pp & POWER_TARGET_ON) == 0)
                intel_dp->last_power_cycle = jiffies;

        power_domain = intel_display_port_aux_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}
1957
1958 static void edp_panel_vdd_work(struct work_struct *__work)
1959 {
1960         struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1961                                                  struct intel_dp, panel_vdd_work);
1962
1963         pps_lock(intel_dp);
1964         if (!intel_dp->want_panel_vdd)
1965                 edp_panel_vdd_off_sync(intel_dp);
1966         pps_unlock(intel_dp);
1967 }
1968
1969 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1970 {
1971         unsigned long delay;
1972
1973         /*
1974          * Queue the timer to fire a long time from now (relative to the power
1975          * down delay) to keep the panel power up across a sequence of
1976          * operations.
1977          */
1978         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1979         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1980 }
1981
1982 /*
1983  * Must be paired with edp_panel_vdd_on().
1984  * Must hold pps_mutex around the whole on/off sequence.
1985  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1986  */
1987 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1988 {
1989         struct drm_i915_private *dev_priv =
1990                 intel_dp_to_dev(intel_dp)->dev_private;
1991
1992         lockdep_assert_held(&dev_priv->pps_mutex);
1993
1994         if (!is_edp(intel_dp))
1995                 return;
1996
1997         I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1998              port_name(dp_to_dig_port(intel_dp)->port));
1999
2000         intel_dp->want_panel_vdd = false;
2001
2002         if (sync)
2003                 edp_panel_vdd_off_sync(intel_dp);
2004         else
2005                 edp_panel_vdd_schedule_off(intel_dp);
2006 }
2007
/*
 * Turn eDP panel power on via the power sequencer. Caller must hold
 * pps_mutex. No-op (with a WARN) if panel power is already on.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        i915_reg_t pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        if (WARN(edp_have_panel_power(intel_dp),
                 "eDP port %c panel power already on\n",
                 port_name(dp_to_dig_port(intel_dp)->port)))
                return;

        /* Respect the minimum off->on cycle time first. */
        wait_panel_power_cycle(intel_dp);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        if (IS_GEN5(dev)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }

        pp |= POWER_TARGET_ON;
        if (!IS_GEN5(dev))
                pp |= PANEL_POWER_RESET;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        wait_panel_on(intel_dp);
        /* Timestamp for the backlight-on delay (see wait_backlight_on). */
        intel_dp->last_power_on = jiffies;

        if (IS_GEN5(dev)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }
}
2055
/* Public wrapper: turn eDP panel power on under the PPS lock. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_on(intel_dp);
        pps_unlock(intel_dp);
}
2065
2066
/*
 * Turn eDP panel power off. Caller must hold pps_mutex and must have
 * a VDD reference (WARNed below); the VDD override is cleared here in
 * the same register write, and the matching power domain reference is
 * released at the end.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        i915_reg_t pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
             port_name(dp_to_dig_port(intel_dp)->port));

        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
                EDP_BLC_ENABLE);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        intel_dp->want_panel_vdd = false;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Start the power-cycle delay clock, then wait for full off. */
        intel_dp->last_power_cycle = jiffies;
        wait_panel_off(intel_dp);

        /* We got a reference when we enabled the VDD. */
        power_domain = intel_display_port_aux_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}
2108
/* Public wrapper: turn eDP panel power off under the PPS lock. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_off(intel_dp);
        pps_unlock(intel_dp);
}
2118
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        i915_reg_t pp_ctrl_reg;

        /*
         * If we enable the backlight right away following a panel power
         * on, we may see slight flicker as the panel syncs with the eDP
         * link.  So delay a bit to make sure the image is solid before
         * allowing it to appear.
         */
        wait_backlight_on(intel_dp);

        pps_lock(intel_dp);

        /* Set only the BLC enable bit; everything else is preserved. */
        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);
}
2148
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        /* PWM first, then the panel power control backlight enable. */
        intel_panel_enable_backlight(intel_dp->attached_connector);
        _intel_edp_backlight_on(intel_dp);
}
2160
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        i915_reg_t pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);

        /* Clear only the BLC enable bit; everything else is preserved. */
        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);

        /* Timestamp + wait so the next enable honours the off delay. */
        intel_dp->last_backlight_off = jiffies;
        edp_wait_backlight_off(intel_dp);
}
2187
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        /* Panel power control backlight off first, then the PWM. */
        _intel_edp_backlight_off(intel_dp);
        intel_panel_disable_backlight(intel_dp->attached_connector);
}
2199
2200 /*
2201  * Hook for controlling the panel power control backlight through the bl_power
2202  * sysfs attribute. Take care to handle multiple calls.
2203  */
2204 static void intel_edp_backlight_power(struct intel_connector *connector,
2205                                       bool enable)
2206 {
2207         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2208         bool is_enabled;
2209
2210         pps_lock(intel_dp);
2211         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2212         pps_unlock(intel_dp);
2213
2214         if (is_enabled == enable)
2215                 return;
2216
2217         DRM_DEBUG_KMS("panel power control backlight %s\n",
2218                       enable ? "enable" : "disable");
2219
2220         if (enable)
2221                 _intel_edp_backlight_on(intel_dp);
2222         else
2223                 _intel_edp_backlight_off(intel_dp);
2224 }
2225
/* Human-readable on/off string for state assertion messages. */
static const char *state_string(bool enabled)
{
        if (enabled)
                return "on";

        return "off";
}
2230
2231 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2232 {
2233         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2234         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2235         bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2236
2237         I915_STATE_WARN(cur_state != state,
2238                         "DP port %c state assertion failure (expected %s, current %s)\n",
2239                         port_name(dig_port->port),
2240                         state_string(state), state_string(cur_state));
2241 }
2242 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2243
2244 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2245 {
2246         bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2247
2248         I915_STATE_WARN(cur_state != state,
2249                         "eDP PLL state assertion failure (expected %s, current %s)\n",
2250                         state_string(state), state_string(cur_state));
2251 }
2252 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2253 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2254
/*
 * Enable the CPU eDP PLL in DP_A. The pipe, DP port and PLL must all
 * be off on entry (asserted below).
 * NOTE(review): the two-step write with udelays looks like the
 * required frequency-select-then-enable sequence — confirm against
 * bspec before reordering.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        assert_pipe_disabled(dev_priv, crtc->pipe);
        assert_dp_port_disabled(intel_dp);
        assert_edp_pll_disabled(dev_priv);

        DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
                      crtc->config->port_clock);

        /* First select the PLL frequency... */
        intel_dp->DP &= ~DP_PLL_FREQ_MASK;

        if (crtc->config->port_clock == 162000)
                intel_dp->DP |= DP_PLL_FREQ_162MHZ;
        else
                intel_dp->DP |= DP_PLL_FREQ_270MHZ;

        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        udelay(500);

        /* ...then enable the PLL in a separate write. */
        intel_dp->DP |= DP_PLL_ENABLE;

        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        udelay(200);
}
2285
/*
 * Disable the CPU eDP PLL in DP_A. The pipe and DP port must already
 * be off, and the PLL on, on entry (asserted below).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        assert_pipe_disabled(dev_priv, crtc->pipe);
        assert_dp_port_disabled(intel_dp);
        assert_edp_pll_enabled(dev_priv);

        DRM_DEBUG_KMS("disabling eDP PLL\n");

        intel_dp->DP &= ~DP_PLL_ENABLE;

        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        udelay(200);
}
2304
2305 /* If the sink supports it, try to set the power state appropriately */
2306 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2307 {
2308         int ret, i;
2309
2310         /* Should have a valid DPCD by this point */
2311         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2312                 return;
2313
2314         if (mode != DRM_MODE_DPMS_ON) {
2315                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2316                                          DP_SET_POWER_D3);
2317         } else {
2318                 /*
2319                  * When turning on, we need to retry for 1ms to give the sink
2320                  * time to wake up.
2321                  */
2322                 for (i = 0; i < 3; i++) {
2323                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2324                                                  DP_SET_POWER_D0);
2325                         if (ret == 1)
2326                                 break;
2327                         msleep(1);
2328                 }
2329         }
2330
2331         if (ret != 1)
2332                 DRM_DEBUG_KMS("failed to %s sink power state\n",
2333                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2334 }
2335
/*
 * Read hardware state: returns true and sets *pipe when the DP port
 * is enabled; returns false when the power domain or the port is off.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                                  enum pipe *pipe)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 tmp;

        power_domain = intel_display_port_power_domain(encoder);
        if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;

        tmp = I915_READ(intel_dp->output_reg);

        if (!(tmp & DP_PORT_EN))
                return false;

        if (IS_GEN7(dev) && port == PORT_A) {
                *pipe = PORT_TO_PIPE_CPT(tmp);
        } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
                /* CPT: pipe<->port mapping lives in TRANS_DP_CTL,
                 * so scan all pipes for one driving this port. */
                enum pipe p;

                for_each_pipe(dev_priv, p) {
                        u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
                        if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
                                *pipe = p;
                                return true;
                        }
                }

                DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
                              i915_mmio_reg_offset(intel_dp->output_reg));
        } else if (IS_CHERRYVIEW(dev)) {
                *pipe = DP_PORT_TO_PIPE_CHV(tmp);
        } else {
                *pipe = PORT_TO_PIPE(tmp);
        }

        return true;
}
2378
/*
 * Decode the current hardware state of the DP port into pipe_config:
 * sync polarity flags, color range, lane count, m/n values, port
 * clock and dotclock. Used for state readout/verification.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_state *pipe_config)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        u32 tmp, flags = 0;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        int dotclock;

        tmp = I915_READ(intel_dp->output_reg);

        pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

        /* On CPT the sync polarity lives in TRANS_DP_CTL, elsewhere in
         * the port register itself. */
        if (HAS_PCH_CPT(dev) && port != PORT_A) {
                u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

                if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        } else {
                if (tmp & DP_SYNC_HS_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & DP_SYNC_VS_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        }

        pipe_config->base.adjusted_mode.flags |= flags;

        if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
            tmp & DP_COLOR_RANGE_16_235)
                pipe_config->limited_color_range = true;

        pipe_config->has_dp_encoder = true;

        pipe_config->lane_count =
                ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

        intel_dp_get_m_n(crtc, pipe_config);

        /* Port A: read the link rate back from the eDP PLL frequency. */
        if (port == PORT_A) {
                if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
                        pipe_config->port_clock = 162000;
                else
                        pipe_config->port_clock = 270000;
        }

        dotclock = intel_dotclock_calculate(pipe_config->port_clock,
                                            &pipe_config->dp_m_n);

        if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
                ironlake_check_encoder_dotclock(pipe_config, dotclock);

        pipe_config->base.adjusted_mode.crtc_clock = dotclock;

        if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
                /*
                 * This is a big fat ugly hack.
                 *
                 * Some machines in UEFI boot mode provide us a VBT that has 18
                 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
                 * unknown we fail to light up. Yet the same BIOS boots up with
                 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
                 * max, not what it tells us to use.
                 *
                 * Note: This will still be broken if the eDP panel is not lit
                 * up by the BIOS, and thus we can't get the mode at module
                 * load.
                 */
                DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                              pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
                dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
        }
}
2466
/*
 * Disable the DP port: stop audio and PSR, then run the eDP panel power-off
 * sequence, and on pre-ilk (gen < 5) also take the link down here.
 *
 * The ordering below is deliberate: vdd must be held while the backlight,
 * sink DPMS state and panel are switched off.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        if (crtc->config->has_audio)
                intel_audio_codec_disable(encoder);

        /* On DDI platforms PSR teardown is handled elsewhere */
        if (HAS_PSR(dev) && !HAS_DDI(dev))
                intel_psr_disable(intel_dp);

        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
        intel_edp_panel_vdd_on(intel_dp);
        intel_edp_backlight_off(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        intel_edp_panel_off(intel_dp);

        /* disable the port before the pipe on g4x */
        if (INTEL_INFO(dev)->gen < 5)
                intel_dp_link_down(intel_dp);
}
2490
2491 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2492 {
2493         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2494         enum port port = dp_to_dig_port(intel_dp)->port;
2495
2496         intel_dp_link_down(intel_dp);
2497
2498         /* Only ilk+ has port A */
2499         if (port == PORT_A)
2500                 ironlake_edp_pll_off(intel_dp);
2501 }
2502
2503 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2504 {
2505         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2506
2507         intel_dp_link_down(intel_dp);
2508 }
2509
/*
 * Assert (reset=true) or deassert (reset=false) the CHV data lane soft reset
 * via the DPIO sideband registers.
 *
 * Lanes 0/1 (PCS01) are always programmed; lanes 2/3 (PCS23) only when the
 * configured lane count is greater than 2. Caller must hold sb_lock (all
 * call sites in this file take it around this function).
 */
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
                                     bool reset)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        enum pipe pipe = crtc->pipe;
        uint32_t val;

        /* TX lane reset bits for lanes 0/1 */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
        if (reset)
                val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        else
                val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

        if (crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
                if (reset)
                        val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
                else
                        val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
        }

        /* Clock soft reset for lanes 0/1; softreset request always enabled */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        if (reset)
                val &= ~DPIO_PCS_CLK_SOFT_RESET;
        else
                val |= DPIO_PCS_CLK_SOFT_RESET;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

        if (crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
                val |= CHV_PCS_REQ_SOFTRESET_EN;
                if (reset)
                        val &= ~DPIO_PCS_CLK_SOFT_RESET;
                else
                        val |= DPIO_PCS_CLK_SOFT_RESET;
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
        }
}
2553
2554 static void chv_post_disable_dp(struct intel_encoder *encoder)
2555 {
2556         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2557         struct drm_device *dev = encoder->base.dev;
2558         struct drm_i915_private *dev_priv = dev->dev_private;
2559
2560         intel_dp_link_down(intel_dp);
2561
2562         mutex_lock(&dev_priv->sb_lock);
2563
2564         /* Assert data lane reset */
2565         chv_data_lane_soft_reset(encoder, true);
2566
2567         mutex_unlock(&dev_priv->sb_lock);
2568 }
2569
/*
 * Encode the requested link training pattern for the source hardware.
 *
 * Three register layouts exist:
 *  - DDI (HSW+): programmed directly into DP_TP_CTL here;
 *  - CPT-style (gen7 port A, or PCH CPT non-A ports): encoded into *DP;
 *  - classic g4x/VLV/CHV layout: encoded into *DP.
 * In the non-DDI cases *DP is only modified; the caller writes it to the
 * port register (see e.g. intel_dp_enable_port).
 *
 * @DP:           in/out image of the DP port register to update
 * @dp_train_pat: DPCD-style pattern plus DP_LINK_SCRAMBLING_DISABLE flag
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
                         uint32_t *DP,
                         uint8_t dp_train_pat)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        if (HAS_DDI(dev)) {
                uint32_t temp = I915_READ(DP_TP_CTL(port));

                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
                else
                        temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

                        break;
                case DP_TRAINING_PATTERN_1:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
                        break;
                }
                I915_WRITE(DP_TP_CTL(port), temp);

        } else if ((IS_GEN7(dev) && port == PORT_A) ||
                   (HAS_PCH_CPT(dev) && port != PORT_A)) {
                *DP &= ~DP_LINK_TRAIN_MASK_CPT;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF_CPT;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1_CPT;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* CPT encoding has no pattern 3; fall back to pattern 2 */
                        DRM_ERROR("DP training pattern 3 not supported\n");
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }

        } else {
                if (IS_CHERRYVIEW(dev))
                        *DP &= ~DP_LINK_TRAIN_MASK_CHV;
                else
                        *DP &= ~DP_LINK_TRAIN_MASK;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* only CHV has a pattern 3 encoding in this layout */
                        if (IS_CHERRYVIEW(dev)) {
                                *DP |= DP_LINK_TRAIN_PAT_3_CHV;
                        } else {
                                DRM_ERROR("DP training pattern 3 not supported\n");
                                *DP |= DP_LINK_TRAIN_PAT_2;
                        }
                        break;
                }
        }
}
2653
/*
 * Write the port register to bring the DP port up, preloaded with training
 * pattern 1. Note the deliberate two-step write sequence below — do not
 * collapse it into a single write.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc =
                to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);

        /* enable with pattern 1 (as per spec) */
        _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                 DP_TRAINING_PATTERN_1);

        /* first write: full register contents but with the port still off */
        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);

        /*
         * Magic for VLV/CHV. We _must_ first set up the register
         * without actually enabling the port, and then do another
         * write to enable the port. Otherwise link training will
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;
        if (crtc->config->has_audio)
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
}
2681
/*
 * Common DP enable path: bring up the port, run the eDP panel power-on
 * sequence, wake the sink and perform link training, then enable audio.
 *
 * The sequence is order-critical; it runs under pps_lock up to and
 * including the panel power-on, and temporarily suppresses FIFO underrun
 * reporting around known-spurious windows (see comments inline).
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);
        enum port port = dp_to_dig_port(intel_dp)->port;
        enum pipe pipe = crtc->pipe;

        /* the port must not already be enabled at this point */
        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev))
                vlv_init_panel_power_sequencer(intel_dp);

        /*
         * We get an occasional spurious underrun between the port
         * enable and vdd enable, when enabling port A eDP.
         *
         * FIXME: Not sure if this applies to (PCH) port D eDP as well
         */
        if (port == PORT_A)
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        intel_dp_enable_port(intel_dp);

        if (port == PORT_A && IS_GEN5(dev_priv)) {
                /*
                 * Underrun reporting for the other pipe was disabled in
                 * g4x_pre_enable_dp(). The eDP PLL and port have now been
                 * enabled, so it's now safe to re-enable underrun reporting.
                 */
                intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
                intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
                intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
        }

        /* panel power on, holding vdd just for the duration of the sequence */
        edp_panel_vdd_on(intel_dp);
        edp_panel_on(intel_dp);
        edp_panel_vdd_off(intel_dp, true);

        if (port == PORT_A)
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        pps_unlock(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                unsigned int lane_mask = 0x0;

                /* on CHV, mask out the lanes the config leaves unused */
                if (IS_CHERRYVIEW(dev))
                        lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);

                vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
                                    lane_mask);
        }

        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);

        if (crtc->config->has_audio) {
                DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                                 pipe_name(pipe));
                intel_audio_codec_enable(encoder);
        }
}
2751
2752 static void g4x_enable_dp(struct intel_encoder *encoder)
2753 {
2754         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2755
2756         intel_enable_dp(encoder);
2757         intel_edp_backlight_on(intel_dp);
2758 }
2759
2760 static void vlv_enable_dp(struct intel_encoder *encoder)
2761 {
2762         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2763
2764         intel_edp_backlight_on(intel_dp);
2765         intel_psr_enable(intel_dp);
2766 }
2767
/*
 * g4x/ilk pre-enable: program the port register, suppress known-spurious
 * FIFO underruns on ilk CPU eDP, and turn on the eDP PLL for port A.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;

        intel_dp_prepare(encoder);

        if (port == PORT_A && IS_GEN5(dev_priv)) {
                /*
                 * We get FIFO underruns on the other pipe when
                 * enabling the CPU eDP PLL, and when enabling CPU
                 * eDP port. We could potentially avoid the PLL
                 * underrun with a vblank wait just prior to enabling
                 * the PLL, but that doesn't appear to help the port
                 * enable case. Just sweep it all under the rug.
                 */
                intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
                intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
        }

        /* Only ilk+ has port A */
        if (port == PORT_A)
                ironlake_edp_pll_on(intel_dp);
}
2794
/*
 * Logically disconnect this port from its current power sequencer:
 * sync off vdd, clear the sequencer's port select, and mark pps_pipe
 * invalid. Caller holds pps_mutex (edp_panel_vdd_off_sync requires it).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
        enum pipe pipe = intel_dp->pps_pipe;
        i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

        edp_panel_vdd_off_sync(intel_dp);

        /*
         * VLV seems to get confused when multiple power sequencers
         * have the same port selected (even if only one has power/vdd
         * enabled). The failure manifests as vlv_wait_port_ready() failing.
         * CHV on the other hand doesn't seem to mind having the same port
         * selected in multiple power sequencers, but let's clear the
         * port select always when logically disconnecting a power sequencer
         * from a port.
         */
        DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));
        I915_WRITE(pp_on_reg, 0);
        POSTING_READ(pp_on_reg);

        intel_dp->pps_pipe = INVALID_PIPE;
}
2820
/*
 * Detach the given pipe's power sequencer from whichever eDP port is
 * currently using it, so the caller can claim it. Only pipes A and B
 * have power sequencers on VLV/CHV. Caller holds pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
                return;

        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *intel_dp;
                enum port port;

                /* only eDP ports own power sequencers */
                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                port = dp_to_dig_port(intel_dp)->port;

                if (intel_dp->pps_pipe != pipe)
                        continue;

                DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
                              pipe_name(pipe), port_name(port));

                /* stealing from an active port indicates a driver bug */
                WARN(encoder->base.crtc,
                     "stealing pipe %c power sequencer from active eDP port %c\n",
                     pipe_name(pipe), port_name(port));

                /* make sure vdd is off before we steal it */
                vlv_detach_power_sequencer(intel_dp);
        }
}
2857
/*
 * Bind this eDP port to the power sequencer of its current pipe:
 * release any sequencer the port held before, steal the target pipe's
 * sequencer from any other port, then (re)initialize it. No-op for
 * non-eDP ports or when the port already owns the right sequencer.
 * Caller holds pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        if (intel_dp->pps_pipe == crtc->pipe)
                return;

        /*
         * If another power sequencer was being used on this
         * port previously make sure to turn off vdd there while
         * we still have control of it.
         */
        if (intel_dp->pps_pipe != INVALID_PIPE)
                vlv_detach_power_sequencer(intel_dp);

        /*
         * We may be stealing the power
         * sequencer from another port.
         */
        vlv_steal_power_sequencer(dev, crtc->pipe);

        /* now it's all ours */
        intel_dp->pps_pipe = crtc->pipe;

        DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2898
/*
 * VLV pre-enable: program the DPIO PCS clock-channel registers for this
 * port, then run the common enable path (which on VLV performs the port
 * and panel bring-up).
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        u32 val;

        mutex_lock(&dev_priv->sb_lock);

        /*
         * NOTE(review): the value read here is immediately discarded by
         * "val = 0" below — presumably the read itself is wanted for its
         * sideband side effect. Confirm before cleaning this up.
         */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
        val = 0;
        if (pipe)
                val |= (1<<21);
        else
                val &= ~(1<<21);
        val |= 0x001000c4;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

        mutex_unlock(&dev_priv->sb_lock);

        intel_enable_dp(encoder);
}
2927
/*
 * VLV pre-PLL-enable: program the port register and put the DPIO Tx lane
 * reset/clock registers into their default state for this channel.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;

        intel_dp_prepare(encoder);

        /* Program Tx lane resets to default */
        mutex_lock(&dev_priv->sb_lock);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
                         DPIO_PCS_TX_LANE2_RESET |
                         DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
                         DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
                         DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
                         (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
                                 DPIO_PCS_CLK_SOFT_RESET);

        /* Fix up inter-pair skew failure */
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
        mutex_unlock(&dev_priv->sb_lock);
}
2957
/*
 * CHV pre-enable: program per-lane DPIO settings (FIFO reset source,
 * latency/upar bits, data lane stagger scaled by port clock), deassert
 * the data lane reset, then run the common enable path. Finally drop the
 * CL2 powergate override once the second common lane is self-sustaining.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        int data, i, stagger;
        u32 val;

        mutex_lock(&dev_priv->sb_lock);

        /* allow hardware to manage TX FIFO reset source */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

        if (intel_crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
                val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
        }

        /* Program Tx lane latency optimal setting */
        for (i = 0; i < intel_crtc->config->lane_count; i++) {
                /* Set the upar bit */
                if (intel_crtc->config->lane_count == 1)
                        data = 0x0;
                else
                        data = (i == 1) ? 0x0 : 0x1;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
                                data << DPIO_UPAR_SHIFT);
        }

        /* Data lane stagger programming: larger stagger for faster links */
        if (intel_crtc->config->port_clock > 270000)
                stagger = 0x18;
        else if (intel_crtc->config->port_clock > 135000)
                stagger = 0xd;
        else if (intel_crtc->config->port_clock > 67500)
                stagger = 0x7;
        else if (intel_crtc->config->port_clock > 33750)
                stagger = 0x4;
        else
                stagger = 0x2;

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
        val |= DPIO_TX2_STAGGER_MASK(0x1f);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

        if (intel_crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
                val |= DPIO_TX2_STAGGER_MASK(0x1f);
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
        }

        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
                       DPIO_LANESTAGGER_STRAP(stagger) |
                       DPIO_LANESTAGGER_STRAP_OVRD |
                       DPIO_TX1_STAGGER_MASK(0x1f) |
                       DPIO_TX1_STAGGER_MULT(6) |
                       DPIO_TX2_STAGGER_MULT(0));

        if (intel_crtc->config->lane_count > 2) {
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
                               DPIO_LANESTAGGER_STRAP(stagger) |
                               DPIO_LANESTAGGER_STRAP_OVRD |
                               DPIO_TX1_STAGGER_MASK(0x1f) |
                               DPIO_TX1_STAGGER_MULT(7) |
                               DPIO_TX2_STAGGER_MULT(5));
        }

        /* Deassert data lane reset */
        chv_data_lane_soft_reset(encoder, false);

        mutex_unlock(&dev_priv->sb_lock);

        intel_enable_dp(encoder);

        /* Second common lane will stay alive on its own now */
        if (dport->release_cl2_override) {
                chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
                dport->release_cl2_override = false;
        }
}
3046
/*
 * CHV pre-PLL-enable: power up the lanes/common lanes this config needs,
 * assert data lane reset, and program clock distribution and clock
 * channel usage in the DPIO PHY before the PLL is enabled.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum pipe pipe = intel_crtc->pipe;
        unsigned int lane_mask =
                intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
        u32 val;

        intel_dp_prepare(encoder);

        /*
         * Must trick the second common lane into life.
         * Otherwise we can't even access the PLL.
         */
        if (ch == DPIO_CH0 && pipe == PIPE_B)
                dport->release_cl2_override =
                        !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

        chv_phy_powergate_lanes(encoder, true, lane_mask);

        mutex_lock(&dev_priv->sb_lock);

        /* Assert data lane reset */
        chv_data_lane_soft_reset(encoder, true);

        /* program left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
                val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA1_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA1_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
        } else {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
                val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA2_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA2_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
        }

        /* program clock channel usage */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
        val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
        if (pipe != PIPE_B)
                val &= ~CHV_PCS_USEDCLKCHANNEL;
        else
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

        if (intel_crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
                val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
                if (pipe != PIPE_B)
                        val &= ~CHV_PCS_USEDCLKCHANNEL;
                else
                        val |= CHV_PCS_USEDCLKCHANNEL;
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
        }

        /*
         * This is a bit weird since generally CL
         * matches the pipe, but here we need to
         * pick the CL based on the port.
         */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
        if (pipe != PIPE_B)
                val &= ~CHV_CMN_USEDCLKCHANNEL;
        else
                val |= CHV_CMN_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

        mutex_unlock(&dev_priv->sb_lock);
}
3129
/*
 * CHV post-PLL-disable: undo the clock distribution forcing done in
 * chv_dp_pre_pll_enable() and drop the lane powergate overrides.
 */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
        u32 val;

        mutex_lock(&dev_priv->sb_lock);

        /* disable left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
                val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
        } else {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
                val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
        }

        mutex_unlock(&dev_priv->sb_lock);

        /*
         * Leave the power down bit cleared for at least one
         * lane so that chv_powergate_phy_ch() will power
         * on something when the channel is otherwise unused.
         * When the port is off and the override is removed
         * the lanes power down anyway, so otherwise it doesn't
         * really matter what the state of power down bits is
         * after this.
         */
        chv_phy_powergate_lanes(encoder, false, 0x0);
}
3162
3163 /*
3164  * Native read with retry for link status and receiver capability reads for
3165  * cases where the sink may still be asleep.
3166  *
3167  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3168  * supposed to retry 3 times per the spec.
3169  */
3170 static ssize_t
3171 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3172                         void *buffer, size_t size)
3173 {
3174         ssize_t ret;
3175         int i;
3176
3177         /*
3178          * Sometime we just get the same incorrect byte repeated
3179          * over the entire buffer. Doing just one throw away read
3180          * initially seems to "solve" it.
3181          */
3182         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3183
3184         for (i = 0; i < 3; i++) {
3185                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3186                 if (ret == size)
3187                         return ret;
3188                 msleep(1);
3189         }
3190
3191         return ret;
3192 }
3193
3194 /*
3195  * Fetch AUX CH registers 0x202 - 0x207 which contain
3196  * link status information
3197  */
3198 bool
3199 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3200 {
3201         return intel_dp_dpcd_read_wake(&intel_dp->aux,
3202                                        DP_LANE0_1_STATUS,
3203                                        link_status,
3204                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3205 }
3206
/*
 * These are source-specific values.
 *
 * Return the maximum voltage swing level (DP_TRAIN_VOLTAGE_SWING_LEVEL_*)
 * the source hardware supports for this port. The branch order matters:
 * more specific platform checks must come before the generic fallbacks.
 */
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (IS_BROXTON(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (INTEL_INFO(dev)->gen >= 9) {
		/* Gen9+: eDP on port A may use the low-vswing table (VBT opt-in). */
		if (dev_priv->edp_low_vswing && port == PORT_A)
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	} else if (IS_VALLEYVIEW(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_GEN7(dev) && port == PORT_A)
		/* IVB eDP (port A) tops out at level 2. */
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
3230
3231 uint8_t
3232 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3233 {
3234         struct drm_device *dev = intel_dp_to_dev(intel_dp);
3235         enum port port = dp_to_dig_port(intel_dp)->port;
3236
3237         if (INTEL_INFO(dev)->gen >= 9) {
3238                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3239                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3240                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
3241                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3242                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3243                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3244                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3245                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3246                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3247                 default:
3248                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3249                 }
3250         } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3251                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3252                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3253                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
3254                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3255                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3256                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3257                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3258                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3259                 default:
3260                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3261                 }
3262         } else if (IS_VALLEYVIEW(dev)) {
3263                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3264                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3265                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
3266                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3267                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3268                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3269                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3270                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3271                 default:
3272                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3273                 }
3274         } else if (IS_GEN7(dev) && port == PORT_A) {
3275                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3276                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3277                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3278                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3279                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3280                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3281                 default:
3282                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3283                 }
3284         } else {
3285                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3286                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3287                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3288                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3289                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3290                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3291                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3292                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3293                 default:
3294                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3295                 }
3296         }
3297 }
3298
/*
 * Program the VLV DPIO PHY according to the swing/pre-emphasis request
 * in intel_dp->train_set[0]. The register values are hardware tuning
 * constants; an unsupported swing/pre-emphasis combination bails out
 * without touching the PHY. Always returns 0 (the DP port register
 * itself carries no signal-level bits on VLV).
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/* Pick the tuning constants for the requested pre-emph/swing pair. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0; /* unsupported combination: leave PHY alone */
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/*
	 * Write the DPIO registers under the sideband lock. TX_DW5 is
	 * written 0 first and 0x80000000 last — presumably this gates the
	 * update of the other lanes' registers (TODO: confirm against the
	 * VLV DPIO documentation).
	 */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3398
3399 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3400 {
3401         return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3402                 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3403 }
3404
/*
 * Program the CHV DPIO PHY with the de-emphasis/margin values matching
 * the request in intel_dp->train_set[0]. The programming sequence
 * (clear calc init, set margins, per-lane swing/deemph, then start the
 * swing calculation) follows a fixed order; unsupported combinations
 * bail out before touching the PHY. Always returns 0 (no signal-level
 * bits live in the DP port register on CHV).
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	/* Look up the hardware tuning constants for this pre-emph/swing pair. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0; /* unsupported combination: leave PHY alone */
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	/* The PCS23 group only exists when more than 2 lanes are in use. */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	/* Reset the TX FIFO margins to 000 before programming the swing. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
	}

	/* Program swing deemph */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;

		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		if (chv_need_uniq_trans_scale(train_set))
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3572
3573 static uint32_t
3574 gen4_signal_levels(uint8_t train_set)
3575 {
3576         uint32_t        signal_levels = 0;
3577
3578         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3579         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3580         default:
3581                 signal_levels |= DP_VOLTAGE_0_4;
3582                 break;
3583         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3584                 signal_levels |= DP_VOLTAGE_0_6;
3585                 break;
3586         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3587                 signal_levels |= DP_VOLTAGE_0_8;
3588                 break;
3589         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3590                 signal_levels |= DP_VOLTAGE_1_2;
3591                 break;
3592         }
3593         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3594         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3595         default:
3596                 signal_levels |= DP_PRE_EMPHASIS_0;
3597                 break;
3598         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3599                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3600                 break;
3601         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3602                 signal_levels |= DP_PRE_EMPHASIS_6;
3603                 break;
3604         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3605                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3606                 break;
3607         }
3608         return signal_levels;
3609 }
3610
3611 /* Gen6's DP voltage swing and pre-emphasis control */
3612 static uint32_t
3613 gen6_edp_signal_levels(uint8_t train_set)
3614 {
3615         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3616                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3617         switch (signal_levels) {
3618         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3619         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3620                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3621         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3622                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3623         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3624         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3625                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3626         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3627         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3628                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3629         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3630         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3631                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3632         default:
3633                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3634                               "0x%x\n", signal_levels);
3635                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3636         }
3637 }
3638
3639 /* Gen7's DP voltage swing and pre-emphasis control */
3640 static uint32_t
3641 gen7_edp_signal_levels(uint8_t train_set)
3642 {
3643         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3644                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3645         switch (signal_levels) {
3646         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3647                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3648         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3649                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3650         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3651                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3652
3653         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3654                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3655         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3656                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3657
3658         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3659                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3660         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3661                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3662
3663         default:
3664                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3665                               "0x%x\n", signal_levels);
3666                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3667         }
3668 }
3669
/*
 * Translate intel_dp->train_set[0] into source-side signal levels and
 * write them to the port. On DDI/CHV/VLV platforms the levels are
 * programmed through platform-specific paths (mask stays 0, so the DP
 * register is not modified by the levels); on older platforms the
 * returned bits are merged into intel_dp->DP under @mask and written out.
 */
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t signal_levels, mask = 0;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev)) {
		signal_levels = ddi_signal_levels(intel_dp);

		/* BXT programs levels via the PHY; nothing goes in DDI_BUF_CTL. */
		if (IS_BROXTON(dev))
			signal_levels = 0;
		else
			mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	/* Only log the raw bits when they actually land in the port register. */
	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3716
/*
 * Update the cached DP port register value with the requested link
 * training pattern and flush it to the hardware.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	/* dev_priv is referenced implicitly by the I915_WRITE/POSTING_READ macros. */
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);

	/* Fold the training pattern bits into the cached register value. */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3730
/*
 * Switch a DDI port's DP transport into idle link-training mode and,
 * except on port A, wait for the idle pattern transmission to complete.
 * No-op on non-DDI platforms.
 */
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	/* Replace the current training pattern with the idle pattern. */
	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3761
/*
 * Take the (non-DDI) DP link down: switch the port to the idle training
 * pattern, disable it, apply the IBX transcoder-A workaround when
 * needed, and finally honor the panel power-down delay. The cached
 * intel_dp->DP value is updated to match the hardware state.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	/* DDI platforms tear the link down elsewhere. */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	/* Nothing to do if the port is already disabled. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Step 1: put the port into the idle training pattern. */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Step 2: disable the port (and audio) entirely. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		/* Let pipe A settle before re-enabling underrun reporting. */
		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;
}
3830
3831 static bool
3832 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3833 {
3834         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3835         struct drm_device *dev = dig_port->base.base.dev;
3836         struct drm_i915_private *dev_priv = dev->dev_private;
3837         uint8_t rev;
3838
3839         if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3840                                     sizeof(intel_dp->dpcd)) < 0)
3841                 return false; /* aux transfer failed */
3842
3843         DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3844
3845         if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3846                 return false; /* DPCD not present */
3847
3848         /* Check if the panel supports PSR */
3849         memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3850         if (is_edp(intel_dp)) {
3851                 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3852                                         intel_dp->psr_dpcd,
3853                                         sizeof(intel_dp->psr_dpcd));
3854                 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3855                         dev_priv->psr.sink_support = true;
3856                         DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3857                 }
3858
3859                 if (INTEL_INFO(dev)->gen >= 9 &&
3860                         (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3861                         uint8_t frame_sync_cap;
3862
3863                         dev_priv->psr.sink_support = true;
3864                         intel_dp_dpcd_read_wake(&intel_dp->aux,
3865                                         DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3866                                         &frame_sync_cap, 1);
3867                         dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3868                         /* PSR2 needs frame sync as well */
3869                         dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3870                         DRM_DEBUG_KMS("PSR2 %s on sink",
3871                                 dev_priv->psr.psr2_support ? "supported" : "not supported");
3872                 }
3873         }
3874
3875         DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3876                       yesno(intel_dp_source_supports_hbr2(intel_dp)),
3877                       yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3878
3879         /* Intermediate frequency support */
3880         if (is_edp(intel_dp) &&
3881             (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3882             (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3883             (rev >= 0x03)) { /* eDp v1.4 or higher */
3884                 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3885                 int i;
3886
3887                 intel_dp_dpcd_read_wake(&intel_dp->aux,
3888                                 DP_SUPPORTED_LINK_RATES,
3889                                 sink_rates,
3890                                 sizeof(sink_rates));
3891
3892                 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3893                         int val = le16_to_cpu(sink_rates[i]);
3894
3895                         if (val == 0)
3896                                 break;
3897
3898                         /* Value read is in kHz while drm clock is saved in deca-kHz */
3899                         intel_dp->sink_rates[i] = (val * 200) / 10;
3900                 }
3901                 intel_dp->num_sink_rates = i;
3902         }
3903
3904         intel_dp_print_rates(intel_dp);
3905
3906         if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3907               DP_DWN_STRM_PORT_PRESENT))
3908                 return true; /* native DP sink */
3909
3910         if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3911                 return true; /* no per-port downstream info */
3912
3913         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3914                                     intel_dp->downstream_ports,
3915                                     DP_MAX_DOWNSTREAM_PORTS) < 0)
3916                 return false; /* downstream port status fetch failed */
3917
3918         return true;
3919 }
3920
3921 static void
3922 intel_dp_probe_oui(struct intel_dp *intel_dp)
3923 {
3924         u8 buf[3];
3925
3926         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3927                 return;
3928
3929         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3930                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3931                               buf[0], buf[1], buf[2]);
3932
3933         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3934                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3935                               buf[0], buf[1], buf[2]);
3936 }
3937
3938 static bool
3939 intel_dp_probe_mst(struct intel_dp *intel_dp)
3940 {
3941         u8 buf[1];
3942
3943         if (!intel_dp->can_mst)
3944                 return false;
3945
3946         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3947                 return false;
3948
3949         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3950                 if (buf[0] & DP_MST_CAP) {
3951                         DRM_DEBUG_KMS("Sink is MST capable\n");
3952                         intel_dp->is_mst = true;
3953                 } else {
3954                         DRM_DEBUG_KMS("Sink is not MST capable\n");
3955                         intel_dp->is_mst = false;
3956                 }
3957         }
3958
3959         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3960         return intel_dp->is_mst;
3961 }
3962
/*
 * Stop sink CRC generation: clear DP_TEST_SINK_START and wait for the
 * sink's CRC test counter (DP_TEST_SINK_MISC) to drain back to zero.
 *
 * Returns 0 on success, -EIO on AUX failure, -ETIMEDOUT if the counter
 * does not reach zero within 10 vblanks.
 *
 * NOTE(review): hsw_enable_ips() runs on every exit path, including when
 * this is called from intel_dp_sink_crc_start() before IPS was disabled —
 * confirm that re-enabling IPS there is intended.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;
	int count = 0;
	int attempts = 10;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Clear only the start bit, preserving the rest of DP_TEST_SINK. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Poll once per vblank until the sink's CRC count reads as zero. */
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto out;
		}
		count = buf & DP_TEST_COUNT_MASK;
	} while (--attempts && count);

	if (attempts == 0) {
		DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
		ret = -ETIMEDOUT;
	}

 out:
	hsw_enable_ips(intel_crtc);
	return ret;
}
4006
/*
 * Start sink CRC generation: verify the sink supports CRC calculation,
 * stop any run already in progress, disable IPS (presumably because IPS
 * would perturb the scanned-out image — TODO confirm) and then set
 * DP_TEST_SINK_START.
 *
 * Returns 0 on success, -ENOTTY if the sink cannot compute CRCs, -EIO on
 * AUX failure.  On success IPS is left disabled; intel_dp_sink_crc_stop()
 * re-enables it.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* A previous run is still active; stop it before restarting. */
	if (buf & DP_TEST_SINK_START) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	/* Give the sink one full frame before the caller reads the CRC. */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return 0;
}
4041
/*
 * Read one sink-computed frame CRC.
 *
 * Starts sink CRC generation, waits up to 6 vblanks for the sink's test
 * counter to become non-zero, then reads 6 bytes starting at
 * DP_TEST_CRC_R_CR into @crc.  CRC generation is always stopped before
 * returning.
 *
 * Returns 0 and fills @crc on success, negative errno on failure.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	/* Poll once per vblank until the sink reports a computed CRC. */
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

	} while (--attempts && count == 0);

	if (attempts == 0) {
		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto stop;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto stop;
	}

	/* ret is 0 here on the success path (set by intel_dp_sink_crc_start). */
stop:
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
4082
4083 static bool
4084 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4085 {
4086         return intel_dp_dpcd_read_wake(&intel_dp->aux,
4087                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
4088                                        sink_irq_vector, 1) == 1;
4089 }
4090
4091 static bool
4092 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4093 {
4094         int ret;
4095
4096         ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4097                                              DP_SINK_COUNT_ESI,
4098                                              sink_irq_vector, 14);
4099         if (ret != 14)
4100                 return false;
4101
4102         return true;
4103 }
4104
4105 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4106 {
4107         uint8_t test_result = DP_TEST_ACK;
4108         return test_result;
4109 }
4110
4111 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4112 {
4113         uint8_t test_result = DP_TEST_NAK;
4114         return test_result;
4115 }
4116
4117 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4118 {
4119         uint8_t test_result = DP_TEST_NAK;
4120         struct intel_connector *intel_connector = intel_dp->attached_connector;
4121         struct drm_connector *connector = &intel_connector->base;
4122
4123         if (intel_connector->detect_edid == NULL ||
4124             connector->edid_corrupt ||
4125             intel_dp->aux.i2c_defer_count > 6) {
4126                 /* Check EDID read for NACKs, DEFERs and corruption
4127                  * (DP CTS 1.2 Core r1.1)
4128                  *    4.2.2.4 : Failed EDID read, I2C_NAK
4129                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
4130                  *    4.2.2.6 : EDID corruption detected
4131                  * Use failsafe mode for all cases
4132                  */
4133                 if (intel_dp->aux.i2c_nack_count > 0 ||
4134                         intel_dp->aux.i2c_defer_count > 0)
4135                         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4136                                       intel_dp->aux.i2c_nack_count,
4137                                       intel_dp->aux.i2c_defer_count);
4138                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4139         } else {
4140                 struct edid *block = intel_connector->detect_edid;
4141
4142                 /* We have to write the checksum
4143                  * of the last block read
4144                  */
4145                 block += intel_connector->detect_edid->extensions;
4146
4147                 if (!drm_dp_dpcd_write(&intel_dp->aux,
4148                                         DP_TEST_EDID_CHECKSUM,
4149                                         &block->checksum,
4150                                         1))
4151                         DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4152
4153                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4154                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4155         }
4156
4157         /* Set test active flag here so userspace doesn't interrupt things */
4158         intel_dp->compliance_test_active = 1;
4159
4160         return test_result;
4161 }
4162
4163 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4164 {
4165         uint8_t test_result = DP_TEST_NAK;
4166         return test_result;
4167 }
4168
4169 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4170 {
4171         uint8_t response = DP_TEST_NAK;
4172         uint8_t rxdata = 0;
4173         int status = 0;
4174
4175         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4176         if (status <= 0) {
4177                 DRM_DEBUG_KMS("Could not read test request from sink\n");
4178                 goto update_status;
4179         }
4180
4181         switch (rxdata) {
4182         case DP_TEST_LINK_TRAINING:
4183                 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4184                 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4185                 response = intel_dp_autotest_link_training(intel_dp);
4186                 break;
4187         case DP_TEST_LINK_VIDEO_PATTERN:
4188                 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4189                 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4190                 response = intel_dp_autotest_video_pattern(intel_dp);
4191                 break;
4192         case DP_TEST_LINK_EDID_READ:
4193                 DRM_DEBUG_KMS("EDID test requested\n");
4194                 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4195                 response = intel_dp_autotest_edid(intel_dp);
4196                 break;
4197         case DP_TEST_LINK_PHY_TEST_PATTERN:
4198                 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4199                 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4200                 response = intel_dp_autotest_phy_pattern(intel_dp);
4201                 break;
4202         default:
4203                 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4204                 break;
4205         }
4206
4207 update_status:
4208         status = drm_dp_dpcd_write(&intel_dp->aux,
4209                                    DP_TEST_RESPONSE,
4210                                    &response, 1);
4211         if (status <= 0)
4212                 DRM_DEBUG_KMS("Could not write test response to sink\n");
4213 }
4214
/*
 * Service MST sink interrupts: read the ESI block, retrain the link if
 * channel EQ dropped, hand the ESI to the MST topology manager, ack the
 * handled events back to the sink and loop while new events keep coming.
 *
 * If the ESI read fails the device is assumed gone: MST is torn down and
 * a hotplug event is sent so userspace re-probes.
 *
 * Returns the drm_dp_mst_hpd_irq() result, 0 if nothing was handled, or
 * -EINVAL when not in MST mode / after an ESI read failure.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced events; retry the 3-byte
				 * write up to 3 times on short transfers. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived meanwhile. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4271
4272 /*
4273  * According to DP spec
4274  * 5.1.2:
4275  *  1. Read DPCD
4276  *  2. Configure link according to Receiver Capabilities
4277  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4278  *  4. Check link status on receipt of hot-plug interrupt
4279  */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Caller must hold the connection mutex (we touch link state). */
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	intel_dp->compliance_test_active = 0;
	intel_dp->compliance_test_type = 0;
	intel_dp->compliance_test_data = 0;

	/* Nothing to retrain if the encoder isn't driving an active pipe. */
	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		/* Short-pulse test requests are only logged, not serviced. */
		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* if link training is requested we should perform it always */
	if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
		(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
4337
4338 /* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connection status from the DPCD, taking DP branch devices
 * (downstream ports) into account: native sinks are connected as soon as
 * the DPCD reads back; behind a branch we consult SINK_COUNT (DPCD 1.1+
 * with HPD-capable ports), then fall back to a gentle DDC probe, and
 * finally report "unknown" for port types that can't be probed reliably.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		/* DPCD 1.1+: per-port type info is available. */
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD 1.0: only the coarse downstream-port type exists. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4387
4388 static enum drm_connector_status
4389 edp_detect(struct intel_dp *intel_dp)
4390 {
4391         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4392         enum drm_connector_status status;
4393
4394         status = intel_panel_detect(dev);
4395         if (status == connector_status_unknown)
4396                 status = connector_status_connected;
4397
4398         return status;
4399 }
4400
4401 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4402                                        struct intel_digital_port *port)
4403 {
4404         u32 bit;
4405
4406         switch (port->port) {
4407         case PORT_A:
4408                 return true;
4409         case PORT_B:
4410                 bit = SDE_PORTB_HOTPLUG;
4411                 break;
4412         case PORT_C:
4413                 bit = SDE_PORTC_HOTPLUG;
4414                 break;
4415         case PORT_D:
4416                 bit = SDE_PORTD_HOTPLUG;
4417                 break;
4418         default:
4419                 MISSING_CASE(port->port);
4420                 return false;
4421         }
4422
4423         return I915_READ(SDEISR) & bit;
4424 }
4425
4426 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4427                                        struct intel_digital_port *port)
4428 {
4429         u32 bit;
4430
4431         switch (port->port) {
4432         case PORT_A:
4433                 return true;
4434         case PORT_B:
4435                 bit = SDE_PORTB_HOTPLUG_CPT;
4436                 break;
4437         case PORT_C:
4438                 bit = SDE_PORTC_HOTPLUG_CPT;
4439                 break;
4440         case PORT_D:
4441                 bit = SDE_PORTD_HOTPLUG_CPT;
4442                 break;
4443         case PORT_E:
4444                 bit = SDE_PORTE_HOTPLUG_SPT;
4445                 break;
4446         default:
4447                 MISSING_CASE(port->port);
4448                 return false;
4449         }
4450
4451         return I915_READ(SDEISR) & bit;
4452 }
4453
4454 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4455                                        struct intel_digital_port *port)
4456 {
4457         u32 bit;
4458
4459         switch (port->port) {
4460         case PORT_B:
4461                 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4462                 break;
4463         case PORT_C:
4464                 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4465                 break;
4466         case PORT_D:
4467                 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4468                 break;
4469         default:
4470                 MISSING_CASE(port->port);
4471                 return false;
4472         }
4473
4474         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4475 }
4476
4477 static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4478                                        struct intel_digital_port *port)
4479 {
4480         u32 bit;
4481
4482         switch (port->port) {
4483         case PORT_B:
4484                 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4485                 break;
4486         case PORT_C:
4487                 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4488                 break;
4489         case PORT_D:
4490                 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4491                 break;
4492         default:
4493                 MISSING_CASE(port->port);
4494                 return false;
4495         }
4496
4497         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4498 }
4499
4500 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4501                                        struct intel_digital_port *intel_dig_port)
4502 {
4503         struct intel_encoder *intel_encoder = &intel_dig_port->base;
4504         enum port port;
4505         u32 bit;
4506
4507         intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4508         switch (port) {
4509         case PORT_A:
4510                 bit = BXT_DE_PORT_HP_DDIA;
4511                 break;
4512         case PORT_B:
4513                 bit = BXT_DE_PORT_HP_DDIB;
4514                 break;
4515         case PORT_C:
4516                 bit = BXT_DE_PORT_HP_DDIC;
4517                 break;
4518         default:
4519                 MISSING_CASE(port);
4520                 return false;
4521         }
4522
4523         return I915_READ(GEN8_DE_PORT_ISR) & bit;
4524 }
4525
4526 /*
4527  * intel_digital_port_connected - is the specified port connected?
4528  * @dev_priv: i915 private structure
4529  * @port: the port to test
4530  *
4531  * Return %true if @port is connected, %false otherwise.
4532  */
4533 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4534                                          struct intel_digital_port *port)
4535 {
4536         if (HAS_PCH_IBX(dev_priv))
4537                 return ibx_digital_port_connected(dev_priv, port);
4538         if (HAS_PCH_SPLIT(dev_priv))
4539                 return cpt_digital_port_connected(dev_priv, port);
4540         else if (IS_BROXTON(dev_priv))
4541                 return bxt_digital_port_connected(dev_priv, port);
4542         else if (IS_VALLEYVIEW(dev_priv))
4543                 return vlv_digital_port_connected(dev_priv, port);
4544         else
4545                 return g4x_digital_port_connected(dev_priv, port);
4546 }
4547
4548 static struct edid *
4549 intel_dp_get_edid(struct intel_dp *intel_dp)
4550 {
4551         struct intel_connector *intel_connector = intel_dp->attached_connector;
4552
4553         /* use cached edid if we have one */
4554         if (intel_connector->edid) {
4555                 /* invalid edid */
4556                 if (IS_ERR(intel_connector->edid))
4557                         return NULL;
4558
4559                 return drm_edid_duplicate(intel_connector->edid);
4560         } else
4561                 return drm_get_edid(&intel_connector->base,
4562                                     &intel_dp->aux.ddc);
4563 }
4564
4565 static void
4566 intel_dp_set_edid(struct intel_dp *intel_dp)
4567 {
4568         struct intel_connector *intel_connector = intel_dp->attached_connector;
4569         struct edid *edid;
4570
4571         edid = intel_dp_get_edid(intel_dp);
4572         intel_connector->detect_edid = edid;
4573
4574         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4575                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4576         else
4577                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4578 }
4579
4580 static void
4581 intel_dp_unset_edid(struct intel_dp *intel_dp)
4582 {
4583         struct intel_connector *intel_connector = intel_dp->attached_connector;
4584
4585         kfree(intel_connector->detect_edid);
4586         intel_connector->detect_edid = NULL;
4587
4588         intel_dp->has_audio = false;
4589 }
4590
/*
 * Connector ->detect() hook for SST DP/eDP.
 *
 * Holds the AUX power domain across the whole probe.  MST-active
 * connectors always report disconnected here (their monitors live in the
 * MST topology).  On a connected sink this also probes the OUI, MST
 * capability, caches the EDID and services any pending automated test
 * request.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	/* Invalidate any previously cached EDID before re-probing. */
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(to_i915(dev), power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(to_i915(dev),
					      dp_to_dig_port(intel_dp)))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status != connector_status_connected) {
		/* Sink gone: reset compliance-test state for the next run. */
		intel_dp->compliance_test_active = 0;
		intel_dp->compliance_test_type = 0;
		intel_dp->compliance_test_data = 0;

		goto out;
	}

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_display_power_put(to_i915(dev), power_domain);
	return status;
}
4678
/*
 * drm_connector_funcs .force hook: the user has forced the connector state,
 * so refresh our cached EDID to match. Unlike .detect we must not touch the
 * reported connection status.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
        struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
        enum intel_display_power_domain power_domain;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
        /* Drop the stale cached EDID unconditionally. */
        intel_dp_unset_edid(intel_dp);

        /* Only re-read the EDID when something is (forced) connected. */
        if (connector->status != connector_status_connected)
                return;

        /* EDID is read over AUX, which needs its power domain held. */
        power_domain = intel_display_port_aux_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);

        intel_dp_set_edid(intel_dp);

        intel_display_power_put(dev_priv, power_domain);

        if (intel_encoder->type != INTEL_OUTPUT_EDP)
                intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
4704
4705 static int intel_dp_get_modes(struct drm_connector *connector)
4706 {
4707         struct intel_connector *intel_connector = to_intel_connector(connector);
4708         struct edid *edid;
4709
4710         edid = intel_connector->detect_edid;
4711         if (edid) {
4712                 int ret = intel_connector_update_modes(connector, edid);
4713                 if (ret)
4714                         return ret;
4715         }
4716
4717         /* if eDP has no EDID, fall back to fixed mode */
4718         if (is_edp(intel_attached_dp(connector)) &&
4719             intel_connector->panel.fixed_mode) {
4720                 struct drm_display_mode *mode;
4721
4722                 mode = drm_mode_duplicate(connector->dev,
4723                                           intel_connector->panel.fixed_mode);
4724                 if (mode) {
4725                         drm_mode_probed_add(connector, mode);
4726                         return 1;
4727                 }
4728         }
4729
4730         return 0;
4731 }
4732
4733 static bool
4734 intel_dp_detect_audio(struct drm_connector *connector)
4735 {
4736         bool has_audio = false;
4737         struct edid *edid;
4738
4739         edid = to_intel_connector(connector)->detect_edid;
4740         if (edid)
4741                 has_audio = drm_detect_monitor_audio(edid);
4742
4743         return has_audio;
4744 }
4745
4746 static int
4747 intel_dp_set_property(struct drm_connector *connector,
4748                       struct drm_property *property,
4749                       uint64_t val)
4750 {
4751         struct drm_i915_private *dev_priv = connector->dev->dev_private;
4752         struct intel_connector *intel_connector = to_intel_connector(connector);
4753         struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4754         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4755         int ret;
4756
4757         ret = drm_object_property_set_value(&connector->base, property, val);
4758         if (ret)
4759                 return ret;
4760
4761         if (property == dev_priv->force_audio_property) {
4762                 int i = val;
4763                 bool has_audio;
4764
4765                 if (i == intel_dp->force_audio)
4766                         return 0;
4767
4768                 intel_dp->force_audio = i;
4769
4770                 if (i == HDMI_AUDIO_AUTO)
4771                         has_audio = intel_dp_detect_audio(connector);
4772                 else
4773                         has_audio = (i == HDMI_AUDIO_ON);
4774
4775                 if (has_audio == intel_dp->has_audio)
4776                         return 0;
4777
4778                 intel_dp->has_audio = has_audio;
4779                 goto done;
4780         }
4781
4782         if (property == dev_priv->broadcast_rgb_property) {
4783                 bool old_auto = intel_dp->color_range_auto;
4784                 bool old_range = intel_dp->limited_color_range;
4785
4786                 switch (val) {
4787                 case INTEL_BROADCAST_RGB_AUTO:
4788                         intel_dp->color_range_auto = true;
4789                         break;
4790                 case INTEL_BROADCAST_RGB_FULL:
4791                         intel_dp->color_range_auto = false;
4792                         intel_dp->limited_color_range = false;
4793                         break;
4794                 case INTEL_BROADCAST_RGB_LIMITED:
4795                         intel_dp->color_range_auto = false;
4796                         intel_dp->limited_color_range = true;
4797                         break;
4798                 default:
4799                         return -EINVAL;
4800                 }
4801
4802                 if (old_auto == intel_dp->color_range_auto &&
4803                     old_range == intel_dp->limited_color_range)
4804                         return 0;
4805
4806                 goto done;
4807         }
4808
4809         if (is_edp(intel_dp) &&
4810             property == connector->dev->mode_config.scaling_mode_property) {
4811                 if (val == DRM_MODE_SCALE_NONE) {
4812                         DRM_DEBUG_KMS("no scaling not supported\n");
4813                         return -EINVAL;
4814                 }
4815
4816                 if (intel_connector->panel.fitting_mode == val) {
4817                         /* the eDP scaling property is not changed */
4818                         return 0;
4819                 }
4820                 intel_connector->panel.fitting_mode = val;
4821
4822                 goto done;
4823         }
4824
4825         return -EINVAL;
4826
4827 done:
4828         if (intel_encoder->base.crtc)
4829                 intel_crtc_restore_mode(intel_encoder->base.crtc);
4830
4831         return 0;
4832 }
4833
/*
 * drm_connector_funcs .destroy hook: free the cached EDIDs and panel state
 * embedded in the intel_connector, then release the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
        struct intel_connector *intel_connector = to_intel_connector(connector);

        /* kfree(NULL) is a no-op, so no NULL check needed. */
        kfree(intel_connector->detect_edid);

        /* ->edid may hold an ERR_PTR sentinel, which must not be freed. */
        if (!IS_ERR_OR_NULL(intel_connector->edid))
                kfree(intel_connector->edid);

        /* Can't call is_edp() since the encoder may have been destroyed
         * already. */
        if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
                intel_panel_fini(&intel_connector->panel);

        drm_connector_cleanup(connector);
        kfree(connector);
}
4852
/*
 * drm_encoder_funcs .destroy hook: tear down AUX, MST state and (for eDP)
 * the panel power/VDD bookkeeping before freeing the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        struct intel_dp *intel_dp = &intel_dig_port->dp;

        intel_dp_aux_fini(intel_dp);
        intel_dp_mst_encoder_cleanup(intel_dig_port);
        if (is_edp(intel_dp)) {
                /* Stop the deferred vdd-off before synchronizing state. */
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                /*
                 * vdd might still be enabled due to the delayed vdd off.
                 * Make sure vdd is actually turned off here.
                 */
                pps_lock(intel_dp);
                edp_panel_vdd_off_sync(intel_dp);
                pps_unlock(intel_dp);

                /* Unhook the reboot notifier registered at init time. */
                if (intel_dp->edp_notifier.notifier_call) {
                        unregister_reboot_notifier(&intel_dp->edp_notifier);
                        intel_dp->edp_notifier.notifier_call = NULL;
                }
        }
        drm_encoder_cleanup(encoder);
        kfree(intel_dig_port);
}
4878
/*
 * Suspend hook: for eDP, make sure the delayed panel VDD off has actually
 * happened before we go to sleep, so we don't leave VDD (and its power
 * domain reference) enabled across suspend.
 */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

        if (!is_edp(intel_dp))
                return;

        /*
         * vdd might still be enabled due to the delayed vdd off.
         * Make sure vdd is actually turned off here.
         */
        /* Cancel outside pps_lock: the work item itself takes pps_lock. */
        cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
        pps_lock(intel_dp);
        edp_panel_vdd_off_sync(intel_dp);
        pps_unlock(intel_dp);
}
4895
/*
 * Bring software VDD tracking in sync with hardware state left behind by
 * the BIOS. Must be called with pps_mutex held.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* Nothing to sanitize if hardware has VDD off already. */
        if (!edp_have_panel_vdd(intel_dp))
                return;

        /*
         * The VDD bit needs a power domain reference, so if the bit is
         * already enabled when we boot or resume, grab this reference and
         * schedule a vdd off, so we don't hold on to the reference
         * indefinitely.
         */
        DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
        power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
        intel_display_power_get(dev_priv, power_domain);

        edp_panel_vdd_schedule_off(intel_dp);
}
4920
/*
 * drm_encoder_funcs .reset hook: re-sync panel power sequencer state with
 * whatever the BIOS left behind. Only relevant for eDP encoders.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
        struct intel_dp *intel_dp;

        if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
                return;

        intel_dp = enc_to_intel_dp(encoder);

        /* Both PPS reads below require pps_mutex. */
        pps_lock(intel_dp);

        /*
         * Read out the current power sequencer assignment,
         * in case the BIOS did something with it.
         */
        if (IS_VALLEYVIEW(encoder->dev))
                vlv_initial_power_sequencer_setup(intel_dp);

        intel_edp_panel_vdd_sanitize(intel_dp);

        pps_unlock(intel_dp);
}
4943
/* Connector ops for SST DP/eDP connectors; atomic state handling uses the
 * stock atomic helpers. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_dp_detect,
        .force = intel_dp_force,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_dp_set_property,
        .atomic_get_property = intel_connector_atomic_get_property,
        .destroy = intel_dp_connector_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4955
/* Probe helpers: mode enumeration/validation and encoder selection. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
        .get_modes = intel_dp_get_modes,
        .mode_valid = intel_dp_mode_valid,
        .best_encoder = intel_best_encoder,
};
4961
/* Encoder ops: reset re-syncs PPS state, destroy tears down the port. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
        .reset = intel_dp_encoder_reset,
        .destroy = intel_dp_encoder_destroy,
};
4966
/*
 * Handle a hot-plug detect pulse on a DP digital port.
 *
 * A long pulse means connect/disconnect: re-read the DPCD, re-probe MST and
 * retrain if needed. A short pulse is a sink IRQ: service MST messages or
 * check link status. Any failure on the long-pulse path falls through to
 * mst_fail, which also tears down a stale MST topology.
 *
 * Returns IRQ_HANDLED if the pulse was consumed, IRQ_NONE otherwise.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        enum irqreturn ret = IRQ_NONE;

        if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
                intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

        if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
                /*
                 * vdd off can generate a long pulse on eDP which
                 * would require vdd on to handle it, and thus we
                 * would end up in an endless cycle of
                 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
                 */
                DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
                              port_name(intel_dig_port->port));
                return IRQ_HANDLED;
        }

        DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
                      port_name(intel_dig_port->port),
                      long_hpd ? "long" : "short");

        /* AUX transactions below need the port's AUX power domain. */
        power_domain = intel_display_port_aux_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);

        if (long_hpd) {
                /* indicate that we need to restart link training */
                intel_dp->train_set_valid = false;

                if (!intel_digital_port_connected(dev_priv, intel_dig_port))
                        goto mst_fail;

                if (!intel_dp_get_dpcd(intel_dp)) {
                        goto mst_fail;
                }

                intel_dp_probe_oui(intel_dp);

                /* Not an MST sink: check the SST link, then bail to
                 * mst_fail to clean up any previous MST state. */
                if (!intel_dp_probe_mst(intel_dp)) {
                        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
                        intel_dp_check_link_status(intel_dp);
                        drm_modeset_unlock(&dev->mode_config.connection_mutex);
                        goto mst_fail;
                }
        } else {
                if (intel_dp->is_mst) {
                        if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
                                goto mst_fail;
                }

                if (!intel_dp->is_mst) {
                        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
                        intel_dp_check_link_status(intel_dp);
                        drm_modeset_unlock(&dev->mode_config.connection_mutex);
                }
        }

        ret = IRQ_HANDLED;

        goto put_power;
mst_fail:
        /* if we were in MST mode, and device is not there get out of MST mode */
        if (intel_dp->is_mst) {
                DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
                intel_dp->is_mst = false;
                drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
        }
put_power:
        intel_display_power_put(dev_priv, power_domain);

        return ret;
}
5046
5047 /* check the VBT to see whether the eDP is on another port */
5048 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5049 {
5050         struct drm_i915_private *dev_priv = dev->dev_private;
5051         union child_device_config *p_child;
5052         int i;
5053         static const short port_mapping[] = {
5054                 [PORT_B] = DVO_PORT_DPB,
5055                 [PORT_C] = DVO_PORT_DPC,
5056                 [PORT_D] = DVO_PORT_DPD,
5057                 [PORT_E] = DVO_PORT_DPE,
5058         };
5059
5060         /*
5061          * eDP not supported on g4x. so bail out early just
5062          * for a bit extra safety in case the VBT is bonkers.
5063          */
5064         if (INTEL_INFO(dev)->gen < 5)
5065                 return false;
5066
5067         if (port == PORT_A)
5068                 return true;
5069
5070         if (!dev_priv->vbt.child_dev_num)
5071                 return false;
5072
5073         for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5074                 p_child = dev_priv->vbt.child_dev + i;
5075
5076                 if (p_child->common.dvo_port == port_mapping[port] &&
5077                     (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5078                     (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5079                         return true;
5080         }
5081         return false;
5082 }
5083
5084 void
5085 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5086 {
5087         struct intel_connector *intel_connector = to_intel_connector(connector);
5088
5089         intel_attach_force_audio_property(connector);
5090         intel_attach_broadcast_rgb_property(connector);
5091         intel_dp->color_range_auto = true;
5092
5093         if (is_edp(intel_dp)) {
5094                 drm_mode_create_scaling_mode_property(connector->dev);
5095                 drm_object_attach_property(
5096                         &connector->base,
5097                         connector->dev->mode_config.scaling_mode_property,
5098                         DRM_MODE_SCALE_ASPECT);
5099                 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5100         }
5101 }
5102
/*
 * Seed the panel power-sequencing timestamps with the current time, so the
 * first real panel operations honor the full power-cycle/backlight delays.
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
        intel_dp->last_power_cycle = jiffies;
        intel_dp->last_power_on = jiffies;
        intel_dp->last_backlight_off = jiffies;
}
5109
/*
 * Compute the eDP panel power sequencing delays (T1-T12) from the current
 * hardware register values, the VBT, and the eDP spec upper limits, and
 * cache them in intel_dp->pps_delays. Must be called with pps_mutex held.
 * Register layout differs per platform: BXT keeps the cycle delay in the
 * control register, PCH/VLV have a separate divisor register.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                    struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct edp_power_seq cur, vbt, spec,
                *final = &intel_dp->pps_delays;
        u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
        i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* already initialized? */
        if (final->t11_t12 != 0)
                return;

        if (IS_BROXTON(dev)) {
                /*
                 * TODO: BXT has 2 sets of PPS registers.
                 * Correct Register for Broxton need to be identified
                 * using VBT. hardcoding for now
                 */
                pp_ctrl_reg = BXT_PP_CONTROL(0);
                pp_on_reg = BXT_PP_ON_DELAYS(0);
                pp_off_reg = BXT_PP_OFF_DELAYS(0);
        } else if (HAS_PCH_SPLIT(dev)) {
                pp_ctrl_reg = PCH_PP_CONTROL;
                pp_on_reg = PCH_PP_ON_DELAYS;
                pp_off_reg = PCH_PP_OFF_DELAYS;
                pp_div_reg = PCH_PP_DIVISOR;
        } else {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
                pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
                pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
        }

        /* Workaround: Need to write PP_CONTROL with the unlock key as
         * the very first thing. */
        pp_ctl = ironlake_get_pp_control(intel_dp);

        pp_on = I915_READ(pp_on_reg);
        pp_off = I915_READ(pp_off_reg);
        /* BXT has no divisor register; the cycle delay lives in pp_ctl. */
        if (!IS_BROXTON(dev)) {
                I915_WRITE(pp_ctrl_reg, pp_ctl);
                pp_div = I915_READ(pp_div_reg);
        }

        /* Pull timing values out of registers */
        cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
                PANEL_POWER_UP_DELAY_SHIFT;

        cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
                PANEL_LIGHT_ON_DELAY_SHIFT;

        cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
                PANEL_LIGHT_OFF_DELAY_SHIFT;

        cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
                PANEL_POWER_DOWN_DELAY_SHIFT;

        if (IS_BROXTON(dev)) {
                /* BXT stores the cycle delay 1-based in 100ms units;
                 * convert to the common 100us representation. */
                u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
                        BXT_POWER_CYCLE_DELAY_SHIFT;
                if (tmp > 0)
                        cur.t11_t12 = (tmp - 1) * 1000;
                else
                        cur.t11_t12 = 0;
        } else {
                cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
                       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
        }

        DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
                      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

        vbt = dev_priv->vbt.edp_pps;

        /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
         * our hw here, which are all in 100usec. */
        spec.t1_t3 = 210 * 10;
        spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
        spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
        spec.t10 = 500 * 10;
        /* This one is special and actually in units of 100ms, but zero
         * based in the hw (so we need to add 100 ms). But the sw vbt
         * table multiplies it with 1000 to make it in units of 100usec,
         * too. */
        spec.t11_t12 = (510 + 100) * 10;

        DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
                      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

        /* Use the max of the register settings and vbt. If both are
         * unset, fall back to the spec limits. */
#define assign_final(field)     final->field = (max(cur.field, vbt.field) == 0 ? \
                                       spec.field : \
                                       max(cur.field, vbt.field))
        assign_final(t1_t3);
        assign_final(t8);
        assign_final(t9);
        assign_final(t10);
        assign_final(t11_t12);
#undef assign_final

        /* Convert from 100us hardware units to ms for the wait helpers. */
#define get_delay(field)        (DIV_ROUND_UP(final->field, 10))
        intel_dp->panel_power_up_delay = get_delay(t1_t3);
        intel_dp->backlight_on_delay = get_delay(t8);
        intel_dp->backlight_off_delay = get_delay(t9);
        intel_dp->panel_power_down_delay = get_delay(t10);
        intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

        DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
                      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
                      intel_dp->panel_power_cycle_delay);

        DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
                      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5232
/*
 * Program the cached pps_delays back into the panel power sequencer
 * registers, select the port the sequencer drives, and log the result.
 * Must be called with pps_mutex held (asserted below).
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                                              struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_on, pp_off, pp_div, port_sel = 0;
        int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
        i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
        enum port port = dp_to_dig_port(intel_dp)->port;
        const struct edp_power_seq *seq = &intel_dp->pps_delays;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (IS_BROXTON(dev)) {
                /*
                 * TODO: BXT has 2 sets of PPS registers.
                 * Correct Register for Broxton need to be identified
                 * using VBT. hardcoding for now
                 */
                pp_ctrl_reg = BXT_PP_CONTROL(0);
                pp_on_reg = BXT_PP_ON_DELAYS(0);
                pp_off_reg = BXT_PP_OFF_DELAYS(0);

        } else if (HAS_PCH_SPLIT(dev)) {
                pp_on_reg = PCH_PP_ON_DELAYS;
                pp_off_reg = PCH_PP_OFF_DELAYS;
                pp_div_reg = PCH_PP_DIVISOR;
        } else {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
                pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
                pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
        }

        /*
         * And finally store the new values in the power sequencer. The
         * backlight delays are set to 1 because we do manual waits on them. For
         * T8, even BSpec recommends doing it. For T9, if we don't do this,
         * we'll end up waiting for the backlight off delay twice: once when we
         * do the manual sleep, and once when we disable the panel and wait for
         * the PP_STATUS bit to become zero.
         */
        pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
                (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
        pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
                 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
        /* Compute the divisor for the pp clock, simply match the Bspec
         * formula. */
        if (IS_BROXTON(dev)) {
                /* BXT: cycle delay lives in the control register, 1-based
                 * in 100ms units; preserve the other control bits. */
                pp_div = I915_READ(pp_ctrl_reg);
                pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
                pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
                                << BXT_POWER_CYCLE_DELAY_SHIFT);
        } else {
                pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
                pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
                                << PANEL_POWER_CYCLE_DELAY_SHIFT);
        }

        /* Haswell doesn't have any port selection bits for the panel
         * power sequencer any more. */
        if (IS_VALLEYVIEW(dev)) {
                port_sel = PANEL_PORT_SELECT_VLV(port);
        } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
                if (port == PORT_A)
                        port_sel = PANEL_PORT_SELECT_DPA;
                else
                        port_sel = PANEL_PORT_SELECT_DPD;
        }

        pp_on |= port_sel;

        I915_WRITE(pp_on_reg, pp_on);
        I915_WRITE(pp_off_reg, pp_off);
        if (IS_BROXTON(dev))
                I915_WRITE(pp_ctrl_reg, pp_div);
        else
                I915_WRITE(pp_div_reg, pp_div);

        DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
                      I915_READ(pp_on_reg),
                      I915_READ(pp_off_reg),
                      IS_BROXTON(dev) ?
                      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
                      I915_READ(pp_div_reg));
}
5320
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        struct intel_digital_port *dig_port = NULL;
        struct intel_dp *intel_dp = dev_priv->drrs.dp;
        struct intel_crtc_state *config = NULL;
        struct intel_crtc *intel_crtc = NULL;
        enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

        if (refresh_rate <= 0) {
                DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
                return;
        }

        if (intel_dp == NULL) {
                DRM_DEBUG_KMS("DRRS not supported.\n");
                return;
        }

        /*
         * FIXME: This needs proper synchronization with psr state for some
         * platforms that cannot have PSR and DRRS enabled at the same time.
         */

        dig_port = dp_to_dig_port(intel_dp);
        encoder = &dig_port->base;
        intel_crtc = to_intel_crtc(encoder->base.crtc);

        if (!intel_crtc) {
                DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
                return;
        }

        config = intel_crtc->config;

        if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
                DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
                return;
        }

        /* NOTE(review): panel.downclock_mode is dereferenced without a NULL
         * check here; presumably drrs.dp is only set for panels that have a
         * downclock mode — verify against intel_edp_drrs_init. */
        if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
                        refresh_rate)
                index = DRRS_LOW_RR;

        if (index == dev_priv->drrs.refresh_rate_type) {
                DRM_DEBUG_KMS(
                        "DRRS requested for previously set RR...ignoring\n");
                return;
        }

        if (!intel_crtc->active) {
                DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
                return;
        }

        /* gen8+ (except CHV) switch RR via the M/N values; older gens
         * toggle a PIPECONF bit instead. */
        if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
                switch (index) {
                case DRRS_HIGH_RR:
                        intel_dp_set_m_n(intel_crtc, M1_N1);
                        break;
                case DRRS_LOW_RR:
                        intel_dp_set_m_n(intel_crtc, M2_N2);
                        break;
                case DRRS_MAX_RR:
                default:
                        DRM_ERROR("Unsupported refreshrate type\n");
                }
        } else if (INTEL_INFO(dev)->gen > 6) {
                i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
                u32 val;

                val = I915_READ(reg);
                if (index > DRRS_HIGH_RR) {
                        if (IS_VALLEYVIEW(dev))
                                val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
                        else
                                val |= PIPECONF_EDP_RR_MODE_SWITCH;
                } else {
                        if (IS_VALLEYVIEW(dev))
                                val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
                        else
                                val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
                }
                I915_WRITE(reg, val);
        }

        dev_priv->drrs.refresh_rate_type = index;

        DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5424
/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        /* NOTE(review): crtc is dereferenced without a NULL check —
         * presumably callers only enable DRRS with a crtc attached;
         * verify against the enable path. */
        if (!intel_crtc->config->has_drrs) {
                DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
                return;
        }

        mutex_lock(&dev_priv->drrs.mutex);
        /* Only one DP link may own DRRS at a time. */
        if (WARN_ON(dev_priv->drrs.dp)) {
                DRM_ERROR("DRRS already enabled\n");
                goto unlock;
        }

        dev_priv->drrs.busy_frontbuffer_bits = 0;

        dev_priv->drrs.dp = intel_dp;

unlock:
        mutex_unlock(&dev_priv->drrs.mutex);
}
5457
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 *
 * Detaches @intel_dp from the DRRS state, restoring the fixed (high)
 * refresh rate first if the panel is currently downclocked, and stops
 * the idleness-detection delayed work.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		/* DRRS was never enabled (or is already disabled). */
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	/* Return to the fixed mode refresh rate before letting go. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	/*
	 * Cancel after dropping the mutex: the downclock work acquires
	 * drrs.mutex itself, so a _sync cancel under the lock could
	 * deadlock.
	 */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5490
/*
 * Delayed work that downclocks the panel (HIGH_RR -> LOW_RR) once the
 * screen has been idle long enough.  Scheduled from intel_edp_drrs_flush()
 * when no frontbuffer bits remain busy.
 */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	/* DRRS may have been disabled since this work was queued. */
	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	/* Screen is idle: switch to the downclock mode refresh rate. */
	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			downclock_mode->vrefresh);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5520
/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Rendering has started, so any pending downclock is stale. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only track planes that sit on the DRRS pipe. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}
5563
/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Restart the idleness timer from scratch below. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only the bits on the DRRS pipe are relevant; mark them idle. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means frame update hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
5615
5616 /**
5617  * DOC: Display Refresh Rate Switching (DRRS)
5618  *
5619  * Display Refresh Rate Switching (DRRS) is a power conservation feature
5620  * which enables switching between low and high refresh rates,
5621  * dynamically, based on the usage scenario. This feature is applicable
5622  * for internal panels.
5623  *
5624  * Indication that the panel supports DRRS is given by the panel EDID, which
5625  * would list multiple refresh rates for one resolution.
5626  *
5627  * DRRS is of 2 types - static and seamless.
5628  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5629  * (may appear as a blink on screen) and is used in dock-undock scenario.
5630  * Seamless DRRS involves changing RR without any visual effect to the user
5631  * and can be used during normal system usage. This is done by programming
5632  * certain registers.
5633  *
5634  * Support for static/seamless DRRS may be indicated in the VBT based on
5635  * inputs from the panel spec.
5636  *
5637  * DRRS saves power by switching to low RR based on usage scenarios.
5638  *
5639  * eDP DRRS:-
5640  *        The implementation is based on frontbuffer tracking implementation.
5641  * When there is a disturbance on the screen triggered by user activity or a
5642  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5643  * When there is no movement on screen, after a timeout of 1 second, a switch
5644  * to low RR is made.
5645  *        For integration with frontbuffer tracking code,
5646  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5647  *
5648  * DRRS can be further extended to support other internal panels and also
5649  * the scenario of video playback wherein RR is set based on the rate
5650  * requested by userspace.
5651  */
5652
5653 /**
5654  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5655  * @intel_connector: eDP connector
5656  * @fixed_mode: preferred mode of panel
5657  *
5658  * This function is  called only once at driver load to initialize basic
5659  * DRRS stuff.
5660  *
5661  * Returns:
5662  * Downclock mode if panel supports it, else return NULL.
5663  * DRRS support is determined by the presence of downclock mode (apart
5664  * from VBT setting).
5665  */
5666 static struct drm_display_mode *
5667 intel_dp_drrs_init(struct intel_connector *intel_connector,
5668                 struct drm_display_mode *fixed_mode)
5669 {
5670         struct drm_connector *connector = &intel_connector->base;
5671         struct drm_device *dev = connector->dev;
5672         struct drm_i915_private *dev_priv = dev->dev_private;
5673         struct drm_display_mode *downclock_mode = NULL;
5674
5675         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5676         mutex_init(&dev_priv->drrs.mutex);
5677
5678         if (INTEL_INFO(dev)->gen <= 6) {
5679                 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5680                 return NULL;
5681         }
5682
5683         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5684                 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5685                 return NULL;
5686         }
5687
5688         downclock_mode = intel_find_panel_downclock
5689                                         (dev, fixed_mode, connector);
5690
5691         if (!downclock_mode) {
5692                 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5693                 return NULL;
5694         }
5695
5696         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5697
5698         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5699         DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5700         return downclock_mode;
5701 }
5702
/*
 * eDP-specific connector setup: sanitize VDD state, cache DPCD and EDID,
 * pick the fixed panel mode (EDID preferred, VBT fallback), and set up
 * DRRS and backlight.  Returns false if the panel looks like a ghost
 * (no link info could be read); returns true immediately for non-eDP.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID read back but contained no usable modes. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* Cache the result (real EDID or ERR_PTR) for later queries. */
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5809
/*
 * Create and register the DRM connector for a DP/eDP digital port: wire up
 * the intel_dp vfuncs, hotplug pin, panel power sequencer, AUX channel,
 * MST support and (for eDP) the panel itself.  Returns false on failure,
 * in which case the connector has been cleaned up again.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type, ret;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	if (HAS_DDI(dev))
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		/* BXT A-step routes port B hotplug to the port A pin. */
		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	case PORT_E:
		intel_encoder->hpd_pin = HPD_PORT_E;
		break;
	default:
		BUG();
	}

	/* Initialize the panel power sequencer before any AUX traffic. */
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	ret = intel_dp_aux_init(intel_dp, intel_connector);
	if (ret)
		goto fail;

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		/* Ghost eDP panel: undo AUX/MST setup and bail out. */
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;

fail:
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);
	}
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);

	return false;
}
5968
/*
 * Allocate and initialize a DP digital port + encoder for @port (driving
 * @output_reg), choose the CHV/VLV/gen-specific enable/disable hooks, and
 * create the connector.  On failure everything allocated here is freed
 * again; the function reports nothing to the caller.
 */
void
intel_dp_init(struct drm_device *dev,
	      i915_reg_t output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/* Enable/disable sequencing differs per platform generation. */
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	/* On CHV, port D is tied to pipe C; other ports use pipes A/B. */
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;

	/* Register for long/short HPD pulses on this port. */
	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return;

err_init_connector:
	drm_encoder_cleanup(encoder);
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);

	return;
}
6046
6047 void intel_dp_mst_suspend(struct drm_device *dev)
6048 {
6049         struct drm_i915_private *dev_priv = dev->dev_private;
6050         int i;
6051
6052         /* disable MST */
6053         for (i = 0; i < I915_MAX_PORTS; i++) {
6054                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6055                 if (!intel_dig_port)
6056                         continue;
6057
6058                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6059                         if (!intel_dig_port->dp.can_mst)
6060                                 continue;
6061                         if (intel_dig_port->dp.is_mst)
6062                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6063                 }
6064         }
6065 }
6066
6067 void intel_dp_mst_resume(struct drm_device *dev)
6068 {
6069         struct drm_i915_private *dev_priv = dev->dev_private;
6070         int i;
6071
6072         for (i = 0; i < I915_MAX_PORTS; i++) {
6073                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6074                 if (!intel_dig_port)
6075                         continue;
6076                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6077                         int ret;
6078
6079                         if (!intel_dig_port->dp.can_mst)
6080                                 continue;
6081
6082                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6083                         if (ret != 0) {
6084                                 intel_dp_check_mst_status(&intel_dig_port->dp);
6085                         }
6086                 }
6087         }
6088 }