drivers/gpu/drm/i915/intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include "drmP.h"
36 #include "intel_drv.h"
37 #include "i915_drm.h"
38 #include "i915_drv.h"
39 #include "i915_trace.h"
40 #include "drm_dp_helper.h"
41 #include "drm_crtc_helper.h"
42 #include <linux/dma_remapping.h>
43
44 #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
45
46 bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
47 static void intel_increase_pllclock(struct drm_crtc *crtc);
48 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
49
50 typedef struct {
51         /* given values */
52         int n;
53         int m1, m2;
54         int p1, p2;
55         /* derived values */
56         int     dot;
57         int     vco;
58         int     m;
59         int     p;
60 } intel_clock_t;
61
62 typedef struct {
63         int     min, max;
64 } intel_range_t;
65
66 typedef struct {
67         int     dot_limit;
68         int     p2_slow, p2_fast;
69 } intel_p2_t;
70
71 #define INTEL_P2_NUM                  2
72 typedef struct intel_limit intel_limit_t;
73 struct intel_limit {
74         intel_range_t   dot, vco, n, m, m1, m2, p, p1;
75         intel_p2_t          p2;
76         bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
77                         int, int, intel_clock_t *, intel_clock_t *);
78 };
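
/*
 * The divisor model behind these tables (see pineview_clock() and
 * intel_clock() below) is, roughly:
 *
 *      VCO = refclk * M / N,   dot clock = VCO / P,   P = P1 * P2
 *
 * where M is derived from M1/M2 and the exact "+2" offsets applied to the
 * raw register values depend on the platform.  The min/max ranges bound
 * each divisor, .p2 picks a slow or fast post divider around .dot_limit,
 * and .find_pll is the search routine used to choose concrete divisors for
 * a target dot clock.
 */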
79
80 /* FDI */
81 #define IRONLAKE_FDI_FREQ               2700000 /* in kHz for mode->clock */
82
83 static bool
84 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
85                     int target, int refclk, intel_clock_t *match_clock,
86                     intel_clock_t *best_clock);
87 static bool
88 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
89                         int target, int refclk, intel_clock_t *match_clock,
90                         intel_clock_t *best_clock);
91
92 static bool
93 intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
94                       int target, int refclk, intel_clock_t *match_clock,
95                       intel_clock_t *best_clock);
96 static bool
97 intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
98                            int target, int refclk, intel_clock_t *match_clock,
99                            intel_clock_t *best_clock);
100
101 static inline u32 /* units of 100MHz */
102 intel_fdi_link_freq(struct drm_device *dev)
103 {
104         if (IS_GEN5(dev)) {
105                 struct drm_i915_private *dev_priv = dev->dev_private;
106                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
107         } else
108                 return 27;
109 }
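
/*
 * Worked example: on anything but GEN5 the helper above returns 27, i.e. a
 * 2.7 GHz FDI link clock (the return value is in units of 100 MHz), which
 * is consistent with IRONLAKE_FDI_FREQ = 2700000 kHz used against
 * mode->clock.
 */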
110
111 static const intel_limit_t intel_limits_i8xx_dvo = {
112         .dot = { .min = 25000, .max = 350000 },
113         .vco = { .min = 930000, .max = 1400000 },
114         .n = { .min = 3, .max = 16 },
115         .m = { .min = 96, .max = 140 },
116         .m1 = { .min = 18, .max = 26 },
117         .m2 = { .min = 6, .max = 16 },
118         .p = { .min = 4, .max = 128 },
119         .p1 = { .min = 2, .max = 33 },
120         .p2 = { .dot_limit = 165000,
121                 .p2_slow = 4, .p2_fast = 2 },
122         .find_pll = intel_find_best_PLL,
123 };
124
125 static const intel_limit_t intel_limits_i8xx_lvds = {
126         .dot = { .min = 25000, .max = 350000 },
127         .vco = { .min = 930000, .max = 1400000 },
128         .n = { .min = 3, .max = 16 },
129         .m = { .min = 96, .max = 140 },
130         .m1 = { .min = 18, .max = 26 },
131         .m2 = { .min = 6, .max = 16 },
132         .p = { .min = 4, .max = 128 },
133         .p1 = { .min = 1, .max = 6 },
134         .p2 = { .dot_limit = 165000,
135                 .p2_slow = 14, .p2_fast = 7 },
136         .find_pll = intel_find_best_PLL,
137 };
138
139 static const intel_limit_t intel_limits_i9xx_sdvo = {
140         .dot = { .min = 20000, .max = 400000 },
141         .vco = { .min = 1400000, .max = 2800000 },
142         .n = { .min = 1, .max = 6 },
143         .m = { .min = 70, .max = 120 },
144         .m1 = { .min = 10, .max = 22 },
145         .m2 = { .min = 5, .max = 9 },
146         .p = { .min = 5, .max = 80 },
147         .p1 = { .min = 1, .max = 8 },
148         .p2 = { .dot_limit = 200000,
149                 .p2_slow = 10, .p2_fast = 5 },
150         .find_pll = intel_find_best_PLL,
151 };
152
153 static const intel_limit_t intel_limits_i9xx_lvds = {
154         .dot = { .min = 20000, .max = 400000 },
155         .vco = { .min = 1400000, .max = 2800000 },
156         .n = { .min = 1, .max = 6 },
157         .m = { .min = 70, .max = 120 },
158         .m1 = { .min = 10, .max = 22 },
159         .m2 = { .min = 5, .max = 9 },
160         .p = { .min = 7, .max = 98 },
161         .p1 = { .min = 1, .max = 8 },
162         .p2 = { .dot_limit = 112000,
163                 .p2_slow = 14, .p2_fast = 7 },
164         .find_pll = intel_find_best_PLL,
165 };
166
167
168 static const intel_limit_t intel_limits_g4x_sdvo = {
169         .dot = { .min = 25000, .max = 270000 },
170         .vco = { .min = 1750000, .max = 3500000},
171         .n = { .min = 1, .max = 4 },
172         .m = { .min = 104, .max = 138 },
173         .m1 = { .min = 17, .max = 23 },
174         .m2 = { .min = 5, .max = 11 },
175         .p = { .min = 10, .max = 30 },
176         .p1 = { .min = 1, .max = 3},
177         .p2 = { .dot_limit = 270000,
178                 .p2_slow = 10,
179                 .p2_fast = 10
180         },
181         .find_pll = intel_g4x_find_best_PLL,
182 };
183
184 static const intel_limit_t intel_limits_g4x_hdmi = {
185         .dot = { .min = 22000, .max = 400000 },
186         .vco = { .min = 1750000, .max = 3500000},
187         .n = { .min = 1, .max = 4 },
188         .m = { .min = 104, .max = 138 },
189         .m1 = { .min = 16, .max = 23 },
190         .m2 = { .min = 5, .max = 11 },
191         .p = { .min = 5, .max = 80 },
192         .p1 = { .min = 1, .max = 8},
193         .p2 = { .dot_limit = 165000,
194                 .p2_slow = 10, .p2_fast = 5 },
195         .find_pll = intel_g4x_find_best_PLL,
196 };
197
198 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
199         .dot = { .min = 20000, .max = 115000 },
200         .vco = { .min = 1750000, .max = 3500000 },
201         .n = { .min = 1, .max = 3 },
202         .m = { .min = 104, .max = 138 },
203         .m1 = { .min = 17, .max = 23 },
204         .m2 = { .min = 5, .max = 11 },
205         .p = { .min = 28, .max = 112 },
206         .p1 = { .min = 2, .max = 8 },
207         .p2 = { .dot_limit = 0,
208                 .p2_slow = 14, .p2_fast = 14
209         },
210         .find_pll = intel_g4x_find_best_PLL,
211 };
212
213 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
214         .dot = { .min = 80000, .max = 224000 },
215         .vco = { .min = 1750000, .max = 3500000 },
216         .n = { .min = 1, .max = 3 },
217         .m = { .min = 104, .max = 138 },
218         .m1 = { .min = 17, .max = 23 },
219         .m2 = { .min = 5, .max = 11 },
220         .p = { .min = 14, .max = 42 },
221         .p1 = { .min = 2, .max = 6 },
222         .p2 = { .dot_limit = 0,
223                 .p2_slow = 7, .p2_fast = 7
224         },
225         .find_pll = intel_g4x_find_best_PLL,
226 };
227
228 static const intel_limit_t intel_limits_g4x_display_port = {
229         .dot = { .min = 161670, .max = 227000 },
230         .vco = { .min = 1750000, .max = 3500000},
231         .n = { .min = 1, .max = 2 },
232         .m = { .min = 97, .max = 108 },
233         .m1 = { .min = 0x10, .max = 0x12 },
234         .m2 = { .min = 0x05, .max = 0x06 },
235         .p = { .min = 10, .max = 20 },
236         .p1 = { .min = 1, .max = 2},
237         .p2 = { .dot_limit = 0,
238                 .p2_slow = 10, .p2_fast = 10 },
239         .find_pll = intel_find_pll_g4x_dp,
240 };
241
242 static const intel_limit_t intel_limits_pineview_sdvo = {
243         .dot = { .min = 20000, .max = 400000},
244         .vco = { .min = 1700000, .max = 3500000 },
245         /* Pineview's Ncounter is a ring counter */
246         .n = { .min = 3, .max = 6 },
247         .m = { .min = 2, .max = 256 },
248         /* Pineview only has one combined m divider, which we treat as m2. */
249         .m1 = { .min = 0, .max = 0 },
250         .m2 = { .min = 0, .max = 254 },
251         .p = { .min = 5, .max = 80 },
252         .p1 = { .min = 1, .max = 8 },
253         .p2 = { .dot_limit = 200000,
254                 .p2_slow = 10, .p2_fast = 5 },
255         .find_pll = intel_find_best_PLL,
256 };
257
258 static const intel_limit_t intel_limits_pineview_lvds = {
259         .dot = { .min = 20000, .max = 400000 },
260         .vco = { .min = 1700000, .max = 3500000 },
261         .n = { .min = 3, .max = 6 },
262         .m = { .min = 2, .max = 256 },
263         .m1 = { .min = 0, .max = 0 },
264         .m2 = { .min = 0, .max = 254 },
265         .p = { .min = 7, .max = 112 },
266         .p1 = { .min = 1, .max = 8 },
267         .p2 = { .dot_limit = 112000,
268                 .p2_slow = 14, .p2_fast = 14 },
269         .find_pll = intel_find_best_PLL,
270 };
271
272 /* Ironlake / Sandybridge
273  *
274  * We calculate clock using (register_value + 2) for N/M1/M2, so here
275  * the range value for them is (actual_value - 2).
276  */
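
/*
 * For example, .n = { .min = 1, .max = 5 } in the DAC table below stands
 * for actual N divisors of 3..7: the table stores (actual_value - 2) and
 * intel_clock() adds the 2 back when computing the VCO and dot clock.
 */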
277 static const intel_limit_t intel_limits_ironlake_dac = {
278         .dot = { .min = 25000, .max = 350000 },
279         .vco = { .min = 1760000, .max = 3510000 },
280         .n = { .min = 1, .max = 5 },
281         .m = { .min = 79, .max = 127 },
282         .m1 = { .min = 12, .max = 22 },
283         .m2 = { .min = 5, .max = 9 },
284         .p = { .min = 5, .max = 80 },
285         .p1 = { .min = 1, .max = 8 },
286         .p2 = { .dot_limit = 225000,
287                 .p2_slow = 10, .p2_fast = 5 },
288         .find_pll = intel_g4x_find_best_PLL,
289 };
290
291 static const intel_limit_t intel_limits_ironlake_single_lvds = {
292         .dot = { .min = 25000, .max = 350000 },
293         .vco = { .min = 1760000, .max = 3510000 },
294         .n = { .min = 1, .max = 3 },
295         .m = { .min = 79, .max = 118 },
296         .m1 = { .min = 12, .max = 22 },
297         .m2 = { .min = 5, .max = 9 },
298         .p = { .min = 28, .max = 112 },
299         .p1 = { .min = 2, .max = 8 },
300         .p2 = { .dot_limit = 225000,
301                 .p2_slow = 14, .p2_fast = 14 },
302         .find_pll = intel_g4x_find_best_PLL,
303 };
304
305 static const intel_limit_t intel_limits_ironlake_dual_lvds = {
306         .dot = { .min = 25000, .max = 350000 },
307         .vco = { .min = 1760000, .max = 3510000 },
308         .n = { .min = 1, .max = 3 },
309         .m = { .min = 79, .max = 127 },
310         .m1 = { .min = 12, .max = 22 },
311         .m2 = { .min = 5, .max = 9 },
312         .p = { .min = 14, .max = 56 },
313         .p1 = { .min = 2, .max = 8 },
314         .p2 = { .dot_limit = 225000,
315                 .p2_slow = 7, .p2_fast = 7 },
316         .find_pll = intel_g4x_find_best_PLL,
317 };
318
319 /* LVDS 100MHz refclk limits. */
320 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
321         .dot = { .min = 25000, .max = 350000 },
322         .vco = { .min = 1760000, .max = 3510000 },
323         .n = { .min = 1, .max = 2 },
324         .m = { .min = 79, .max = 126 },
325         .m1 = { .min = 12, .max = 22 },
326         .m2 = { .min = 5, .max = 9 },
327         .p = { .min = 28, .max = 112 },
328         .p1 = { .min = 2, .max = 8 },
329         .p2 = { .dot_limit = 225000,
330                 .p2_slow = 14, .p2_fast = 14 },
331         .find_pll = intel_g4x_find_best_PLL,
332 };
333
334 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
335         .dot = { .min = 25000, .max = 350000 },
336         .vco = { .min = 1760000, .max = 3510000 },
337         .n = { .min = 1, .max = 3 },
338         .m = { .min = 79, .max = 126 },
339         .m1 = { .min = 12, .max = 22 },
340         .m2 = { .min = 5, .max = 9 },
341         .p = { .min = 14, .max = 42 },
342         .p1 = { .min = 2, .max = 6 },
343         .p2 = { .dot_limit = 225000,
344                 .p2_slow = 7, .p2_fast = 7 },
345         .find_pll = intel_g4x_find_best_PLL,
346 };
347
348 static const intel_limit_t intel_limits_ironlake_display_port = {
349         .dot = { .min = 25000, .max = 350000 },
350         .vco = { .min = 1760000, .max = 3510000},
351         .n = { .min = 1, .max = 2 },
352         .m = { .min = 81, .max = 90 },
353         .m1 = { .min = 12, .max = 22 },
354         .m2 = { .min = 5, .max = 9 },
355         .p = { .min = 10, .max = 20 },
356         .p1 = { .min = 1, .max = 2},
357         .p2 = { .dot_limit = 0,
358                 .p2_slow = 10, .p2_fast = 10 },
359         .find_pll = intel_find_pll_ironlake_dp,
360 };
361
362 u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
363 {
364         unsigned long flags;
365         u32 val = 0;
366
367         spin_lock_irqsave(&dev_priv->dpio_lock, flags);
368         if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
369                 DRM_ERROR("DPIO idle wait timed out\n");
370                 goto out_unlock;
371         }
372
373         I915_WRITE(DPIO_REG, reg);
374         I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
375                    DPIO_BYTE);
376         if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
377                 DRM_ERROR("DPIO read wait timed out\n");
378                 goto out_unlock;
379         }
380         val = I915_READ(DPIO_DATA);
381
382 out_unlock:
383         spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
384         return val;
385 }
386
387 static void vlv_init_dpio(struct drm_device *dev)
388 {
389         struct drm_i915_private *dev_priv = dev->dev_private;
390
391         /* Reset the DPIO config */
392         I915_WRITE(DPIO_CTL, 0);
393         POSTING_READ(DPIO_CTL);
394         I915_WRITE(DPIO_CTL, 1);
395         POSTING_READ(DPIO_CTL);
396 }
397
398 static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
399 {
400         DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
401         return 1;
402 }
403
404 static const struct dmi_system_id intel_dual_link_lvds[] = {
405         {
406                 .callback = intel_dual_link_lvds_callback,
407                 .ident = "Apple MacBook Pro (Core i5/i7 Series)",
408                 .matches = {
409                         DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
410                         DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
411                 },
412         },
413         { }     /* terminating entry */
414 };
415
416 static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
417                               unsigned int reg)
418 {
419         unsigned int val;
420
421         /* use the module option value if specified */
422         if (i915_lvds_channel_mode > 0)
423                 return i915_lvds_channel_mode == 2;
424
425         if (dmi_check_system(intel_dual_link_lvds))
426                 return true;
427
428         if (dev_priv->lvds_val)
429                 val = dev_priv->lvds_val;
430         else {
431                 /* BIOS should set the proper LVDS register value at boot, but
432                  * in reality, it doesn't set the value when the lid is closed;
433                  * we need to check "the value to be set" in VBT when LVDS
434                  * register is uninitialized.
435                  */
436                 val = I915_READ(reg);
437                 if (!(val & ~LVDS_DETECTED))
438                         val = dev_priv->bios_lvds_val;
439                 dev_priv->lvds_val = val;
440         }
441         return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
442 }
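
/*
 * Decision order in is_dual_link_lvds() above: an explicit
 * i915_lvds_channel_mode module parameter wins, then the DMI quirk table,
 * then the cached (or freshly read) LVDS register value, falling back to
 * the VBT-provided bios_lvds_val when the BIOS left the register
 * uninitialized (e.g. because the lid was closed at boot).
 */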
443
444 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
445                                                 int refclk)
446 {
447         struct drm_device *dev = crtc->dev;
448         struct drm_i915_private *dev_priv = dev->dev_private;
449         const intel_limit_t *limit;
450
451         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
452                 if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
453                         /* LVDS dual channel */
454                         if (refclk == 100000)
455                                 limit = &intel_limits_ironlake_dual_lvds_100m;
456                         else
457                                 limit = &intel_limits_ironlake_dual_lvds;
458                 } else {
459                         if (refclk == 100000)
460                                 limit = &intel_limits_ironlake_single_lvds_100m;
461                         else
462                                 limit = &intel_limits_ironlake_single_lvds;
463                 }
464         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
465                         HAS_eDP)
466                 limit = &intel_limits_ironlake_display_port;
467         else
468                 limit = &intel_limits_ironlake_dac;
469
470         return limit;
471 }
472
473 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
474 {
475         struct drm_device *dev = crtc->dev;
476         struct drm_i915_private *dev_priv = dev->dev_private;
477         const intel_limit_t *limit;
478
479         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
480                 if (is_dual_link_lvds(dev_priv, LVDS))
481                         /* LVDS with dual channel */
482                         limit = &intel_limits_g4x_dual_channel_lvds;
483                 else
484                         /* LVDS with single channel */
485                         limit = &intel_limits_g4x_single_channel_lvds;
486         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
487                    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
488                 limit = &intel_limits_g4x_hdmi;
489         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
490                 limit = &intel_limits_g4x_sdvo;
491         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
492                 limit = &intel_limits_g4x_display_port;
493         } else /* The option is for other outputs */
494                 limit = &intel_limits_i9xx_sdvo;
495
496         return limit;
497 }
498
499 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
500 {
501         struct drm_device *dev = crtc->dev;
502         const intel_limit_t *limit;
503
504         if (HAS_PCH_SPLIT(dev))
505                 limit = intel_ironlake_limit(crtc, refclk);
506         else if (IS_G4X(dev)) {
507                 limit = intel_g4x_limit(crtc);
508         } else if (IS_PINEVIEW(dev)) {
509                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
510                         limit = &intel_limits_pineview_lvds;
511                 else
512                         limit = &intel_limits_pineview_sdvo;
513         } else if (!IS_GEN2(dev)) {
514                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
515                         limit = &intel_limits_i9xx_lvds;
516                 else
517                         limit = &intel_limits_i9xx_sdvo;
518         } else {
519                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
520                         limit = &intel_limits_i8xx_lvds;
521                 else
522                         limit = &intel_limits_i8xx_dvo;
523         }
524         return limit;
525 }
526
527 /* m1 is reserved as 0 in Pineview, n is a ring counter */
528 static void pineview_clock(int refclk, intel_clock_t *clock)
529 {
530         clock->m = clock->m2 + 2;
531         clock->p = clock->p1 * clock->p2;
532         clock->vco = refclk * clock->m / clock->n;
533         clock->dot = clock->vco / clock->p;
534 }
535
536 static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
537 {
538         if (IS_PINEVIEW(dev)) {
539                 pineview_clock(refclk, clock);
540                 return;
541         }
542         clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
543         clock->p = clock->p1 * clock->p2;
544         clock->vco = refclk * clock->m / (clock->n + 2);
545         clock->dot = clock->vco / clock->p;
546 }
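
/*
 * Worked example for the non-Pineview formula above, using the fixed g4x
 * DisplayPort divisors from intel_find_pll_g4x_dp() further down
 * (m1 = 23, m2 = 8, n = 2, p1 = 2, p2 = 10) and its 96000 kHz reference:
 *
 *      m   = 5 * (23 + 2) + (8 + 2) = 135
 *      vco = 96000 * 135 / (2 + 2)  = 3240000 kHz
 *      p   = 2 * 10                 = 20
 *      dot = 3240000 / 20           = 162000 kHz, i.e. the 162 MHz DP rate
 */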
547
548 /**
549  * Returns whether any output on the specified pipe is of the specified type
550  */
551 bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
552 {
553         struct drm_device *dev = crtc->dev;
554         struct drm_mode_config *mode_config = &dev->mode_config;
555         struct intel_encoder *encoder;
556
557         list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
558                 if (encoder->base.crtc == crtc && encoder->type == type)
559                         return true;
560
561         return false;
562 }
563
564 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
565 /**
566  * Returns whether the given set of divisors is valid for a given refclk with
567  * the given connectors.
568  */
569
570 static bool intel_PLL_is_valid(struct drm_device *dev,
571                                const intel_limit_t *limit,
572                                const intel_clock_t *clock)
573 {
574         if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
575                 INTELPllInvalid("p1 out of range\n");
576         if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
577                 INTELPllInvalid("p out of range\n");
578         if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
579                 INTELPllInvalid("m2 out of range\n");
580         if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
581                 INTELPllInvalid("m1 out of range\n");
582         if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
583                 INTELPllInvalid("m1 <= m2\n");
584         if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
585                 INTELPllInvalid("m out of range\n");
586         if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
587                 INTELPllInvalid("n out of range\n");
588         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
589                 INTELPllInvalid("vco out of range\n");
590         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
591          * connector, etc., rather than just a single range.
592          */
593         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
594                 INTELPllInvalid("dot out of range\n");
595
596         return true;
597 }
598
599 static bool
600 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
601                     int target, int refclk, intel_clock_t *match_clock,
602                     intel_clock_t *best_clock)
603
604 {
605         struct drm_device *dev = crtc->dev;
606         struct drm_i915_private *dev_priv = dev->dev_private;
607         intel_clock_t clock;
608         int err = target;
609
610         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
611             (I915_READ(LVDS)) != 0) {
612                 /*
613                  * For LVDS, if the panel is on, just rely on its current
614                  * settings for dual-channel.  We haven't figured out how to
615                  * reliably set up different single/dual channel state, if we
616                  * even can.
617                  */
618                 if (is_dual_link_lvds(dev_priv, LVDS))
619                         clock.p2 = limit->p2.p2_fast;
620                 else
621                         clock.p2 = limit->p2.p2_slow;
622         } else {
623                 if (target < limit->p2.dot_limit)
624                         clock.p2 = limit->p2.p2_slow;
625                 else
626                         clock.p2 = limit->p2.p2_fast;
627         }
628
629         memset(best_clock, 0, sizeof(*best_clock));
630
631         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
632              clock.m1++) {
633                 for (clock.m2 = limit->m2.min;
634                      clock.m2 <= limit->m2.max; clock.m2++) {
635                         /* m1 is always 0 in Pineview */
636                         if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
637                                 break;
638                         for (clock.n = limit->n.min;
639                              clock.n <= limit->n.max; clock.n++) {
640                                 for (clock.p1 = limit->p1.min;
641                                         clock.p1 <= limit->p1.max; clock.p1++) {
642                                         int this_err;
643
644                                         intel_clock(dev, refclk, &clock);
645                                         if (!intel_PLL_is_valid(dev, limit,
646                                                                 &clock))
647                                                 continue;
648                                         if (match_clock &&
649                                             clock.p != match_clock->p)
650                                                 continue;
651
652                                         this_err = abs(clock.dot - target);
653                                         if (this_err < err) {
654                                                 *best_clock = clock;
655                                                 err = this_err;
656                                         }
657                                 }
658                         }
659                 }
660         }
661
662         return (err != target);
663 }
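
/*
 * A minimal sketch of how the limit tables and the search routines above
 * fit together (target_khz is a placeholder for the requested dot clock in
 * kHz; the real mode-set path later in this file also picks refclk and
 * reports errors):
 *
 *      const intel_limit_t *limit = intel_limit(crtc, refclk);
 *      intel_clock_t clock;
 *
 *      if (!limit->find_pll(limit, crtc, target_khz, refclk, NULL, &clock))
 *              return false;
 *
 * A false return means the search found no acceptable divisor set; passing
 * NULL for match_clock means no extra constraint is placed on P.
 */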
664
665 static bool
666 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
667                         int target, int refclk, intel_clock_t *match_clock,
668                         intel_clock_t *best_clock)
669 {
670         struct drm_device *dev = crtc->dev;
671         struct drm_i915_private *dev_priv = dev->dev_private;
672         intel_clock_t clock;
673         int max_n;
674         bool found;
675         /* approximately equals target * 0.00585 (1/256 + 1/512) */
676         int err_most = (target >> 8) + (target >> 9);
677         found = false;
678
679         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
680                 int lvds_reg;
681
682                 if (HAS_PCH_SPLIT(dev))
683                         lvds_reg = PCH_LVDS;
684                 else
685                         lvds_reg = LVDS;
686                 if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
687                     LVDS_CLKB_POWER_UP)
688                         clock.p2 = limit->p2.p2_fast;
689                 else
690                         clock.p2 = limit->p2.p2_slow;
691         } else {
692                 if (target < limit->p2.dot_limit)
693                         clock.p2 = limit->p2.p2_slow;
694                 else
695                         clock.p2 = limit->p2.p2_fast;
696         }
697
698         memset(best_clock, 0, sizeof(*best_clock));
699         max_n = limit->n.max;
700         /* based on hardware requirement, prefer smaller n to precision */
701         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
702                 /* based on hardware requirement, prefer larger m1, m2 */
703                 for (clock.m1 = limit->m1.max;
704                      clock.m1 >= limit->m1.min; clock.m1--) {
705                         for (clock.m2 = limit->m2.max;
706                              clock.m2 >= limit->m2.min; clock.m2--) {
707                                 for (clock.p1 = limit->p1.max;
708                                      clock.p1 >= limit->p1.min; clock.p1--) {
709                                         int this_err;
710
711                                         intel_clock(dev, refclk, &clock);
712                                         if (!intel_PLL_is_valid(dev, limit,
713                                                                 &clock))
714                                                 continue;
715                                         if (match_clock &&
716                                             clock.p != match_clock->p)
717                                                 continue;
718
719                                         this_err = abs(clock.dot - target);
720                                         if (this_err < err_most) {
721                                                 *best_clock = clock;
722                                                 err_most = this_err;
723                                                 max_n = clock.n;
724                                                 found = true;
725                                         }
726                                 }
727                         }
728                 }
729         }
730         return found;
731 }
732
733 static bool
734 intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
735                            int target, int refclk, intel_clock_t *match_clock,
736                            intel_clock_t *best_clock)
737 {
738         struct drm_device *dev = crtc->dev;
739         intel_clock_t clock;
740
741         if (target < 200000) {
742                 clock.n = 1;
743                 clock.p1 = 2;
744                 clock.p2 = 10;
745                 clock.m1 = 12;
746                 clock.m2 = 9;
747         } else {
748                 clock.n = 2;
749                 clock.p1 = 1;
750                 clock.p2 = 10;
751                 clock.m1 = 14;
752                 clock.m2 = 8;
753         }
754         intel_clock(dev, refclk, &clock);
755         memcpy(best_clock, &clock, sizeof(intel_clock_t));
756         return true;
757 }
758
759 /* DisplayPort has only two frequencies, 162MHz and 270MHz */
760 static bool
761 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
762                       int target, int refclk, intel_clock_t *match_clock,
763                       intel_clock_t *best_clock)
764 {
765         intel_clock_t clock;
766         if (target < 200000) {
767                 clock.p1 = 2;
768                 clock.p2 = 10;
769                 clock.n = 2;
770                 clock.m1 = 23;
771                 clock.m2 = 8;
772         } else {
773                 clock.p1 = 1;
774                 clock.p2 = 10;
775                 clock.n = 1;
776                 clock.m1 = 14;
777                 clock.m2 = 2;
778         }
779         clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
780         clock.p = (clock.p1 * clock.p2);
781         clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
782         clock.vco = 0;
783         memcpy(best_clock, &clock, sizeof(intel_clock_t));
784         return true;
785 }
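
/*
 * The second fixed divisor set above works out the same way as the 162 MHz
 * example after intel_clock():
 *
 *      m   = 5 * (14 + 2) + (2 + 2) = 84
 *      dot = 96000 * 84 / (1 + 2) / (1 * 10) = 268800 kHz, i.e. ~270 MHz
 *
 * so the two sets land on the two DisplayPort pixel clocks noted in the
 * comment above.
 */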
786
787 static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
788 {
789         struct drm_i915_private *dev_priv = dev->dev_private;
790         u32 frame, frame_reg = PIPEFRAME(pipe);
791
792         frame = I915_READ(frame_reg);
793
794         if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
795                 DRM_DEBUG_KMS("vblank wait timed out\n");
796 }
797
798 /**
799  * intel_wait_for_vblank - wait for vblank on a given pipe
800  * @dev: drm device
801  * @pipe: pipe to wait for
802  *
803  * Wait for vblank to occur on a given pipe.  Needed for various bits of
804  * mode setting code.
805  */
806 void intel_wait_for_vblank(struct drm_device *dev, int pipe)
807 {
808         struct drm_i915_private *dev_priv = dev->dev_private;
809         int pipestat_reg = PIPESTAT(pipe);
810
811         if (INTEL_INFO(dev)->gen >= 5) {
812                 ironlake_wait_for_vblank(dev, pipe);
813                 return;
814         }
815
816         /* Clear existing vblank status. Note this will clear any other
817          * sticky status fields as well.
818          *
819          * This races with i915_driver_irq_handler() with the result
820          * that either function could miss a vblank event.  Here it is not
821          * fatal, as we will either wait upon the next vblank interrupt or
822          * timeout.  Generally speaking intel_wait_for_vblank() is only
823          * called during modeset at which time the GPU should be idle and
824          * should *not* be performing page flips and thus not waiting on
825          * vblanks...
826          * Currently, the result of us stealing a vblank from the irq
827          * handler is that a single frame will be skipped during swapbuffers.
828          */
829         I915_WRITE(pipestat_reg,
830                    I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
831
832         /* Wait for vblank interrupt bit to set */
833         if (wait_for(I915_READ(pipestat_reg) &
834                      PIPE_VBLANK_INTERRUPT_STATUS,
835                      50))
836                 DRM_DEBUG_KMS("vblank wait timed out\n");
837 }
838
839 /*
840  * intel_wait_for_pipe_off - wait for pipe to turn off
841  * @dev: drm device
842  * @pipe: pipe to wait for
843  *
844  * After disabling a pipe, we can't wait for vblank in the usual way,
845  * spinning on the vblank interrupt status bit, since we won't actually
846  * see an interrupt when the pipe is disabled.
847  *
848  * On Gen4 and above:
849  *   wait for the pipe register state bit to turn off
850  *
851  * Otherwise:
852  *   wait for the display line value to settle (it usually
853  *   ends up stopping at the start of the next frame).
854  *
855  */
856 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
857 {
858         struct drm_i915_private *dev_priv = dev->dev_private;
859
860         if (INTEL_INFO(dev)->gen >= 4) {
861                 int reg = PIPECONF(pipe);
862
863                 /* Wait for the Pipe State to go off */
864                 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
865                              100))
866                         DRM_DEBUG_KMS("pipe_off wait timed out\n");
867         } else {
868                 u32 last_line, line_mask;
869                 int reg = PIPEDSL(pipe);
870                 unsigned long timeout = jiffies + msecs_to_jiffies(100);
871
872                 if (IS_GEN2(dev))
873                         line_mask = DSL_LINEMASK_GEN2;
874                 else
875                         line_mask = DSL_LINEMASK_GEN3;
876
877                 /* Wait for the display line to settle */
878                 do {
879                         last_line = I915_READ(reg) & line_mask;
880                         mdelay(5);
881                 } while (((I915_READ(reg) & line_mask) != last_line) &&
882                          time_after(timeout, jiffies));
883                 if (time_after(jiffies, timeout))
884                         DRM_DEBUG_KMS("pipe_off wait timed out\n");
885         }
886 }
887
888 static const char *state_string(bool enabled)
889 {
890         return enabled ? "on" : "off";
891 }
892
893 /* Only for pre-ILK configs */
894 static void assert_pll(struct drm_i915_private *dev_priv,
895                        enum pipe pipe, bool state)
896 {
897         int reg;
898         u32 val;
899         bool cur_state;
900
901         reg = DPLL(pipe);
902         val = I915_READ(reg);
903         cur_state = !!(val & DPLL_VCO_ENABLE);
904         WARN(cur_state != state,
905              "PLL state assertion failure (expected %s, current %s)\n",
906              state_string(state), state_string(cur_state));
907 }
908 #define assert_pll_enabled(d, p) assert_pll(d, p, true)
909 #define assert_pll_disabled(d, p) assert_pll(d, p, false)
910
911 /* For ILK+ */
912 static void assert_pch_pll(struct drm_i915_private *dev_priv,
913                            struct intel_crtc *intel_crtc, bool state)
914 {
915         int reg;
916         u32 val;
917         bool cur_state;
918
919         if (HAS_PCH_LPT(dev_priv->dev)) {
920                 DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
921                 return;
922         }
923
924         if (!intel_crtc->pch_pll) {
925                 WARN(1, "asserting PCH PLL enabled with no PLL\n");
926                 return;
927         }
928
929         if (HAS_PCH_CPT(dev_priv->dev)) {
930                 u32 pch_dpll;
931
932                 pch_dpll = I915_READ(PCH_DPLL_SEL);
933
934                 /* Make sure the selected PLL is enabled to the transcoder */
935                 WARN(!((pch_dpll >> (4 * intel_crtc->pipe)) & 8),
936                      "transcoder %d PLL not enabled\n", intel_crtc->pipe);
937         }
938
939         reg = intel_crtc->pch_pll->pll_reg;
940         val = I915_READ(reg);
941         cur_state = !!(val & DPLL_VCO_ENABLE);
942         WARN(cur_state != state,
943              "PCH PLL state assertion failure (expected %s, current %s)\n",
944              state_string(state), state_string(cur_state));
945 }
946 #define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
947 #define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
948
949 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
950                           enum pipe pipe, bool state)
951 {
952         int reg;
953         u32 val;
954         bool cur_state;
955
956         if (IS_HASWELL(dev_priv->dev)) {
957                 /* On Haswell, DDI is used instead of FDI_TX_CTL */
958                 reg = DDI_FUNC_CTL(pipe);
959                 val = I915_READ(reg);
960                 cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
961         } else {
962                 reg = FDI_TX_CTL(pipe);
963                 val = I915_READ(reg);
964                 cur_state = !!(val & FDI_TX_ENABLE);
965         }
966         WARN(cur_state != state,
967              "FDI TX state assertion failure (expected %s, current %s)\n",
968              state_string(state), state_string(cur_state));
969 }
970 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
971 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
972
973 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
974                           enum pipe pipe, bool state)
975 {
976         int reg;
977         u32 val;
978         bool cur_state;
979
980         if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
981                 DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
982                 return;
983         } else {
984                 reg = FDI_RX_CTL(pipe);
985                 val = I915_READ(reg);
986                 cur_state = !!(val & FDI_RX_ENABLE);
987         }
988         WARN(cur_state != state,
989              "FDI RX state assertion failure (expected %s, current %s)\n",
990              state_string(state), state_string(cur_state));
991 }
992 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
993 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
994
995 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
996                                       enum pipe pipe)
997 {
998         int reg;
999         u32 val;
1000
1001         /* ILK FDI PLL is always enabled */
1002         if (dev_priv->info->gen == 5)
1003                 return;
1004
1005         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1006         if (IS_HASWELL(dev_priv->dev))
1007                 return;
1008
1009         reg = FDI_TX_CTL(pipe);
1010         val = I915_READ(reg);
1011         WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1012 }
1013
1014 static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
1015                                       enum pipe pipe)
1016 {
1017         int reg;
1018         u32 val;
1019
1020         if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1021                 DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
1022                 return;
1023         }
1024         reg = FDI_RX_CTL(pipe);
1025         val = I915_READ(reg);
1026         WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
1027 }
1028
1029 static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1030                                   enum pipe pipe)
1031 {
1032         int pp_reg, lvds_reg;
1033         u32 val;
1034         enum pipe panel_pipe = PIPE_A;
1035         bool locked = true;
1036
1037         if (HAS_PCH_SPLIT(dev_priv->dev)) {
1038                 pp_reg = PCH_PP_CONTROL;
1039                 lvds_reg = PCH_LVDS;
1040         } else {
1041                 pp_reg = PP_CONTROL;
1042                 lvds_reg = LVDS;
1043         }
1044
1045         val = I915_READ(pp_reg);
1046         if (!(val & PANEL_POWER_ON) ||
1047             ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
1048                 locked = false;
1049
1050         if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
1051                 panel_pipe = PIPE_B;
1052
1053         WARN(panel_pipe == pipe && locked,
1054              "panel assertion failure, pipe %c regs locked\n",
1055              pipe_name(pipe));
1056 }
1057
1058 void assert_pipe(struct drm_i915_private *dev_priv,
1059                  enum pipe pipe, bool state)
1060 {
1061         int reg;
1062         u32 val;
1063         bool cur_state;
1064
1065         /* if we need the pipe A quirk it must always be on */
1066         if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
1067                 state = true;
1068
1069         reg = PIPECONF(pipe);
1070         val = I915_READ(reg);
1071         cur_state = !!(val & PIPECONF_ENABLE);
1072         WARN(cur_state != state,
1073              "pipe %c assertion failure (expected %s, current %s)\n",
1074              pipe_name(pipe), state_string(state), state_string(cur_state));
1075 }
1076
1077 static void assert_plane(struct drm_i915_private *dev_priv,
1078                          enum plane plane, bool state)
1079 {
1080         int reg;
1081         u32 val;
1082         bool cur_state;
1083
1084         reg = DSPCNTR(plane);
1085         val = I915_READ(reg);
1086         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1087         WARN(cur_state != state,
1088              "plane %c assertion failure (expected %s, current %s)\n",
1089              plane_name(plane), state_string(state), state_string(cur_state));
1090 }
1091
1092 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1093 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1094
1095 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1096                                    enum pipe pipe)
1097 {
1098         int reg, i;
1099         u32 val;
1100         int cur_pipe;
1101
1102         /* Planes are fixed to pipes on ILK+ */
1103         if (HAS_PCH_SPLIT(dev_priv->dev)) {
1104                 reg = DSPCNTR(pipe);
1105                 val = I915_READ(reg);
1106                 WARN((val & DISPLAY_PLANE_ENABLE),
1107              "plane %c assertion failure, should be disabled but is not\n",
1108                      plane_name(pipe));
1109                 return;
1110         }
1111
1112         /* Need to check both planes against the pipe */
1113         for (i = 0; i < 2; i++) {
1114                 reg = DSPCNTR(i);
1115                 val = I915_READ(reg);
1116                 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1117                         DISPPLANE_SEL_PIPE_SHIFT;
1118                 WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1119                      "plane %c assertion failure, should be off on pipe %c but is still active\n",
1120                      plane_name(i), pipe_name(pipe));
1121         }
1122 }
1123
1124 static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1125 {
1126         u32 val;
1127         bool enabled;
1128
1129         if (HAS_PCH_LPT(dev_priv->dev)) {
1130                 DRM_DEBUG_DRIVER("LPT does not have PCH refclk, skipping check\n");
1131                 return;
1132         }
1133
1134         val = I915_READ(PCH_DREF_CONTROL);
1135         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1136                             DREF_SUPERSPREAD_SOURCE_MASK));
1137         WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1138 }
1139
1140 static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
1141                                        enum pipe pipe)
1142 {
1143         int reg;
1144         u32 val;
1145         bool enabled;
1146
1147         reg = TRANSCONF(pipe);
1148         val = I915_READ(reg);
1149         enabled = !!(val & TRANS_ENABLE);
1150         WARN(enabled,
1151              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1152              pipe_name(pipe));
1153 }
1154
1155 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1156                             enum pipe pipe, u32 port_sel, u32 val)
1157 {
1158         if ((val & DP_PORT_EN) == 0)
1159                 return false;
1160
1161         if (HAS_PCH_CPT(dev_priv->dev)) {
1162                 u32     trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1163                 u32     trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1164                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1165                         return false;
1166         } else {
1167                 if ((val & DP_PIPE_MASK) != (pipe << 30))
1168                         return false;
1169         }
1170         return true;
1171 }
1172
1173 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1174                               enum pipe pipe, u32 val)
1175 {
1176         if ((val & PORT_ENABLE) == 0)
1177                 return false;
1178
1179         if (HAS_PCH_CPT(dev_priv->dev)) {
1180                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1181                         return false;
1182         } else {
1183                 if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
1184                         return false;
1185         }
1186         return true;
1187 }
1188
1189 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1190                               enum pipe pipe, u32 val)
1191 {
1192         if ((val & LVDS_PORT_EN) == 0)
1193                 return false;
1194
1195         if (HAS_PCH_CPT(dev_priv->dev)) {
1196                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1197                         return false;
1198         } else {
1199                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1200                         return false;
1201         }
1202         return true;
1203 }
1204
1205 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1206                               enum pipe pipe, u32 val)
1207 {
1208         if ((val & ADPA_DAC_ENABLE) == 0)
1209                 return false;
1210         if (HAS_PCH_CPT(dev_priv->dev)) {
1211                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1212                         return false;
1213         } else {
1214                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1215                         return false;
1216         }
1217         return true;
1218 }
1219
1220 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1221                                    enum pipe pipe, int reg, u32 port_sel)
1222 {
1223         u32 val = I915_READ(reg);
1224         WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1225              "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1226              reg, pipe_name(pipe));
1227 }
1228
1229 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1230                                      enum pipe pipe, int reg)
1231 {
1232         u32 val = I915_READ(reg);
1233         WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1234              "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1235              reg, pipe_name(pipe));
1236 }
1237
1238 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1239                                       enum pipe pipe)
1240 {
1241         int reg;
1242         u32 val;
1243
1244         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1245         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1246         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1247
1248         reg = PCH_ADPA;
1249         val = I915_READ(reg);
1250         WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1251              "PCH VGA enabled on transcoder %c, should be disabled\n",
1252              pipe_name(pipe));
1253
1254         reg = PCH_LVDS;
1255         val = I915_READ(reg);
1256         WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1257              "PCH LVDS enabled on transcoder %c, should be disabled\n",
1258              pipe_name(pipe));
1259
1260         assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1261         assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1262         assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1263 }
1264
1265 /**
1266  * intel_enable_pll - enable a PLL
1267  * @dev_priv: i915 private structure
1268  * @pipe: pipe PLL to enable
1269  *
1270  * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
1271  * make sure the PLL reg is writable first though, since the panel write
1272  * protect mechanism may be enabled.
1273  *
1274  * Note!  This is for pre-ILK only.
1275  */
1276 static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1277 {
1278         int reg;
1279         u32 val;
1280
1281         /* No really, not for ILK+ */
1282         BUG_ON(dev_priv->info->gen >= 5);
1283
1284         /* PLL is protected by panel, make sure we can write it */
1285         if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1286                 assert_panel_unlocked(dev_priv, pipe);
1287
1288         reg = DPLL(pipe);
1289         val = I915_READ(reg);
1290         val |= DPLL_VCO_ENABLE;
1291
1292         /* We do this three times for luck */
1293         I915_WRITE(reg, val);
1294         POSTING_READ(reg);
1295         udelay(150); /* wait for warmup */
1296         I915_WRITE(reg, val);
1297         POSTING_READ(reg);
1298         udelay(150); /* wait for warmup */
1299         I915_WRITE(reg, val);
1300         POSTING_READ(reg);
1301         udelay(150); /* wait for warmup */
1302 }
1303
1304 /**
1305  * intel_disable_pll - disable a PLL
1306  * @dev_priv: i915 private structure
1307  * @pipe: pipe PLL to disable
1308  *
1309  * Disable the PLL for @pipe, making sure the pipe is off first.
1310  *
1311  * Note!  This is for pre-ILK only.
1312  */
1313 static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1314 {
1315         int reg;
1316         u32 val;
1317
1318         /* Don't disable pipe A or pipe A PLLs if needed */
1319         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1320                 return;
1321
1322         /* Make sure the pipe isn't still relying on us */
1323         assert_pipe_disabled(dev_priv, pipe);
1324
1325         reg = DPLL(pipe);
1326         val = I915_READ(reg);
1327         val &= ~DPLL_VCO_ENABLE;
1328         I915_WRITE(reg, val);
1329         POSTING_READ(reg);
1330 }
1331
1332 /* SBI access */
1333 static void
1334 intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
1335 {
1336         unsigned long flags;
1337
1338         spin_lock_irqsave(&dev_priv->dpio_lock, flags);
1339         if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
1340                                 100)) {
1341                 DRM_ERROR("timeout waiting for SBI to become ready\n");
1342                 goto out_unlock;
1343         }
1344
1345         I915_WRITE(SBI_ADDR,
1346                         (reg << 16));
1347         I915_WRITE(SBI_DATA,
1348                         value);
1349         I915_WRITE(SBI_CTL_STAT,
1350                         SBI_BUSY |
1351                         SBI_CTL_OP_CRWR);
1352
1353         if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
1354                                 100)) {
1355                 DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
1356                 goto out_unlock;
1357         }
1358
1359 out_unlock:
1360         spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
1361 }
1362
1363 static u32
1364 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
1365 {
1366         unsigned long flags;
1367         u32 value;
1368
1369         spin_lock_irqsave(&dev_priv->dpio_lock, flags);
1370         if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
1371                                 100)) {
1372                 DRM_ERROR("timeout waiting for SBI to become ready\n");
1373                 goto out_unlock;
1374         }
1375
1376         I915_WRITE(SBI_ADDR,
1377                         (reg << 16));
1378         I915_WRITE(SBI_CTL_STAT,
1379                         SBI_BUSY |
1380                         SBI_CTL_OP_CRRD);
1381
1382         if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
1383                                 100)) {
1384                 DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
1385                 goto out_unlock;
1386         }
1387
1388         value = I915_READ(SBI_DATA);
1389
1390 out_unlock:
1391         spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
1392         return value;
1393 }
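
/*
 * Sketch of a read-modify-write through the sideband interface above;
 * SBI_EXAMPLE_REG and SBI_EXAMPLE_BIT are placeholders, not real register
 * or bit names:
 *
 *      u32 tmp = intel_sbi_read(dev_priv, SBI_EXAMPLE_REG);
 *      intel_sbi_write(dev_priv, SBI_EXAMPLE_REG, tmp | SBI_EXAMPLE_BIT);
 *
 * Each helper takes dpio_lock only for its own transaction, so a sequence
 * like this is not atomic as a whole; callers needing atomicity must
 * serialize against other users of the same register themselves.
 */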
1394
1395 /**
1396  * intel_enable_pch_pll - enable PCH PLL
1397  * @dev_priv: i915 private structure
1398  * @pipe: pipe PLL to enable
1399  *
1400  * The PCH PLL needs to be enabled before the PCH transcoder, since it
1401  * drives the transcoder clock.
1402  */
1403 static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
1404 {
1405         struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1406         struct intel_pch_pll *pll;
1407         int reg;
1408         u32 val;
1409
1410         /* PCH PLLs only available on ILK, SNB and IVB */
1411         BUG_ON(dev_priv->info->gen < 5);
1412         pll = intel_crtc->pch_pll;
1413         if (pll == NULL)
1414                 return;
1415
1416         if (WARN_ON(pll->refcount == 0))
1417                 return;
1418
1419         DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d) for crtc %d\n",
1420                       pll->pll_reg, pll->active, pll->on,
1421                       intel_crtc->base.base.id);
1422
1423         /* PCH refclock must be enabled first */
1424         assert_pch_refclk_enabled(dev_priv);
1425
1426         if (pll->active++ && pll->on) {
1427                 assert_pch_pll_enabled(dev_priv, intel_crtc);
1428                 return;
1429         }
1430
1431         DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
1432
1433         reg = pll->pll_reg;
1434         val = I915_READ(reg);
1435         val |= DPLL_VCO_ENABLE;
1436         I915_WRITE(reg, val);
1437         POSTING_READ(reg);
1438         udelay(200);
1439
1440         pll->on = true;
1441 }
1442
1443 static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
1444 {
1445         struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1446         struct intel_pch_pll *pll = intel_crtc->pch_pll;
1447         int reg;
1448         u32 val;
1449
1450         /* PCH only available on ILK+ */
1451         BUG_ON(dev_priv->info->gen < 5);
1452         if (pll == NULL)
1453                 return;
1454
1455         if (WARN_ON(pll->refcount == 0))
1456                 return;
1457
1458         DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
1459                       pll->pll_reg, pll->active, pll->on,
1460                       intel_crtc->base.base.id);
1461
1462         if (WARN_ON(pll->active == 0)) {
1463                 assert_pch_pll_disabled(dev_priv, intel_crtc);
1464                 return;
1465         }
1466
1467         if (--pll->active) {
1468                 assert_pch_pll_enabled(dev_priv, intel_crtc);
1469                 return;
1470         }
1471
1472         DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
1473
1474         /* Make sure transcoder isn't still depending on us */
1475         assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
1476
1477         reg = pll->pll_reg;
1478         val = I915_READ(reg);
1479         val &= ~DPLL_VCO_ENABLE;
1480         I915_WRITE(reg, val);
1481         POSTING_READ(reg);
1482         udelay(200);
1483
1484         pll->on = false;
1485 }
1486
1487 static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1488                                     enum pipe pipe)
1489 {
1490         int reg;
1491         u32 val, pipeconf_val;
1492         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1493
1494         /* PCH only available on ILK+ */
1495         BUG_ON(dev_priv->info->gen < 5);
1496
1497         /* Make sure PCH DPLL is enabled */
1498         assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));
1499
1500         /* FDI must be feeding us bits for PCH ports */
1501         assert_fdi_tx_enabled(dev_priv, pipe);
1502         assert_fdi_rx_enabled(dev_priv, pipe);
1503
1504         if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1505                 DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
1506                 return;
1507         }
1508         reg = TRANSCONF(pipe);
1509         val = I915_READ(reg);
1510         pipeconf_val = I915_READ(PIPECONF(pipe));
1511
1512         if (HAS_PCH_IBX(dev_priv->dev)) {
1513                 /*
1514                  * Make the BPC in the transcoder consistent with
1515                  * that in the pipeconf reg.
1516                  */
1517                 val &= ~PIPE_BPC_MASK;
1518                 val |= pipeconf_val & PIPE_BPC_MASK;
1519         }
1520
1521         val &= ~TRANS_INTERLACE_MASK;
1522         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
1523                 if (HAS_PCH_IBX(dev_priv->dev) &&
1524                     intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1525                         val |= TRANS_LEGACY_INTERLACED_ILK;
1526                 else
1527                         val |= TRANS_INTERLACED;
1528         } else
1529                 val |= TRANS_PROGRESSIVE;
1530
1531         I915_WRITE(reg, val | TRANS_ENABLE);
1532         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1533                 DRM_ERROR("failed to enable transcoder %d\n", pipe);
1534 }
1535
1536 static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1537                                      enum pipe pipe)
1538 {
1539         int reg;
1540         u32 val;
1541
1542         /* FDI relies on the transcoder */
1543         assert_fdi_tx_disabled(dev_priv, pipe);
1544         assert_fdi_rx_disabled(dev_priv, pipe);
1545
1546         /* Ports must be off as well */
1547         assert_pch_ports_disabled(dev_priv, pipe);
1548
1549         reg = TRANSCONF(pipe);
1550         val = I915_READ(reg);
1551         val &= ~TRANS_ENABLE;
1552         I915_WRITE(reg, val);
1553         /* wait for PCH transcoder off, transcoder state */
1554         if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1555                 DRM_ERROR("failed to disable transcoder %d\n", pipe);
1556 }
1557
1558 /**
1559  * intel_enable_pipe - enable a pipe, asserting requirements
1560  * @dev_priv: i915 private structure
1561  * @pipe: pipe to enable
1562  * @pch_port: on ILK+, is this pipe driving a PCH port or not
1563  *
1564  * Enable @pipe, making sure that various hardware specific requirements
1565  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1566  *
1567  * @pipe should be %PIPE_A or %PIPE_B.
1568  *
1569  * Will wait until the pipe is actually running (i.e. first vblank) before
1570  * returning.
1571  */
1572 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1573                               bool pch_port)
1574 {
1575         int reg;
1576         u32 val;
1577
1578         /*
1579          * A pipe without a PLL won't actually be able to drive bits from
1580          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1581          * need the check.
1582          */
1583         if (!HAS_PCH_SPLIT(dev_priv->dev))
1584                 assert_pll_enabled(dev_priv, pipe);
1585         else {
1586                 if (pch_port) {
1587                         /* if driving the PCH, we need FDI enabled */
1588                         assert_fdi_rx_pll_enabled(dev_priv, pipe);
1589                         assert_fdi_tx_pll_enabled(dev_priv, pipe);
1590                 }
1591                 /* FIXME: assert CPU port conditions for SNB+ */
1592         }
1593
1594         reg = PIPECONF(pipe);
1595         val = I915_READ(reg);
1596         if (val & PIPECONF_ENABLE)
1597                 return;
1598
1599         I915_WRITE(reg, val | PIPECONF_ENABLE);
1600         intel_wait_for_vblank(dev_priv->dev, pipe);
1601 }
1602
1603 /**
1604  * intel_disable_pipe - disable a pipe, asserting requirements
1605  * @dev_priv: i915 private structure
1606  * @pipe: pipe to disable
1607  *
1608  * Disable @pipe, making sure that various hardware specific requirements
1609  * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1610  *
1611  * @pipe should be %PIPE_A or %PIPE_B.
1612  *
1613  * Will wait until the pipe has shut down before returning.
1614  */
1615 static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1616                                enum pipe pipe)
1617 {
1618         int reg;
1619         u32 val;
1620
1621         /*
1622          * Make sure planes won't keep trying to pump pixels to us,
1623          * or we might hang the display.
1624          */
1625         assert_planes_disabled(dev_priv, pipe);
1626
1627         /* Don't disable pipe A or the pipe A PLLs if the PIPEA_FORCE quirk needs them on */
1628         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1629                 return;
1630
1631         reg = PIPECONF(pipe);
1632         val = I915_READ(reg);
1633         if ((val & PIPECONF_ENABLE) == 0)
1634                 return;
1635
1636         I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1637         intel_wait_for_pipe_off(dev_priv->dev, pipe);
1638 }
1639
1640 /*
1641  * Plane regs are double buffered, going from enabled->disabled needs a
1642  * trigger in order to latch.  The display address reg provides this.
1643  */
1644 void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1645                                       enum plane plane)
1646 {
1647         I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1648         I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1649 }
1650
1651 /**
1652  * intel_enable_plane - enable a display plane on a given pipe
1653  * @dev_priv: i915 private structure
1654  * @plane: plane to enable
1655  * @pipe: pipe being fed
1656  *
1657  * Enable @plane on @pipe, making sure that @pipe is running first.
1658  */
1659 static void intel_enable_plane(struct drm_i915_private *dev_priv,
1660                                enum plane plane, enum pipe pipe)
1661 {
1662         int reg;
1663         u32 val;
1664
1665         /* If the pipe isn't enabled, we can't pump pixels and may hang */
1666         assert_pipe_enabled(dev_priv, pipe);
1667
1668         reg = DSPCNTR(plane);
1669         val = I915_READ(reg);
1670         if (val & DISPLAY_PLANE_ENABLE)
1671                 return;
1672
1673         I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1674         intel_flush_display_plane(dev_priv, plane);
1675         intel_wait_for_vblank(dev_priv->dev, pipe);
1676 }
1677
1678 /**
1679  * intel_disable_plane - disable a display plane
1680  * @dev_priv: i915 private structure
1681  * @plane: plane to disable
1682  * @pipe: pipe consuming the data
1683  *
1684  * Disable @plane; should be an independent operation.
1685  */
1686 static void intel_disable_plane(struct drm_i915_private *dev_priv,
1687                                 enum plane plane, enum pipe pipe)
1688 {
1689         int reg;
1690         u32 val;
1691
1692         reg = DSPCNTR(plane);
1693         val = I915_READ(reg);
1694         if ((val & DISPLAY_PLANE_ENABLE) == 0)
1695                 return;
1696
1697         I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1698         intel_flush_display_plane(dev_priv, plane);
1699         intel_wait_for_vblank(dev_priv->dev, pipe);
1700 }
1701
1702 static void disable_pch_dp(struct drm_i915_private *dev_priv,
1703                            enum pipe pipe, int reg, u32 port_sel)
1704 {
1705         u32 val = I915_READ(reg);
1706         if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
1707                 DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
1708                 I915_WRITE(reg, val & ~DP_PORT_EN);
1709         }
1710 }
1711
1712 static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1713                              enum pipe pipe, int reg)
1714 {
1715         u32 val = I915_READ(reg);
1716         if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
1717                 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1718                               reg, pipe);
1719                 I915_WRITE(reg, val & ~PORT_ENABLE);
1720         }
1721 }
1722
1723 /* Disable any ports connected to this transcoder */
1724 static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1725                                     enum pipe pipe)
1726 {
1727         u32 reg, val;
1728
1729         val = I915_READ(PCH_PP_CONTROL);
1730         I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1731
1732         disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1733         disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1734         disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1735
1736         reg = PCH_ADPA;
1737         val = I915_READ(reg);
1738         if (adpa_pipe_enabled(dev_priv, val, pipe))
1739                 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1740
1741         reg = PCH_LVDS;
1742         val = I915_READ(reg);
1743         if (lvds_pipe_enabled(dev_priv, val, pipe)) {
1744                 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1745                 I915_WRITE(reg, val & ~LVDS_PORT_EN);
1746                 POSTING_READ(reg);
1747                 udelay(100);
1748         }
1749
1750         disable_pch_hdmi(dev_priv, pipe, HDMIB);
1751         disable_pch_hdmi(dev_priv, pipe, HDMIC);
1752         disable_pch_hdmi(dev_priv, pipe, HDMID);
1753 }
1754
1755 int
1756 intel_pin_and_fence_fb_obj(struct drm_device *dev,
1757                            struct drm_i915_gem_object *obj,
1758                            struct intel_ring_buffer *pipelined)
1759 {
1760         struct drm_i915_private *dev_priv = dev->dev_private;
1761         u32 alignment;
1762         int ret;
1763
1764         switch (obj->tiling_mode) {
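                     /*
                      * Linear scan-out base alignment; these values presumably
                      * mirror the display engine requirements of each
                      * generation: 128K on Broadwater/Crestline, 4K on other
                      * gen4+ parts, 64K on everything older.
                      */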
1765         case I915_TILING_NONE:
1766                 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1767                         alignment = 128 * 1024;
1768                 else if (INTEL_INFO(dev)->gen >= 4)
1769                         alignment = 4 * 1024;
1770                 else
1771                         alignment = 64 * 1024;
1772                 break;
1773         case I915_TILING_X:
1774                 /* pin() will align the object as required by fence */
1775                 alignment = 0;
1776                 break;
1777         case I915_TILING_Y:
1778                 /* FIXME: Is this true? */
1779                 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
1780                 return -EINVAL;
1781         default:
1782                 BUG();
1783         }
1784
1785         dev_priv->mm.interruptible = false;
1786         ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
1787         if (ret)
1788                 goto err_interruptible;
1789
1790         /* Install a fence for tiled scan-out. Pre-i965 always needs a
1791          * fence, whereas 965+ only requires a fence if using
1792          * framebuffer compression.  For simplicity, we always install
1793          * a fence as the cost is not that onerous.
1794          */
1795         ret = i915_gem_object_get_fence(obj);
1796         if (ret)
1797                 goto err_unpin;
1798
1799         i915_gem_object_pin_fence(obj);
1800
1801         dev_priv->mm.interruptible = true;
1802         return 0;
1803
1804 err_unpin:
1805         i915_gem_object_unpin(obj);
1806 err_interruptible:
1807         dev_priv->mm.interruptible = true;
1808         return ret;
1809 }
1810
1811 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
1812 {
1813         i915_gem_object_unpin_fence(obj);
1814         i915_gem_object_unpin(obj);
1815 }
1816
1817 static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1818                              int x, int y)
1819 {
1820         struct drm_device *dev = crtc->dev;
1821         struct drm_i915_private *dev_priv = dev->dev_private;
1822         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1823         struct intel_framebuffer *intel_fb;
1824         struct drm_i915_gem_object *obj;
1825         int plane = intel_crtc->plane;
1826         unsigned long Start, Offset;
1827         u32 dspcntr;
1828         u32 reg;
1829
1830         switch (plane) {
1831         case 0:
1832         case 1:
1833                 break;
1834         default:
1835                 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
1836                 return -EINVAL;
1837         }
1838
1839         intel_fb = to_intel_framebuffer(fb);
1840         obj = intel_fb->obj;
1841
1842         reg = DSPCNTR(plane);
1843         dspcntr = I915_READ(reg);
1844         /* Mask out pixel format bits in case we change it */
1845         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
1846         switch (fb->bits_per_pixel) {
1847         case 8:
1848                 dspcntr |= DISPPLANE_8BPP;
1849                 break;
1850         case 16:
1851                 if (fb->depth == 15)
1852                         dspcntr |= DISPPLANE_15_16BPP;
1853                 else
1854                         dspcntr |= DISPPLANE_16BPP;
1855                 break;
1856         case 24:
1857         case 32:
1858                 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
1859                 break;
1860         default:
1861                 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
1862                 return -EINVAL;
1863         }
1864         if (INTEL_INFO(dev)->gen >= 4) {
1865                 if (obj->tiling_mode != I915_TILING_NONE)
1866                         dspcntr |= DISPPLANE_TILED;
1867                 else
1868                         dspcntr &= ~DISPPLANE_TILED;
1869         }
1870
1871         I915_WRITE(reg, dspcntr);
1872
1873         Start = obj->gtt_offset;
1874         Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
1875
1876         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
1877                       Start, Offset, x, y, fb->pitches[0]);
1878         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
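             /*
              * Gen4+ programs the surface base (DSPSURF) separately from the
              * pan offset (DSPTILEOFF/DSPADDR); pre-gen4 parts only have the
              * single linear DSPADDR, so the offset is folded into the base.
              */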
1879         if (INTEL_INFO(dev)->gen >= 4) {
1880                 I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
1881                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
1882                 I915_WRITE(DSPADDR(plane), Offset);
1883         } else
1884                 I915_WRITE(DSPADDR(plane), Start + Offset);
1885         POSTING_READ(reg);
1886
1887         return 0;
1888 }
1889
1890 static int ironlake_update_plane(struct drm_crtc *crtc,
1891                                  struct drm_framebuffer *fb, int x, int y)
1892 {
1893         struct drm_device *dev = crtc->dev;
1894         struct drm_i915_private *dev_priv = dev->dev_private;
1895         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1896         struct intel_framebuffer *intel_fb;
1897         struct drm_i915_gem_object *obj;
1898         int plane = intel_crtc->plane;
1899         unsigned long Start, Offset;
1900         u32 dspcntr;
1901         u32 reg;
1902
1903         switch (plane) {
1904         case 0:
1905         case 1:
1906         case 2:
1907                 break;
1908         default:
1909                 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
1910                 return -EINVAL;
1911         }
1912
1913         intel_fb = to_intel_framebuffer(fb);
1914         obj = intel_fb->obj;
1915
1916         reg = DSPCNTR(plane);
1917         dspcntr = I915_READ(reg);
1918         /* Mask out pixel format bits in case we change it */
1919         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
1920         switch (fb->bits_per_pixel) {
1921         case 8:
1922                 dspcntr |= DISPPLANE_8BPP;
1923                 break;
1924         case 16:
1925                 if (fb->depth != 16)
1926                         return -EINVAL;
1927
1928                 dspcntr |= DISPPLANE_16BPP;
1929                 break;
1930         case 24:
1931         case 32:
1932                 if (fb->depth == 24)
1933                         dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
1934                 else if (fb->depth == 30)
1935                         dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
1936                 else
1937                         return -EINVAL;
1938                 break;
1939         default:
1940                 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
1941                 return -EINVAL;
1942         }
1943
1944         if (obj->tiling_mode != I915_TILING_NONE)
1945                 dspcntr |= DISPPLANE_TILED;
1946         else
1947                 dspcntr &= ~DISPPLANE_TILED;
1948
1949         /* trickle feed must always be disabled */
1950         dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1951
1952         I915_WRITE(reg, dspcntr);
1953
1954         Start = obj->gtt_offset;
1955         Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
1956
1957         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
1958                       Start, Offset, x, y, fb->pitches[0]);
1959         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
1960         I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
1961         I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
1962         I915_WRITE(DSPADDR(plane), Offset);
1963         POSTING_READ(reg);
1964
1965         return 0;
1966 }
1967
1968 /* Assume fb object is pinned & idle & fenced and just update base pointers */
1969 static int
1970 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1971                            int x, int y, enum mode_set_atomic state)
1972 {
1973         struct drm_device *dev = crtc->dev;
1974         struct drm_i915_private *dev_priv = dev->dev_private;
1975
1976         if (dev_priv->display.disable_fbc)
1977                 dev_priv->display.disable_fbc(dev);
1978         intel_increase_pllclock(crtc);
1979
1980         return dev_priv->display.update_plane(crtc, fb, x, y);
1981 }
1982
1983 static int
1984 intel_finish_fb(struct drm_framebuffer *old_fb)
1985 {
1986         struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
1987         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1988         bool was_interruptible = dev_priv->mm.interruptible;
1989         int ret;
1990
1991         wait_event(dev_priv->pending_flip_queue,
1992                    atomic_read(&dev_priv->mm.wedged) ||
1993                    atomic_read(&obj->pending_flip) == 0);
1994
1995         /* Big Hammer, we also need to ensure that any pending
1996          * MI_WAIT_FOR_EVENT inside a user batch buffer on the
1997          * current scanout is retired before unpinning the old
1998          * framebuffer.
1999          *
2000          * This should only fail upon a hung GPU, in which case we
2001          * can safely continue.
2002          */
2003         dev_priv->mm.interruptible = false;
2004         ret = i915_gem_object_finish_gpu(obj);
2005         dev_priv->mm.interruptible = was_interruptible;
2006
2007         return ret;
2008 }
2009
2010 static int
2011 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2012                     struct drm_framebuffer *old_fb)
2013 {
2014         struct drm_device *dev = crtc->dev;
2015         struct drm_i915_private *dev_priv = dev->dev_private;
2016         struct drm_i915_master_private *master_priv;
2017         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2018         int ret;
2019
2020         /* no fb bound */
2021         if (!crtc->fb) {
2022                 DRM_ERROR("No FB bound\n");
2023                 return 0;
2024         }
2025
2026         if (intel_crtc->plane > dev_priv->num_pipe) {
2027                 DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
2028                                 intel_crtc->plane,
2029                                 dev_priv->num_pipe);
2030                 return -EINVAL;
2031         }
2032
2033         mutex_lock(&dev->struct_mutex);
2034         ret = intel_pin_and_fence_fb_obj(dev,
2035                                          to_intel_framebuffer(crtc->fb)->obj,
2036                                          NULL);
2037         if (ret != 0) {
2038                 mutex_unlock(&dev->struct_mutex);
2039                 DRM_ERROR("pin & fence failed\n");
2040                 return ret;
2041         }
2042
2043         if (old_fb)
2044                 intel_finish_fb(old_fb);
2045
2046         ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
2047         if (ret) {
2048                 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
2049                 mutex_unlock(&dev->struct_mutex);
2050                 DRM_ERROR("failed to update base address\n");
2051                 return ret;
2052         }
2053
2054         if (old_fb) {
2055                 intel_wait_for_vblank(dev, intel_crtc->pipe);
2056                 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2057         }
2058
2059         intel_update_fbc(dev);
2060         mutex_unlock(&dev->struct_mutex);
2061
2062         if (!dev->primary->master)
2063                 return 0;
2064
2065         master_priv = dev->primary->master->driver_priv;
2066         if (!master_priv->sarea_priv)
2067                 return 0;
2068
2069         if (intel_crtc->pipe) {
2070                 master_priv->sarea_priv->pipeB_x = x;
2071                 master_priv->sarea_priv->pipeB_y = y;
2072         } else {
2073                 master_priv->sarea_priv->pipeA_x = x;
2074                 master_priv->sarea_priv->pipeA_y = y;
2075         }
2076
2077         return 0;
2078 }
2079
2080 static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
2081 {
2082         struct drm_device *dev = crtc->dev;
2083         struct drm_i915_private *dev_priv = dev->dev_private;
2084         u32 dpa_ctl;
2085
2086         DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
2087         dpa_ctl = I915_READ(DP_A);
2088         dpa_ctl &= ~DP_PLL_FREQ_MASK;
2089
2090         if (clock < 200000) {
2091                 u32 temp;
2092                 dpa_ctl |= DP_PLL_FREQ_160MHZ;
2093                 /* workaround for 160MHz:
2094                    1) program 0x4600c bits 15:0 = 0x8124
2095                    2) program 0x46010 bit 0 = 1
2096                    3) program 0x46034 bit 24 = 1
2097                    4) program 0x64000 bit 14 = 1
2098                    */
2099                 temp = I915_READ(0x4600c);
2100                 temp &= 0xffff0000;
2101                 I915_WRITE(0x4600c, temp | 0x8124);
2102
2103                 temp = I915_READ(0x46010);
2104                 I915_WRITE(0x46010, temp | 1);
2105
2106                 temp = I915_READ(0x46034);
2107                 I915_WRITE(0x46034, temp | (1 << 24));
2108         } else {
2109                 dpa_ctl |= DP_PLL_FREQ_270MHZ;
2110         }
2111         I915_WRITE(DP_A, dpa_ctl);
2112
2113         POSTING_READ(DP_A);
2114         udelay(500);
2115 }
2116
2117 static void intel_fdi_normal_train(struct drm_crtc *crtc)
2118 {
2119         struct drm_device *dev = crtc->dev;
2120         struct drm_i915_private *dev_priv = dev->dev_private;
2121         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2122         int pipe = intel_crtc->pipe;
2123         u32 reg, temp;
2124
2125         /* enable normal train */
2126         reg = FDI_TX_CTL(pipe);
2127         temp = I915_READ(reg);
2128         if (IS_IVYBRIDGE(dev)) {
2129                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2130                 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2131         } else {
2132                 temp &= ~FDI_LINK_TRAIN_NONE;
2133                 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2134         }
2135         I915_WRITE(reg, temp);
2136
2137         reg = FDI_RX_CTL(pipe);
2138         temp = I915_READ(reg);
2139         if (HAS_PCH_CPT(dev)) {
2140                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2141                 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2142         } else {
2143                 temp &= ~FDI_LINK_TRAIN_NONE;
2144                 temp |= FDI_LINK_TRAIN_NONE;
2145         }
2146         I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2147
2148         /* wait one idle pattern time */
2149         POSTING_READ(reg);
2150         udelay(1000);
2151
2152         /* IVB wants error correction enabled */
2153         if (IS_IVYBRIDGE(dev))
2154                 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2155                            FDI_FE_ERRC_ENABLE);
2156 }
2157
2158 static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2159 {
2160         struct drm_i915_private *dev_priv = dev->dev_private;
2161         u32 flags = I915_READ(SOUTH_CHICKEN1);
2162
2163         flags |= FDI_PHASE_SYNC_OVR(pipe);
2164         I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2165         flags |= FDI_PHASE_SYNC_EN(pipe);
2166         I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2167         POSTING_READ(SOUTH_CHICKEN1);
2168 }
2169
2170 /* The FDI link training functions for ILK/Ibexpeak. */
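     /*
      * Training runs in two phases on both ends of the link: pattern 1 until
      * the receiver reports bit lock in FDI_RX_IIR, then pattern 2 until it
      * reports symbol lock.  Each phase is polled a handful of times before
      * giving up with an error.
      */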
2171 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2172 {
2173         struct drm_device *dev = crtc->dev;
2174         struct drm_i915_private *dev_priv = dev->dev_private;
2175         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2176         int pipe = intel_crtc->pipe;
2177         int plane = intel_crtc->plane;
2178         u32 reg, temp, tries;
2179
2180         /* FDI needs bits from pipe & plane first */
2181         assert_pipe_enabled(dev_priv, pipe);
2182         assert_plane_enabled(dev_priv, plane);
2183
2184         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2185            for train result */
2186         reg = FDI_RX_IMR(pipe);
2187         temp = I915_READ(reg);
2188         temp &= ~FDI_RX_SYMBOL_LOCK;
2189         temp &= ~FDI_RX_BIT_LOCK;
2190         I915_WRITE(reg, temp);
2191         I915_READ(reg);
2192         udelay(150);
2193
2194         /* enable CPU FDI TX and PCH FDI RX */
2195         reg = FDI_TX_CTL(pipe);
2196         temp = I915_READ(reg);
2197         temp &= ~(7 << 19);
2198         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2199         temp &= ~FDI_LINK_TRAIN_NONE;
2200         temp |= FDI_LINK_TRAIN_PATTERN_1;
2201         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2202
2203         reg = FDI_RX_CTL(pipe);
2204         temp = I915_READ(reg);
2205         temp &= ~FDI_LINK_TRAIN_NONE;
2206         temp |= FDI_LINK_TRAIN_PATTERN_1;
2207         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2208
2209         POSTING_READ(reg);
2210         udelay(150);
2211
2212         /* Ironlake workaround, enable clock pointer after FDI enable */
2213         if (HAS_PCH_IBX(dev)) {
2214                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2215                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2216                            FDI_RX_PHASE_SYNC_POINTER_EN);
2217         }
2218
2219         reg = FDI_RX_IIR(pipe);
2220         for (tries = 0; tries < 5; tries++) {
2221                 temp = I915_READ(reg);
2222                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2223
2224                 if ((temp & FDI_RX_BIT_LOCK)) {
2225                         DRM_DEBUG_KMS("FDI train 1 done.\n");
2226                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2227                         break;
2228                 }
2229         }
2230         if (tries == 5)
2231                 DRM_ERROR("FDI train 1 fail!\n");
2232
2233         /* Train 2 */
2234         reg = FDI_TX_CTL(pipe);
2235         temp = I915_READ(reg);
2236         temp &= ~FDI_LINK_TRAIN_NONE;
2237         temp |= FDI_LINK_TRAIN_PATTERN_2;
2238         I915_WRITE(reg, temp);
2239
2240         reg = FDI_RX_CTL(pipe);
2241         temp = I915_READ(reg);
2242         temp &= ~FDI_LINK_TRAIN_NONE;
2243         temp |= FDI_LINK_TRAIN_PATTERN_2;
2244         I915_WRITE(reg, temp);
2245
2246         POSTING_READ(reg);
2247         udelay(150);
2248
2249         reg = FDI_RX_IIR(pipe);
2250         for (tries = 0; tries < 5; tries++) {
2251                 temp = I915_READ(reg);
2252                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2253
2254                 if (temp & FDI_RX_SYMBOL_LOCK) {
2255                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2256                         DRM_DEBUG_KMS("FDI train 2 done.\n");
2257                         break;
2258                 }
2259         }
2260         if (tries == 5)
2261                 DRM_ERROR("FDI train 2 fail!\n");
2262
2263         DRM_DEBUG_KMS("FDI train done\n");
2264
2265 }
2266
2267 static const int snb_b_fdi_train_param[] = {
2268         FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2269         FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2270         FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2271         FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2272 };
2273
2274 /* The FDI link training functions for SNB/Cougarpoint. */
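     /*
      * Same two-phase bit lock / symbol lock scheme as Ironlake, but the
      * sequence additionally steps through the voltage swing / pre-emphasis
      * values in snb_b_fdi_train_param[] until the receiver locks, and CPT
      * uses its own train pattern bits in FDI_RX_CTL.
      */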
2275 static void gen6_fdi_link_train(struct drm_crtc *crtc)
2276 {
2277         struct drm_device *dev = crtc->dev;
2278         struct drm_i915_private *dev_priv = dev->dev_private;
2279         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2280         int pipe = intel_crtc->pipe;
2281         u32 reg, temp, i, retry;
2282
2283         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2284            for train result */
2285         reg = FDI_RX_IMR(pipe);
2286         temp = I915_READ(reg);
2287         temp &= ~FDI_RX_SYMBOL_LOCK;
2288         temp &= ~FDI_RX_BIT_LOCK;
2289         I915_WRITE(reg, temp);
2290
2291         POSTING_READ(reg);
2292         udelay(150);
2293
2294         /* enable CPU FDI TX and PCH FDI RX */
2295         reg = FDI_TX_CTL(pipe);
2296         temp = I915_READ(reg);
2297         temp &= ~(7 << 19);
2298         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2299         temp &= ~FDI_LINK_TRAIN_NONE;
2300         temp |= FDI_LINK_TRAIN_PATTERN_1;
2301         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2302         /* SNB-B */
2303         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2304         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2305
2306         reg = FDI_RX_CTL(pipe);
2307         temp = I915_READ(reg);
2308         if (HAS_PCH_CPT(dev)) {
2309                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2310                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2311         } else {
2312                 temp &= ~FDI_LINK_TRAIN_NONE;
2313                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2314         }
2315         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2316
2317         POSTING_READ(reg);
2318         udelay(150);
2319
2320         if (HAS_PCH_CPT(dev))
2321                 cpt_phase_pointer_enable(dev, pipe);
2322
2323         for (i = 0; i < 4; i++) {
2324                 reg = FDI_TX_CTL(pipe);
2325                 temp = I915_READ(reg);
2326                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2327                 temp |= snb_b_fdi_train_param[i];
2328                 I915_WRITE(reg, temp);
2329
2330                 POSTING_READ(reg);
2331                 udelay(500);
2332
2333                 for (retry = 0; retry < 5; retry++) {
2334                         reg = FDI_RX_IIR(pipe);
2335                         temp = I915_READ(reg);
2336                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2337                         if (temp & FDI_RX_BIT_LOCK) {
2338                                 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2339                                 DRM_DEBUG_KMS("FDI train 1 done.\n");
2340                                 break;
2341                         }
2342                         udelay(50);
2343                 }
2344                 if (retry < 5)
2345                         break;
2346         }
2347         if (i == 4)
2348                 DRM_ERROR("FDI train 1 fail!\n");
2349
2350         /* Train 2 */
2351         reg = FDI_TX_CTL(pipe);
2352         temp = I915_READ(reg);
2353         temp &= ~FDI_LINK_TRAIN_NONE;
2354         temp |= FDI_LINK_TRAIN_PATTERN_2;
2355         if (IS_GEN6(dev)) {
2356                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2357                 /* SNB-B */
2358                 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2359         }
2360         I915_WRITE(reg, temp);
2361
2362         reg = FDI_RX_CTL(pipe);
2363         temp = I915_READ(reg);
2364         if (HAS_PCH_CPT(dev)) {
2365                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2366                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2367         } else {
2368                 temp &= ~FDI_LINK_TRAIN_NONE;
2369                 temp |= FDI_LINK_TRAIN_PATTERN_2;
2370         }
2371         I915_WRITE(reg, temp);
2372
2373         POSTING_READ(reg);
2374         udelay(150);
2375
2376         for (i = 0; i < 4; i++) {
2377                 reg = FDI_TX_CTL(pipe);
2378                 temp = I915_READ(reg);
2379                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2380                 temp |= snb_b_fdi_train_param[i];
2381                 I915_WRITE(reg, temp);
2382
2383                 POSTING_READ(reg);
2384                 udelay(500);
2385
2386                 for (retry = 0; retry < 5; retry++) {
2387                         reg = FDI_RX_IIR(pipe);
2388                         temp = I915_READ(reg);
2389                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2390                         if (temp & FDI_RX_SYMBOL_LOCK) {
2391                                 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2392                                 DRM_DEBUG_KMS("FDI train 2 done.\n");
2393                                 break;
2394                         }
2395                         udelay(50);
2396                 }
2397                 if (retry < 5)
2398                         break;
2399         }
2400         if (i == 4)
2401                 DRM_ERROR("FDI train 2 fail!\n");
2402
2403         DRM_DEBUG_KMS("FDI train done.\n");
2404 }
2405
2406 /* Manual link training for Ivy Bridge A0 parts */
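     /*
      * Same idea as the SNB sequence, but with the IVB-specific train pattern
      * bits, FDI_LINK_TRAIN_AUTO explicitly cleared, composite sync enabled,
      * and the same voltage swing / pre-emphasis sweep for each phase.
      */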
2407 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2408 {
2409         struct drm_device *dev = crtc->dev;
2410         struct drm_i915_private *dev_priv = dev->dev_private;
2411         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2412         int pipe = intel_crtc->pipe;
2413         u32 reg, temp, i;
2414
2415         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2416            for train result */
2417         reg = FDI_RX_IMR(pipe);
2418         temp = I915_READ(reg);
2419         temp &= ~FDI_RX_SYMBOL_LOCK;
2420         temp &= ~FDI_RX_BIT_LOCK;
2421         I915_WRITE(reg, temp);
2422
2423         POSTING_READ(reg);
2424         udelay(150);
2425
2426         /* enable CPU FDI TX and PCH FDI RX */
2427         reg = FDI_TX_CTL(pipe);
2428         temp = I915_READ(reg);
2429         temp &= ~(7 << 19);
2430         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2431         temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2432         temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2433         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2434         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2435         temp |= FDI_COMPOSITE_SYNC;
2436         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2437
2438         reg = FDI_RX_CTL(pipe);
2439         temp = I915_READ(reg);
2440         temp &= ~FDI_LINK_TRAIN_AUTO;
2441         temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2442         temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2443         temp |= FDI_COMPOSITE_SYNC;
2444         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2445
2446         POSTING_READ(reg);
2447         udelay(150);
2448
2449         if (HAS_PCH_CPT(dev))
2450                 cpt_phase_pointer_enable(dev, pipe);
2451
2452         for (i = 0; i < 4; i++) {
2453                 reg = FDI_TX_CTL(pipe);
2454                 temp = I915_READ(reg);
2455                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2456                 temp |= snb_b_fdi_train_param[i];
2457                 I915_WRITE(reg, temp);
2458
2459                 POSTING_READ(reg);
2460                 udelay(500);
2461
2462                 reg = FDI_RX_IIR(pipe);
2463                 temp = I915_READ(reg);
2464                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2465
2466                 if (temp & FDI_RX_BIT_LOCK ||
2467                     (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2468                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2469                         DRM_DEBUG_KMS("FDI train 1 done.\n");
2470                         break;
2471                 }
2472         }
2473         if (i == 4)
2474                 DRM_ERROR("FDI train 1 fail!\n");
2475
2476         /* Train 2 */
2477         reg = FDI_TX_CTL(pipe);
2478         temp = I915_READ(reg);
2479         temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2480         temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2481         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2482         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2483         I915_WRITE(reg, temp);
2484
2485         reg = FDI_RX_CTL(pipe);
2486         temp = I915_READ(reg);
2487         temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2488         temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2489         I915_WRITE(reg, temp);
2490
2491         POSTING_READ(reg);
2492         udelay(150);
2493
2494         for (i = 0; i < 4; i++) {
2495                 reg = FDI_TX_CTL(pipe);
2496                 temp = I915_READ(reg);
2497                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2498                 temp |= snb_b_fdi_train_param[i];
2499                 I915_WRITE(reg, temp);
2500
2501                 POSTING_READ(reg);
2502                 udelay(500);
2503
2504                 reg = FDI_RX_IIR(pipe);
2505                 temp = I915_READ(reg);
2506                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2507
2508                 if (temp & FDI_RX_SYMBOL_LOCK) {
2509                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2510                         DRM_DEBUG_KMS("FDI train 2 done.\n");
2511                         break;
2512                 }
2513         }
2514         if (i == 4)
2515                 DRM_ERROR("FDI train 2 fail!\n");
2516
2517         DRM_DEBUG_KMS("FDI train done.\n");
2518 }
2519
2520 static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
2521 {
2522         struct drm_device *dev = crtc->dev;
2523         struct drm_i915_private *dev_priv = dev->dev_private;
2524         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2525         int pipe = intel_crtc->pipe;
2526         u32 reg, temp;
2527
2528         /* Write the TU size bits so error detection works */
2529         I915_WRITE(FDI_RX_TUSIZE1(pipe),
2530                    I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2531
2532         /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2533         reg = FDI_RX_CTL(pipe);
2534         temp = I915_READ(reg);
2535         temp &= ~((0x7 << 19) | (0x7 << 16));
2536         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2537         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2538         I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2539
2540         POSTING_READ(reg);
2541         udelay(200);
2542
2543         /* Switch from Rawclk to PCDclk */
2544         temp = I915_READ(reg);
2545         I915_WRITE(reg, temp | FDI_PCDCLK);
2546
2547         POSTING_READ(reg);
2548         udelay(200);
2549
2550         /* On Haswell, the PLL configuration for ports and pipes is handled
2551          * separately, as part of DDI setup */
2552         if (!IS_HASWELL(dev)) {
2553                 /* Enable CPU FDI TX PLL, always on for Ironlake */
2554                 reg = FDI_TX_CTL(pipe);
2555                 temp = I915_READ(reg);
2556                 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2557                         I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2558
2559                         POSTING_READ(reg);
2560                         udelay(100);
2561                 }
2562         }
2563 }
2564
2565 static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2566 {
2567         struct drm_i915_private *dev_priv = dev->dev_private;
2568         u32 flags = I915_READ(SOUTH_CHICKEN1);
2569
2570         flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2571         I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2572         flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2573         I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2574         POSTING_READ(SOUTH_CHICKEN1);
2575 }
2576 static void ironlake_fdi_disable(struct drm_crtc *crtc)
2577 {
2578         struct drm_device *dev = crtc->dev;
2579         struct drm_i915_private *dev_priv = dev->dev_private;
2580         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2581         int pipe = intel_crtc->pipe;
2582         u32 reg, temp;
2583
2584         /* disable CPU FDI tx and PCH FDI rx */
2585         reg = FDI_TX_CTL(pipe);
2586         temp = I915_READ(reg);
2587         I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2588         POSTING_READ(reg);
2589
2590         reg = FDI_RX_CTL(pipe);
2591         temp = I915_READ(reg);
2592         temp &= ~(0x7 << 16);
2593         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2594         I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2595
2596         POSTING_READ(reg);
2597         udelay(100);
2598
2599         /* Ironlake workaround, disable clock pointer after downing FDI */
2600         if (HAS_PCH_IBX(dev)) {
2601                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2602                 I915_WRITE(FDI_RX_CHICKEN(pipe),
2603                            I915_READ(FDI_RX_CHICKEN(pipe)) &
2604                                      ~FDI_RX_PHASE_SYNC_POINTER_EN);
2605         } else if (HAS_PCH_CPT(dev)) {
2606                 cpt_phase_pointer_disable(dev, pipe);
2607         }
2608
2609         /* still set train pattern 1 */
2610         reg = FDI_TX_CTL(pipe);
2611         temp = I915_READ(reg);
2612         temp &= ~FDI_LINK_TRAIN_NONE;
2613         temp |= FDI_LINK_TRAIN_PATTERN_1;
2614         I915_WRITE(reg, temp);
2615
2616         reg = FDI_RX_CTL(pipe);
2617         temp = I915_READ(reg);
2618         if (HAS_PCH_CPT(dev)) {
2619                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2620                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2621         } else {
2622                 temp &= ~FDI_LINK_TRAIN_NONE;
2623                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2624         }
2625         /* BPC in FDI rx is consistent with that in PIPECONF */
2626         temp &= ~(0x07 << 16);
2627         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2628         I915_WRITE(reg, temp);
2629
2630         POSTING_READ(reg);
2631         udelay(100);
2632 }
2633
2634 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2635 {
2636         struct drm_device *dev = crtc->dev;
2637
2638         if (crtc->fb == NULL)
2639                 return;
2640
2641         mutex_lock(&dev->struct_mutex);
2642         intel_finish_fb(crtc->fb);
2643         mutex_unlock(&dev->struct_mutex);
2644 }
2645
2646 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2647 {
2648         struct drm_device *dev = crtc->dev;
2649         struct drm_mode_config *mode_config = &dev->mode_config;
2650         struct intel_encoder *encoder;
2651
2652         /*
2653          * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2654          * must be driven by its own crtc; no sharing is possible.
2655          */
2656         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2657                 if (encoder->base.crtc != crtc)
2658                         continue;
2659
2660                 /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
2661                  * CPU handles all others */
2662                 if (IS_HASWELL(dev)) {
2663                         /* It is still unclear how this will work on PPT, so throw up a warning */
2664                         WARN_ON(!HAS_PCH_LPT(dev));
2665
2666                         if (encoder->type == DRM_MODE_ENCODER_DAC) {
2667                                 DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming it is PCH\n");
2668                                 return true;
2669                         } else {
2670                                 DRM_DEBUG_KMS("Haswell detected encoder %d, assuming it is CPU\n",
2671                                                 encoder->type);
2672                                 return false;
2673                         }
2674                 }
2675
2676                 switch (encoder->type) {
2677                 case INTEL_OUTPUT_EDP:
2678                         if (!intel_encoder_is_pch_edp(&encoder->base))
2679                                 return false;
2680                         continue;
2681                 }
2682         }
2683
2684         return true;
2685 }
2686
2687 /* Program iCLKIP clock to the desired frequency */
2688 static void lpt_program_iclkip(struct drm_crtc *crtc)
2689 {
2690         struct drm_device *dev = crtc->dev;
2691         struct drm_i915_private *dev_priv = dev->dev_private;
2692         u32 divsel, phaseinc, auxdiv, phasedir = 0;
2693         u32 temp;
2694
2695         /* The pixel clock has to be gated while the iCLKIP divisors are
2696          * reprogrammed, and is ungated again once programming is done.
2697          */
2698         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
2699
2700         /* Disable SSCCTL */
2701         intel_sbi_write(dev_priv, SBI_SSCCTL6,
2702                                 intel_sbi_read(dev_priv, SBI_SSCCTL6) |
2703                                         SBI_SSCCTL_DISABLE);
2704
2705         /* 20MHz is a corner case which is out of range for the 7-bit divisor */
2706         if (crtc->mode.clock == 20000) {
2707                 auxdiv = 1;
2708                 divsel = 0x41;
2709                 phaseinc = 0x20;
2710         } else {
2711                 /* The iCLK virtual clock root frequency is in MHz,
2712                  * but crtc->mode.clock is in kHz. To get the divisors,
2713                  * one has to be divided by the other, so the virtual clock
2714                  * root frequency is converted to kHz here for higher
2715                  * precision.
2716                  */
2717                 u32 iclk_virtual_root_freq = 172800 * 1000;
2718                 u32 iclk_pi_range = 64;
2719                 u32 desired_divisor, msb_divisor_value, pi_value;
2720
2721                 desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
2722                 msb_divisor_value = desired_divisor / iclk_pi_range;
2723                 pi_value = desired_divisor % iclk_pi_range;
2724
2725                 auxdiv = 0;
2726                 divsel = msb_divisor_value - 2;
2727                 phaseinc = pi_value;
2728         }
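             /*
              * Worked example, assuming a 108000 kHz mode clock:
              *   desired_divisor   = 172800000 / 108000 = 1600
              *   msb_divisor_value = 1600 / 64 = 25  ->  divsel   = 25 - 2 = 23
              *   pi_value          = 1600 % 64 = 0   ->  phaseinc = 0
              * with auxdiv left at 0.
              */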
2729
2730         /* This should not happen with any sane values */
2731         WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
2732                 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
2733         WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
2734                 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2735
2736         DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2737                         crtc->mode.clock,
2738                         auxdiv,
2739                         divsel,
2740                         phasedir,
2741                         phaseinc);
2742
2743         /* Program SSCDIVINTPHASE6 */
2744         temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
2745         temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2746         temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2747         temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2748         temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2749         temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2750         temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2751
2752         intel_sbi_write(dev_priv,
2753                         SBI_SSCDIVINTPHASE6,
2754                         temp);
2755
2756         /* Program SSCAUXDIV */
2757         temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
2758         temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
2759         temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
2760         intel_sbi_write(dev_priv,
2761                         SBI_SSCAUXDIV6,
2762                         temp);
2763
2764
2765         /* Enable modulator and associated divider */
2766         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
2767         temp &= ~SBI_SSCCTL_DISABLE;
2768         intel_sbi_write(dev_priv,
2769                         SBI_SSCCTL6,
2770                         temp);
2771
2772         /* Wait for initialization time */
2773         udelay(24);
2774
2775         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
2776 }
2777
2778 /*
2779  * Enable PCH resources required for PCH ports:
2780  *   - PCH PLLs
2781  *   - FDI training & RX/TX
2782  *   - update transcoder timings
2783  *   - DP transcoding bits
2784  *   - transcoder
2785  */
2786 static void ironlake_pch_enable(struct drm_crtc *crtc)
2787 {
2788         struct drm_device *dev = crtc->dev;
2789         struct drm_i915_private *dev_priv = dev->dev_private;
2790         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2791         int pipe = intel_crtc->pipe;
2792         u32 reg, temp;
2793
2794         assert_transcoder_disabled(dev_priv, pipe);
2795
2796         /* For PCH output, training FDI link */
2797         dev_priv->display.fdi_link_train(crtc);
2798
2799         intel_enable_pch_pll(intel_crtc);
2800
2801         if (HAS_PCH_LPT(dev)) {
2802                 DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
2803                 lpt_program_iclkip(crtc);
2804         } else if (HAS_PCH_CPT(dev)) {
2805                 u32 sel;
2806
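                     /*
                      * On CPT the transcoder -> DPLL routing is programmable:
                      * enable this transcoder's clock in PCH_DPLL_SEL and point
                      * it at DPLL B or A depending on which shared PLL
                      * intel_get_pch_pll() picked.
                      */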
2807                 temp = I915_READ(PCH_DPLL_SEL);
2808                 switch (pipe) {
2809                 default:
2810                 case 0:
2811                         temp |= TRANSA_DPLL_ENABLE;
2812                         sel = TRANSA_DPLLB_SEL;
2813                         break;
2814                 case 1:
2815                         temp |= TRANSB_DPLL_ENABLE;
2816                         sel = TRANSB_DPLLB_SEL;
2817                         break;
2818                 case 2:
2819                         temp |= TRANSC_DPLL_ENABLE;
2820                         sel = TRANSC_DPLLB_SEL;
2821                         break;
2822                 }
2823                 if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
2824                         temp |= sel;
2825                 else
2826                         temp &= ~sel;
2827                 I915_WRITE(PCH_DPLL_SEL, temp);
2828         }
2829
2830         /* set transcoder timing, panel must allow it */
2831         assert_panel_unlocked(dev_priv, pipe);
2832         I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
2833         I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
2834         I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
2835
2836         I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
2837         I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
2838         I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
2839         I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
2840
2841         if (!IS_HASWELL(dev))
2842                 intel_fdi_normal_train(crtc);
2843
2844         /* For PCH DP, enable TRANS_DP_CTL */
2845         if (HAS_PCH_CPT(dev) &&
2846             (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
2847              intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2848                 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
2849                 reg = TRANS_DP_CTL(pipe);
2850                 temp = I915_READ(reg);
2851                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
2852                           TRANS_DP_SYNC_MASK |
2853                           TRANS_DP_BPC_MASK);
2854                 temp |= (TRANS_DP_OUTPUT_ENABLE |
2855                          TRANS_DP_ENH_FRAMING);
2856                 temp |= bpc << 9; /* same format but at 11:9 */
2857
2858                 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
2859                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2860                 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
2861                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2862
2863                 switch (intel_trans_dp_port_sel(crtc)) {
2864                 case PCH_DP_B:
2865                         temp |= TRANS_DP_PORT_SEL_B;
2866                         break;
2867                 case PCH_DP_C:
2868                         temp |= TRANS_DP_PORT_SEL_C;
2869                         break;
2870                 case PCH_DP_D:
2871                         temp |= TRANS_DP_PORT_SEL_D;
2872                         break;
2873                 default:
2874                         DRM_DEBUG_KMS("Wrong PCH DP port returned, guessing port B\n");
2875                         temp |= TRANS_DP_PORT_SEL_B;
2876                         break;
2877                 }
2878
2879                 I915_WRITE(reg, temp);
2880         }
2881
2882         intel_enable_transcoder(dev_priv, pipe);
2883 }
2884
2885 static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
2886 {
2887         struct intel_pch_pll *pll = intel_crtc->pch_pll;
2888
2889         if (pll == NULL)
2890                 return;
2891
2892         if (pll->refcount == 0) {
2893                 WARN(1, "bad PCH PLL refcount\n");
2894                 return;
2895         }
2896
2897         --pll->refcount;
2898         intel_crtc->pch_pll = NULL;
2899 }
2900
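     /*
      * Find a PCH PLL for this CRTC: reuse the one it already holds, share an
      * in-use PLL whose DPLL/FP0 dividers match exactly, or claim a free one
      * (on IBX the PLL <-> pipe mapping is simply fixed).  Returns NULL when no
      * PLL is available; the chosen PLL is switched off and reprogrammed with
      * the new dividers before being handed back.
      */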
2901 static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
2902 {
2903         struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
2904         struct intel_pch_pll *pll;
2905         int i;
2906
2907         pll = intel_crtc->pch_pll;
2908         if (pll) {
2909                 DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
2910                               intel_crtc->base.base.id, pll->pll_reg);
2911                 goto prepare;
2912         }
2913
2914         if (HAS_PCH_IBX(dev_priv->dev)) {
2915                 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
2916                 i = intel_crtc->pipe;
2917                 pll = &dev_priv->pch_plls[i];
2918
2919                 DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
2920                               intel_crtc->base.base.id, pll->pll_reg);
2921
2922                 goto found;
2923         }
2924
2925         for (i = 0; i < dev_priv->num_pch_pll; i++) {
2926                 pll = &dev_priv->pch_plls[i];
2927
2928                 /* Only want to check enabled timings first */
2929                 if (pll->refcount == 0)
2930                         continue;
2931
2932                 if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
2933                     fp == I915_READ(pll->fp0_reg)) {
2934                         DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, active %d)\n",
2935                                       intel_crtc->base.base.id,
2936                                       pll->pll_reg, pll->refcount, pll->active);
2937
2938                         goto found;
2939                 }
2940         }
2941
2942         /* Ok no matching timings, maybe there's a free one? */
2943         for (i = 0; i < dev_priv->num_pch_pll; i++) {
2944                 pll = &dev_priv->pch_plls[i];
2945                 if (pll->refcount == 0) {
2946                         DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
2947                                       intel_crtc->base.base.id, pll->pll_reg);
2948                         goto found;
2949                 }
2950         }
2951
2952         return NULL;
2953
2954 found:
2955         intel_crtc->pch_pll = pll;
2956         pll->refcount++;
2957         DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
2958 prepare: /* separate function? */
2959         DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
2960
2961         /* Wait for the clocks to stabilize before rewriting the regs */
2962         I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
2963         POSTING_READ(pll->pll_reg);
2964         udelay(150);
2965
2966         I915_WRITE(pll->fp0_reg, fp);
2967         I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
2968         pll->on = false;
2969         return pll;
2970 }
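
/*
 * Illustrative note (not part of the original source): a PCH PLL is treated
 * as shareable when its currently programmed DPLL value matches the request
 * once bit 31 (DPLL_VCO_ENABLE, hence the 0x7fffffff mask) is ignored and
 * its FP0 divisors match as well.  Assuming the usual mode-set flow, each
 * successful intel_get_pch_pll() (refcount++) is balanced by the
 * intel_put_pch_pll() call made from ironlake_crtc_off() (refcount--).
 */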
2971
2972 void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
2973 {
2974         struct drm_i915_private *dev_priv = dev->dev_private;
2975         int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
2976         u32 temp;
2977
2978         temp = I915_READ(dslreg);
2979         udelay(500);
2980         if (wait_for(I915_READ(dslreg) != temp, 5)) {
2981                 /* Without this, mode sets may fail silently on FDI */
2982                 I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
2983                 udelay(250);
2984                 I915_WRITE(tc2reg, 0);
2985                 if (wait_for(I915_READ(dslreg) != temp, 5))
2986                         DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
2987         }
2988 }
2989
2990 static void ironlake_crtc_enable(struct drm_crtc *crtc)
2991 {
2992         struct drm_device *dev = crtc->dev;
2993         struct drm_i915_private *dev_priv = dev->dev_private;
2994         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2995         int pipe = intel_crtc->pipe;
2996         int plane = intel_crtc->plane;
2997         u32 temp;
2998         bool is_pch_port;
2999
3000         if (intel_crtc->active)
3001                 return;
3002
3003         intel_crtc->active = true;
3004         intel_update_watermarks(dev);
3005
3006         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3007                 temp = I915_READ(PCH_LVDS);
3008                 if ((temp & LVDS_PORT_EN) == 0)
3009                         I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3010         }
3011
3012         is_pch_port = intel_crtc_driving_pch(crtc);
3013
3014         if (is_pch_port)
3015                 ironlake_fdi_pll_enable(crtc);
3016         else
3017                 ironlake_fdi_disable(crtc);
3018
3019         /* Enable panel fitting for LVDS */
3020         if (dev_priv->pch_pf_size &&
3021             (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
3022                 /* Force use of hard-coded filter coefficients
3023                  * as some pre-programmed values are broken,
3024                  * e.g. x201.
3025                  */
3026                 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3027                 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3028                 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3029         }
3030
3031         /*
3032          * On ILK+ LUT must be loaded before the pipe is running but with
3033          * clocks enabled
3034          */
3035         intel_crtc_load_lut(crtc);
3036
3037         intel_enable_pipe(dev_priv, pipe, is_pch_port);
3038         intel_enable_plane(dev_priv, plane, pipe);
3039
3040         if (is_pch_port)
3041                 ironlake_pch_enable(crtc);
3042
3043         mutex_lock(&dev->struct_mutex);
3044         intel_update_fbc(dev);
3045         mutex_unlock(&dev->struct_mutex);
3046
3047         intel_crtc_update_cursor(crtc, true);
3048 }
3049
3050 static void ironlake_crtc_disable(struct drm_crtc *crtc)
3051 {
3052         struct drm_device *dev = crtc->dev;
3053         struct drm_i915_private *dev_priv = dev->dev_private;
3054         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3055         int pipe = intel_crtc->pipe;
3056         int plane = intel_crtc->plane;
3057         u32 reg, temp;
3058
3059         if (!intel_crtc->active)
3060                 return;
3061
3062         intel_crtc_wait_for_pending_flips(crtc);
3063         drm_vblank_off(dev, pipe);
3064         intel_crtc_update_cursor(crtc, false);
3065
3066         intel_disable_plane(dev_priv, plane, pipe);
3067
3068         if (dev_priv->cfb_plane == plane)
3069                 intel_disable_fbc(dev);
3070
3071         intel_disable_pipe(dev_priv, pipe);
3072
3073         /* Disable PF */
3074         I915_WRITE(PF_CTL(pipe), 0);
3075         I915_WRITE(PF_WIN_SZ(pipe), 0);
3076
3077         ironlake_fdi_disable(crtc);
3078
3079         /* This is a horrible layering violation; we should be doing this in
3080          * the connector/encoder ->prepare instead, but we don't always have
3081          * enough information there about the config to know whether it will
3082          * actually be necessary or just cause undesired flicker.
3083          */
3084         intel_disable_pch_ports(dev_priv, pipe);
3085
3086         intel_disable_transcoder(dev_priv, pipe);
3087
3088         if (HAS_PCH_CPT(dev)) {
3089                 /* disable TRANS_DP_CTL */
3090                 reg = TRANS_DP_CTL(pipe);
3091                 temp = I915_READ(reg);
3092                 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
3093                 temp |= TRANS_DP_PORT_SEL_NONE;
3094                 I915_WRITE(reg, temp);
3095
3096                 /* disable DPLL_SEL */
3097                 temp = I915_READ(PCH_DPLL_SEL);
3098                 switch (pipe) {
3099                 case 0:
3100                         temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
3101                         break;
3102                 case 1:
3103                         temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3104                         break;
3105                 case 2:
3106                         /* C shares PLL A or B */
3107                         temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
3108                         break;
3109                 default:
3110                         BUG(); /* unreachable: only transcoders A, B and C exist */
3111                 }
3112                 I915_WRITE(PCH_DPLL_SEL, temp);
3113         }
3114
3115         /* disable PCH DPLL */
3116         intel_disable_pch_pll(intel_crtc);
3117
3118         /* Switch from PCDclk to Rawclk */
3119         reg = FDI_RX_CTL(pipe);
3120         temp = I915_READ(reg);
3121         I915_WRITE(reg, temp & ~FDI_PCDCLK);
3122
3123         /* Disable CPU FDI TX PLL */
3124         reg = FDI_TX_CTL(pipe);
3125         temp = I915_READ(reg);
3126         I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3127
3128         POSTING_READ(reg);
3129         udelay(100);
3130
3131         reg = FDI_RX_CTL(pipe);
3132         temp = I915_READ(reg);
3133         I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3134
3135         /* Wait for the clocks to turn off. */
3136         POSTING_READ(reg);
3137         udelay(100);
3138
3139         intel_crtc->active = false;
3140         intel_update_watermarks(dev);
3141
3142         mutex_lock(&dev->struct_mutex);
3143         intel_update_fbc(dev);
3144         mutex_unlock(&dev->struct_mutex);
3145 }
3146
3147 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3148 {
3149         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3150         int pipe = intel_crtc->pipe;
3151         int plane = intel_crtc->plane;
3152
3153         /* XXX: When our outputs are all unaware of DPMS modes other than off
3154          * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3155          */
3156         switch (mode) {
3157         case DRM_MODE_DPMS_ON:
3158         case DRM_MODE_DPMS_STANDBY:
3159         case DRM_MODE_DPMS_SUSPEND:
3160                 DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3161                 ironlake_crtc_enable(crtc);
3162                 break;
3163
3164         case DRM_MODE_DPMS_OFF:
3165                 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3166                 ironlake_crtc_disable(crtc);
3167                 break;
3168         }
3169 }
3170
3171 static void ironlake_crtc_off(struct drm_crtc *crtc)
3172 {
3173         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3174         intel_put_pch_pll(intel_crtc);
3175 }
3176
3177 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3178 {
3179         if (!enable && intel_crtc->overlay) {
3180                 struct drm_device *dev = intel_crtc->base.dev;
3181                 struct drm_i915_private *dev_priv = dev->dev_private;
3182
3183                 mutex_lock(&dev->struct_mutex);
3184                 dev_priv->mm.interruptible = false;
3185                 (void) intel_overlay_switch_off(intel_crtc->overlay);
3186                 dev_priv->mm.interruptible = true;
3187                 mutex_unlock(&dev->struct_mutex);
3188         }
3189
3190         /* Let userspace switch the overlay on again. In most cases userspace
3191          * has to recompute where to put it anyway.
3192          */
3193 }
3194
3195 static void i9xx_crtc_enable(struct drm_crtc *crtc)
3196 {
3197         struct drm_device *dev = crtc->dev;
3198         struct drm_i915_private *dev_priv = dev->dev_private;
3199         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3200         int pipe = intel_crtc->pipe;
3201         int plane = intel_crtc->plane;
3202
3203         if (intel_crtc->active)
3204                 return;
3205
3206         intel_crtc->active = true;
3207         intel_update_watermarks(dev);
3208
3209         intel_enable_pll(dev_priv, pipe);
3210         intel_enable_pipe(dev_priv, pipe, false);
3211         intel_enable_plane(dev_priv, plane, pipe);
3212
3213         intel_crtc_load_lut(crtc);
3214         intel_update_fbc(dev);
3215
3216         /* Give the overlay scaler a chance to enable if it's on this pipe */
3217         intel_crtc_dpms_overlay(intel_crtc, true);
3218         intel_crtc_update_cursor(crtc, true);
3219 }
3220
3221 static void i9xx_crtc_disable(struct drm_crtc *crtc)
3222 {
3223         struct drm_device *dev = crtc->dev;
3224         struct drm_i915_private *dev_priv = dev->dev_private;
3225         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3226         int pipe = intel_crtc->pipe;
3227         int plane = intel_crtc->plane;
3228
3229         if (!intel_crtc->active)
3230                 return;
3231
3232         /* Give the overlay scaler a chance to disable if it's on this pipe */
3233         intel_crtc_wait_for_pending_flips(crtc);
3234         drm_vblank_off(dev, pipe);
3235         intel_crtc_dpms_overlay(intel_crtc, false);
3236         intel_crtc_update_cursor(crtc, false);
3237
3238         if (dev_priv->cfb_plane == plane)
3239                 intel_disable_fbc(dev);
3240
3241         intel_disable_plane(dev_priv, plane, pipe);
3242         intel_disable_pipe(dev_priv, pipe);
3243         intel_disable_pll(dev_priv, pipe);
3244
3245         intel_crtc->active = false;
3246         intel_update_fbc(dev);
3247         intel_update_watermarks(dev);
3248 }
3249
3250 static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3251 {
3252         /* XXX: When our outputs are all unaware of DPMS modes other than off
3253          * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3254          */
3255         switch (mode) {
3256         case DRM_MODE_DPMS_ON:
3257         case DRM_MODE_DPMS_STANDBY:
3258         case DRM_MODE_DPMS_SUSPEND:
3259                 i9xx_crtc_enable(crtc);
3260                 break;
3261         case DRM_MODE_DPMS_OFF:
3262                 i9xx_crtc_disable(crtc);
3263                 break;
3264         }
3265 }
3266
3267 static void i9xx_crtc_off(struct drm_crtc *crtc)
3268 {
3269 }
3270
3271 /**
3272  * Sets the power management mode of the pipe and plane.
3273  */
3274 static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3275 {
3276         struct drm_device *dev = crtc->dev;
3277         struct drm_i915_private *dev_priv = dev->dev_private;
3278         struct drm_i915_master_private *master_priv;
3279         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3280         int pipe = intel_crtc->pipe;
3281         bool enabled;
3282
3283         if (intel_crtc->dpms_mode == mode)
3284                 return;
3285
3286         intel_crtc->dpms_mode = mode;
3287
3288         dev_priv->display.dpms(crtc, mode);
3289
3290         if (!dev->primary->master)
3291                 return;
3292
3293         master_priv = dev->primary->master->driver_priv;
3294         if (!master_priv->sarea_priv)
3295                 return;
3296
3297         enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
3298
3299         switch (pipe) {
3300         case 0:
3301                 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3302                 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3303                 break;
3304         case 1:
3305                 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3306                 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3307                 break;
3308         default:
3309                 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
3310                 break;
3311         }
3312 }
3313
3314 static void intel_crtc_disable(struct drm_crtc *crtc)
3315 {
3316         struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3317         struct drm_device *dev = crtc->dev;
3318         struct drm_i915_private *dev_priv = dev->dev_private;
3319
3320         crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
3321         dev_priv->display.off(crtc);
3322
3323         assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3324         assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3325
3326         if (crtc->fb) {
3327                 mutex_lock(&dev->struct_mutex);
3328                 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
3329                 mutex_unlock(&dev->struct_mutex);
3330         }
3331 }
3332
3333 /* Prepare for a mode set.
3334  *
3335  * Note we could be a lot smarter here.  We need to figure out which outputs
3336  * will be enabled, which disabled (in short, how the config will change)
3337  * and perform the minimum necessary steps to accomplish that, e.g. updating
3338  * watermarks, FBC configuration, making sure PLLs are programmed correctly,
3339  * panel fitting is in the proper state, etc.
3340  */
3341 static void i9xx_crtc_prepare(struct drm_crtc *crtc)
3342 {
3343         i9xx_crtc_disable(crtc);
3344 }
3345
3346 static void i9xx_crtc_commit(struct drm_crtc *crtc)
3347 {
3348         i9xx_crtc_enable(crtc);
3349 }
3350
3351 static void ironlake_crtc_prepare(struct drm_crtc *crtc)
3352 {
3353         ironlake_crtc_disable(crtc);
3354 }
3355
3356 static void ironlake_crtc_commit(struct drm_crtc *crtc)
3357 {
3358         ironlake_crtc_enable(crtc);
3359 }
3360
3361 void intel_encoder_prepare(struct drm_encoder *encoder)
3362 {
3363         struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3364         /* lvds has its own version of prepare see intel_lvds_prepare */
3365         encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
3366 }
3367
3368 void intel_encoder_commit(struct drm_encoder *encoder)
3369 {
3370         struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3371         struct drm_device *dev = encoder->dev;
3372         struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
3373
3374         /* lvds has its own version of commit see intel_lvds_commit */
3375         encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
3376
3377         if (HAS_PCH_CPT(dev))
3378                 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
3379 }
3380
3381 void intel_encoder_destroy(struct drm_encoder *encoder)
3382 {
3383         struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3384
3385         drm_encoder_cleanup(encoder);
3386         kfree(intel_encoder);
3387 }
3388
3389 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3390                                   struct drm_display_mode *mode,
3391                                   struct drm_display_mode *adjusted_mode)
3392 {
3393         struct drm_device *dev = crtc->dev;
3394
3395         if (HAS_PCH_SPLIT(dev)) {
3396                 /* FDI link clock is fixed at 2.7G */
3397                 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3398                         return false;
3399         }
3400
3401         /* All interlaced capable intel hw wants timings in frames. Note though
3402          * that intel_lvds_mode_fixup does some funny tricks with the crtc
3403          * timings, so we need to be careful not to clobber these. */
3404         if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3405                 drm_mode_set_crtcinfo(adjusted_mode, 0);
3406
3407         return true;
3408 }
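
/*
 * Worked example (illustrative note, not part of the original source): for
 * a 1920x1080@60 mode, mode->clock is 148500 kHz, so the PCH check above
 * compares 148500 * 3 = 445500 against IRONLAKE_FDI_FREQ * 4 = 10800000
 * and accepts the mode; only pixel clocks above 4/3 of the fixed 2.7 GHz
 * FDI figure are rejected here.
 */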
3409
3410 static int valleyview_get_display_clock_speed(struct drm_device *dev)
3411 {
3412         return 400000; /* FIXME */
3413 }
3414
3415 static int i945_get_display_clock_speed(struct drm_device *dev)
3416 {
3417         return 400000;
3418 }
3419
3420 static int i915_get_display_clock_speed(struct drm_device *dev)
3421 {
3422         return 333000;
3423 }
3424
3425 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
3426 {
3427         return 200000;
3428 }
3429
3430 static int i915gm_get_display_clock_speed(struct drm_device *dev)
3431 {
3432         u16 gcfgc = 0;
3433
3434         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3435
3436         if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3437                 return 133000;
3438         else {
3439                 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3440                 case GC_DISPLAY_CLOCK_333_MHZ:
3441                         return 333000;
3442                 default:
3443                 case GC_DISPLAY_CLOCK_190_200_MHZ:
3444                         return 190000;
3445                 }
3446         }
3447 }
3448
3449 static int i865_get_display_clock_speed(struct drm_device *dev)
3450 {
3451         return 266000;
3452 }
3453
3454 static int i855_get_display_clock_speed(struct drm_device *dev)
3455 {
3456         u16 hpllcc = 0;
3457         /* Assume that the hardware is in the high speed state.  This
3458          * should be the default.
3459          */
3460         switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
3461         case GC_CLOCK_133_200:
3462         case GC_CLOCK_100_200:
3463                 return 200000;
3464         case GC_CLOCK_166_250:
3465                 return 250000;
3466         case GC_CLOCK_100_133:
3467                 return 133000;
3468         }
3469
3470         /* Shouldn't happen */
3471         return 0;
3472 }
3473
3474 static int i830_get_display_clock_speed(struct drm_device *dev)
3475 {
3476         return 133000;
3477 }
3478
3479 struct fdi_m_n {
3480         u32        tu;
3481         u32        gmch_m;
3482         u32        gmch_n;
3483         u32        link_m;
3484         u32        link_n;
3485 };
3486
3487 static void
3488 fdi_reduce_ratio(u32 *num, u32 *den)
3489 {
3490         while (*num > 0xffffff || *den > 0xffffff) {
3491                 *num >>= 1;
3492                 *den >>= 1;
3493         }
3494 }
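
/*
 * Worked example (illustrative note, not part of the original source): the
 * M/N values must fit in 24 bits (0xffffff), so both terms are halved in
 * lockstep until they do.  E.g. num = 0x3000000, den = 0x180 becomes
 * num = 0xc00000, den = 0x60 after two iterations, preserving the ratio
 * exactly here and to within rounding in the general case.
 */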
3495
3496 static void
3497 ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
3498                      int link_clock, struct fdi_m_n *m_n)
3499 {
3500         m_n->tu = 64; /* default size */
3501
3502         /* BUG_ON(pixel_clock > INT_MAX / 36); */
3503         m_n->gmch_m = bits_per_pixel * pixel_clock;
3504         m_n->gmch_n = link_clock * nlanes * 8;
3505         fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
3506
3507         m_n->link_m = pixel_clock;
3508         m_n->link_n = link_clock;
3509         fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
3510 }
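
/*
 * Worked example (illustrative note, not part of the original source):
 * taking 24 bpp, 4 FDI lanes, a 148500 kHz pixel clock and a 270000 kHz
 * link clock as inputs, the code above produces gmch_m = 24 * 148500 =
 * 3564000 and gmch_n = 270000 * 4 * 8 = 8640000 (both already below
 * 0xffffff, so fdi_reduce_ratio() leaves them alone), plus link_m = 148500,
 * link_n = 270000 and the default tu of 64.
 */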
3511
3512 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
3513 {
3514         if (i915_panel_use_ssc >= 0)
3515                 return i915_panel_use_ssc != 0;
3516         return dev_priv->lvds_use_ssc
3517                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
3518 }
3519
3520 /**
3521  * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
3522  * @crtc: CRTC structure
3523  * @mode: requested mode
3524  *
3525  * A pipe may be connected to one or more outputs.  Based on the depth of the
3526  * attached framebuffer, choose a good color depth to use on the pipe.
3527  *
3528  * If possible, match the pipe depth to the fb depth.  In some cases, this
3529  * isn't ideal, because the connected output supports a lesser or restricted
3530  * set of depths.  Resolve that here:
3531  *    LVDS typically supports only 6bpc, so clamp down in that case
3532  *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
3533  *    Displays may support a restricted set as well, check EDID and clamp as
3534  *      appropriate.
3535  *    DP may want to dither down to 6bpc to fit larger modes
3536  *
3537  * RETURNS:
3538  * Dithering requirement (i.e. false if display bpc and pipe bpc match,
3539  * true if they don't match).
3540  */
3541 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
3542                                          unsigned int *pipe_bpp,
3543                                          struct drm_display_mode *mode)
3544 {
3545         struct drm_device *dev = crtc->dev;
3546         struct drm_i915_private *dev_priv = dev->dev_private;
3547         struct drm_encoder *encoder;
3548         struct drm_connector *connector;
3549         unsigned int display_bpc = UINT_MAX, bpc;
3550
3551         /* Walk the encoders & connectors on this crtc, get min bpc */
3552         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3553                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3554
3555                 if (encoder->crtc != crtc)
3556                         continue;
3557
3558                 if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
3559                         unsigned int lvds_bpc;
3560
3561                         if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
3562                             LVDS_A3_POWER_UP)
3563                                 lvds_bpc = 8;
3564                         else
3565                                 lvds_bpc = 6;
3566
3567                         if (lvds_bpc < display_bpc) {
3568                                 DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
3569                                 display_bpc = lvds_bpc;
3570                         }
3571                         continue;
3572                 }
3573
3574                 if (intel_encoder->type == INTEL_OUTPUT_EDP) {
3575                         /* Use VBT settings if we have an eDP panel */
3576                         unsigned int edp_bpc = dev_priv->edp.bpp / 3;
3577
3578                         if (edp_bpc < display_bpc) {
3579                                 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
3580                                 display_bpc = edp_bpc;
3581                         }
3582                         continue;
3583                 }
3584
3585                 /* Not one of the known troublemakers, check the EDID */
3586                 list_for_each_entry(connector, &dev->mode_config.connector_list,
3587                                     head) {
3588                         if (connector->encoder != encoder)
3589                                 continue;
3590
3591                         /* Don't use an invalid EDID bpc value */
3592                         if (connector->display_info.bpc &&
3593                             connector->display_info.bpc < display_bpc) {
3594                                 DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
3595                                 display_bpc = connector->display_info.bpc;
3596                         }
3597                 }
3598
3599                 /*
3600                  * HDMI is either 12 or 8, so if the display lets 10bpc sneak
3601                  * through, clamp it down.  (Note: >12bpc will be caught below.)
3602                  */
3603                 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
3604                         if (display_bpc > 8 && display_bpc < 12) {
3605                                 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
3606                                 display_bpc = 12;
3607                         } else {
3608                                 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
3609                                 display_bpc = 8;
3610                         }
3611                 }
3612         }
3613
3614         if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
3615                 DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
3616                 display_bpc = 6;
3617         }
3618
3619         /*
3620          * We could just drive the pipe at the highest bpc all the time and
3621          * enable dithering as needed, but that costs bandwidth.  So choose
3622          * the minimum value that expresses the full color range of the fb but
3623          * also stays within the max display bpc discovered above.
3624          */
3625
3626         switch (crtc->fb->depth) {
3627         case 8:
3628                 bpc = 8; /* since we go through a colormap */
3629                 break;
3630         case 15:
3631         case 16:
3632                 bpc = 6; /* min is 18bpp */
3633                 break;
3634         case 24:
3635                 bpc = 8;
3636                 break;
3637         case 30:
3638                 bpc = 10;
3639                 break;
3640         case 48:
3641                 bpc = 12;
3642                 break;
3643         default:
3644                 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
3645                 bpc = min((unsigned int)8, display_bpc);
3646                 break;
3647         }
3648
3649         display_bpc = min(display_bpc, bpc);
3650
3651         DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
3652                       bpc, display_bpc);
3653
3654         *pipe_bpp = display_bpc * 3;
3655
3656         return display_bpc != bpc;
3657 }
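
/*
 * Worked example (illustrative note, not part of the original source): a
 * 24-bit framebuffer maps to bpc = 8; if the CRTC drives an LVDS panel
 * whose A3 power bits report 18-bit mode, display_bpc is clamped to 6, so
 * min(display_bpc, bpc) = 6, *pipe_bpp becomes 6 * 3 = 18 and the function
 * returns true, i.e. the caller should enable dithering to mask the
 * 8bpc -> 6bpc truncation.
 */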
3658
3659 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
3660 {
3661         struct drm_device *dev = crtc->dev;
3662         struct drm_i915_private *dev_priv = dev->dev_private;
3663         int refclk;
3664
3665         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
3666             intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
3667                 refclk = dev_priv->lvds_ssc_freq * 1000;
3668                 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
3669                               refclk / 1000);
3670         } else if (!IS_GEN2(dev)) {
3671                 refclk = 96000;
3672         } else {
3673                 refclk = 48000;
3674         }
3675
3676         return refclk;
3677 }
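
/*
 * Worked example (illustrative note, not part of the original source): for
 * a single LVDS connector with SSC in use and, say, lvds_ssc_freq = 100,
 * this returns 100 * 1000 = 100000 kHz; every other configuration falls
 * back to the fixed 96000 kHz (non-gen2) or 48000 kHz (gen2) reference.
 */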
3678
3679 static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
3680                                       intel_clock_t *clock)
3681 {
3682         /* SDVO TV has fixed PLL values depending on its clock range;
3683            this mirrors the VBIOS setting. */
3684         if (adjusted_mode->clock >= 100000
3685             && adjusted_mode->clock < 140500) {
3686                 clock->p1 = 2;
3687                 clock->p2 = 10;
3688                 clock->n = 3;
3689                 clock->m1 = 16;
3690                 clock->m2 = 8;
3691         } else if (adjusted_mode->clock >= 140500
3692                    && adjusted_mode->clock <= 200000) {
3693                 clock->p1 = 1;
3694                 clock->p2 = 10;
3695                 clock->n = 6;
3696                 clock->m1 = 12;
3697                 clock->m2 = 8;
3698         }
3699 }
3700
3701 static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
3702                                      intel_clock_t *clock,
3703                                      intel_clock_t *reduced_clock)
3704 {
3705         struct drm_device *dev = crtc->dev;
3706         struct drm_i915_private *dev_priv = dev->dev_private;
3707         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3708         int pipe = intel_crtc->pipe;
3709         u32 fp, fp2 = 0;
3710
3711         if (IS_PINEVIEW(dev)) {
3712                 fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
3713                 if (reduced_clock)
3714                         fp2 = (1 << reduced_clock->n) << 16 |
3715                                 reduced_clock->m1 << 8 | reduced_clock->m2;
3716         } else {
3717                 fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
3718                 if (reduced_clock)
3719                         fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
3720                                 reduced_clock->m2;
3721         }
3722
3723         I915_WRITE(FP0(pipe), fp);
3724
3725         intel_crtc->lowfreq_avail = false;
3726         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
3727             reduced_clock && i915_powersave) {
3728                 I915_WRITE(FP1(pipe), fp2);
3729                 intel_crtc->lowfreq_avail = true;
3730         } else {
3731                 I915_WRITE(FP1(pipe), fp);
3732         }
3733 }
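
/*
 * Worked example (illustrative note, not part of the original source): for
 * divisors n = 3, m1 = 16, m2 = 8 the non-Pineview packing above gives
 * fp = 3 << 16 | 16 << 8 | 8 = 0x31008, while the Pineview encoding of the
 * same values uses (1 << 3) << 16 for the N field and yields 0x81008.
 */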
3734
3735 static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
3736                               struct drm_display_mode *adjusted_mode)
3737 {
3738         struct drm_device *dev = crtc->dev;
3739         struct drm_i915_private *dev_priv = dev->dev_private;
3740         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3741         int pipe = intel_crtc->pipe;
3742         u32 temp;
3743
3744         temp = I915_READ(LVDS);
3745         temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
3746         if (pipe == 1) {
3747                 temp |= LVDS_PIPEB_SELECT;
3748         } else {
3749                 temp &= ~LVDS_PIPEB_SELECT;
3750         }
3751         /* set the corresponding LVDS_BORDER bit */
3752         temp |= dev_priv->lvds_border_bits;
3753         /* Set the B0-B3 data pairs corresponding to whether we're going to
3754          * set the DPLLs for dual-channel mode or not.
3755          */
3756         if (clock->p2 == 7)
3757                 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
3758         else
3759                 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
3760
3761         /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
3762          * appropriately here, but we need to look more thoroughly into how
3763          * panels behave in the two modes.
3764          */
3765         /* set the dithering flag on LVDS as needed */
3766         if (INTEL_INFO(dev)->gen >= 4) {
3767                 if (dev_priv->lvds_dither)
3768                         temp |= LVDS_ENABLE_DITHER;
3769                 else
3770                         temp &= ~LVDS_ENABLE_DITHER;
3771         }
3772         temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
3773         if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
3774                 temp |= LVDS_HSYNC_POLARITY;
3775         if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
3776                 temp |= LVDS_VSYNC_POLARITY;
3777         I915_WRITE(LVDS, temp);
3778 }
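
/*
 * Illustrative note (assumption, not part of the original source): the
 * clock->p2 == 7 test above is read here as the dual-channel LVDS case,
 * assuming the LVDS PLL limits elsewhere in this file select the fast p2
 * divider (7) for dual-channel panels and the slow one (14) for
 * single-channel, so the B0-B3 and CLKB pairs are only powered up when a
 * second channel is actually driven.
 */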
3779
3780 static void i9xx_update_pll(struct drm_crtc *crtc,
3781                             struct drm_display_mode *mode,
3782                             struct drm_display_mode *adjusted_mode,
3783                             intel_clock_t *clock, intel_clock_t *reduced_clock,
3784                             int num_connectors)
3785 {
3786         struct drm_device *dev = crtc->dev;
3787         struct drm_i915_private *dev_priv = dev->dev_private;
3788         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3789         int pipe = intel_crtc->pipe;
3790         u32 dpll;
3791         bool is_sdvo;
3792
3793         is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
3794                 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
3795
3796         dpll = DPLL_VGA_MODE_DIS;
3797
3798         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
3799                 dpll |= DPLLB_MODE_LVDS;
3800         else
3801                 dpll |= DPLLB_MODE_DAC_SERIAL;
3802         if (is_sdvo) {
3803                 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
3804                 if (pixel_multiplier > 1) {
3805                         if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3806                                 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
3807                 }
3808                 dpll |= DPLL_DVO_HIGH_SPEED;
3809         }
3810         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
3811                 dpll |= DPLL_DVO_HIGH_SPEED;
3812
3813         /* compute bitmask from p1 value */
3814         if (IS_PINEVIEW(dev))
3815                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
3816         else {
3817                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
3818                 if (IS_G4X(dev) && reduced_clock)
3819                         dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
3820         }
3821         switch (clock->p2) {
3822         case 5:
3823                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
3824                 break;
3825         case 7:
3826                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
3827                 break;
3828         case 10:
3829                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
3830                 break;
3831         case 14:
3832                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
3833                 break;
3834         }
3835         if (INTEL_INFO(dev)->gen >= 4)
3836                 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
3837
3838         if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
3839                 dpll |= PLL_REF_INPUT_TVCLKINBC;
3840         else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
3841                 /* XXX: just matching BIOS for now */
3842                 /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
3843                 dpll |= 3;
3844         else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
3845                  intel_panel_use_ssc(dev_priv) && num_connectors < 2)
3846                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
3847         else
3848                 dpll |= PLL_REF_INPUT_DREFCLK;
3849
3850         dpll |= DPLL_VCO_ENABLE;
3851         I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
3852         POSTING_READ(DPLL(pipe));
3853         udelay(150);
3854
3855         /* The LVDS pin pair needs to be on before the DPLLs are enabled.
3856          * This is an exception to the general rule that mode_set doesn't turn
3857          * things on.
3858          */
3859         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
3860                 intel_update_lvds(crtc, clock, adjusted_mode);
3861
3862         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
3863                 intel_dp_set_m_n(crtc, mode, adjusted_mode);
3864
3865         I915_WRITE(DPLL(pipe), dpll);
3866
3867         /* Wait for the clocks to stabilize. */
3868         POSTING_READ(DPLL(pipe));
3869         udelay(150);
3870
3871         if (INTEL_INFO(dev)->gen >= 4) {
3872                 u32 temp = 0;
3873                 if (is_sdvo) {
3874                         temp = intel_mode_get_pixel_multiplier(adjusted_mode);
3875                         if (temp > 1)
3876                                 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
3877                         else
3878                                 temp = 0;
3879                 }
3880                 I915_WRITE(DPLL_MD(pipe), temp);
3881         } else {
3882                 /* The pixel multiplier can only be updated once the
3883                  * DPLL is enabled and the clocks are stable.
3884                  *
3885                  * So write it again.
3886                  */
3887                 I915_WRITE(DPLL(pipe), dpll);
3888         }
3889 }
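
/*
 * Worked example (illustrative note, not part of the original source): the
 * "bitmask from p1" step encodes the post divider one-hot, so p1 = 3
 * becomes 1 << (3 - 1) = 0x4 before the shift into the P1 field, and on
 * G4X a reduced clock with p1 = 2 contributes 1 << (2 - 1) = 0x2 in the
 * FPA1 P1 field.
 */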
3890
3891 static void i8xx_update_pll(struct drm_crtc *crtc,
3892                             struct drm_display_mode *adjusted_mode,
3893                             intel_clock_t *clock,
3894                             int num_connectors)
3895 {
3896         struct drm_device *dev = crtc->dev;
3897         struct drm_i915_private *dev_priv = dev->dev_private;
3898         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3899         int pipe = intel_crtc->pipe;
3900         u32 dpll;
3901
3902         dpll = DPLL_VGA_MODE_DIS;
3903
3904         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3905                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
3906         } else {
3907                 if (clock->p1 == 2)
3908                         dpll |= PLL_P1_DIVIDE_BY_TWO;
3909                 else
3910                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
3911                 if (clock->p2 == 4)
3912                         dpll |= PLL_P2_DIVIDE_BY_4;
3913         }
3914
3915         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
3916                 /* XXX: just matching BIOS for now */
3917                 /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
3918                 dpll |= 3;
3919         else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
3920                  intel_panel_use_ssc(dev_priv) && num_connectors < 2)
3921                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
3922         else
3923                 dpll |= PLL_REF_INPUT_DREFCLK;
3924
3925         dpll |= DPLL_VCO_ENABLE;
3926         I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
3927         POSTING_READ(DPLL(pipe));
3928         udelay(150);
3929
3930         I915_WRITE(DPLL(pipe), dpll);
3931
3932         /* Wait for the clocks to stabilize. */
3933         POSTING_READ(DPLL(pipe));
3934         udelay(150);
3935
3936         /* The LVDS pin pair needs to be on before the DPLLs are enabled.
3937          * This is an exception to the general rule that mode_set doesn't turn
3938          * things on.
3939          */
3940         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
3941                 intel_update_lvds(crtc, clock, adjusted_mode);
3942
3943         /* The pixel multiplier can only be updated once the
3944          * DPLL is enabled and the clocks are stable.
3945          *
3946          * So write it again.
3947          */
3948         I915_WRITE(DPLL(pipe), dpll);
3949 }
3950
3951 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
3952                               struct drm_display_mode *mode,
3953                               struct drm_display_mode *adjusted_mode,
3954                               int x, int y,
3955                               struct drm_framebuffer *old_fb)
3956 {
3957         struct drm_device *dev = crtc->dev;
3958         struct drm_i915_private *dev_priv = dev->dev_private;
3959         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3960         int pipe = intel_crtc->pipe;
3961         int plane = intel_crtc->plane;
3962         int refclk, num_connectors = 0;
3963         intel_clock_t clock, reduced_clock;
3964         u32 dspcntr, pipeconf, vsyncshift;
3965         bool ok, has_reduced_clock = false, is_sdvo = false;
3966         bool is_lvds = false, is_tv = false, is_dp = false;
3967         struct drm_mode_config *mode_config = &dev->mode_config;
3968         struct intel_encoder *encoder;
3969         const intel_limit_t *limit;
3970         int ret;
3971
3972         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
3973                 if (encoder->base.crtc != crtc)
3974                         continue;
3975
3976                 switch (encoder->type) {
3977                 case INTEL_OUTPUT_LVDS:
3978                         is_lvds = true;
3979                         break;
3980                 case INTEL_OUTPUT_SDVO:
3981                 case INTEL_OUTPUT_HDMI:
3982                         is_sdvo = true;
3983                         if (encoder->needs_tv_clock)
3984                                 is_tv = true;
3985                         break;
3986                 case INTEL_OUTPUT_TVOUT:
3987                         is_tv = true;
3988                         break;
3989                 case INTEL_OUTPUT_DISPLAYPORT:
3990                         is_dp = true;
3991                         break;
3992                 }
3993
3994                 num_connectors++;
3995         }
3996
3997         refclk = i9xx_get_refclk(crtc, num_connectors);
3998
3999         /*
4000          * Returns a set of divisors for the desired target clock with the given
4001          * refclk, or FALSE.  The returned values represent the clock equation:
4002          * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4003          */
4004         limit = intel_limit(crtc, refclk);
4005         ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
4006                              &clock);
4007         if (!ok) {
4008                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4009                 return -EINVAL;
4010         }
4011
4012         /* Ensure that the cursor is valid for the new mode before changing... */
4013         intel_crtc_update_cursor(crtc, true);
4014
4015         if (is_lvds && dev_priv->lvds_downclock_avail) {
4016                 /*
4017                  * Ensure we match the reduced clock's P to the target clock.
4018                  * If the clocks don't match, we can't switch the display clock
4019                  * by using the FP0/FP1. In such case we will disable the LVDS
4020                  * downclock feature.
4021                 */
4022                 has_reduced_clock = limit->find_pll(limit, crtc,
4023                                                     dev_priv->lvds_downclock,
4024                                                     refclk,
4025                                                     &clock,
4026                                                     &reduced_clock);
4027         }
4028
4029         if (is_sdvo && is_tv)
4030                 i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
4031
4032         i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
4033                                  &reduced_clock : NULL);
4034
4035         if (IS_GEN2(dev))
4036                 i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
4037         else
4038                 i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
4039                                 has_reduced_clock ? &reduced_clock : NULL,
4040                                 num_connectors);
4041
4042         /* setup pipeconf */
4043         pipeconf = I915_READ(PIPECONF(pipe));
4044
4045         /* Set up the display plane register */
4046         dspcntr = DISPPLANE_GAMMA_ENABLE;
4047
4048         if (pipe == 0)
4049                 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4050         else
4051                 dspcntr |= DISPPLANE_SEL_PIPE_B;
4052
4053         if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
4054                 /* Enable pixel doubling when the dot clock is > 90% of the (display)
4055                  * core speed.
4056                  *
4057                  * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
4058                  * pipe == 0 check?
4059                  */
4060                 if (mode->clock >
4061                     dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
4062                         pipeconf |= PIPECONF_DOUBLE_WIDE;
4063                 else
4064                         pipeconf &= ~PIPECONF_DOUBLE_WIDE;
4065         }
4066
4067         /* default to 8bpc */
4068         pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
4069         if (is_dp) {
4070                 if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4071                         pipeconf |= PIPECONF_BPP_6 |
4072                                     PIPECONF_DITHER_EN |
4073                                     PIPECONF_DITHER_TYPE_SP;
4074                 }
4075         }
4076
4077         DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
4078         drm_mode_debug_printmodeline(mode);
4079
4080         if (HAS_PIPE_CXSR(dev)) {
4081                 if (intel_crtc->lowfreq_avail) {
4082                         DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4083                         pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4084                 } else {
4085                         DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4086                         pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4087                 }
4088         }
4089
4090         pipeconf &= ~PIPECONF_INTERLACE_MASK;
4091         if (!IS_GEN2(dev) &&
4092             adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4093                 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4094                 /* the chip adds 2 halflines automatically */
4095                 adjusted_mode->crtc_vtotal -= 1;
4096                 adjusted_mode->crtc_vblank_end -= 1;
4097                 vsyncshift = adjusted_mode->crtc_hsync_start
4098                              - adjusted_mode->crtc_htotal/2;
4099         } else {
4100                 pipeconf |= PIPECONF_PROGRESSIVE;
4101                 vsyncshift = 0;
4102         }
4103
4104         if (!IS_GEN3(dev))
4105                 I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
4106
4107         I915_WRITE(HTOTAL(pipe),
4108                    (adjusted_mode->crtc_hdisplay - 1) |
4109                    ((adjusted_mode->crtc_htotal - 1) << 16));
4110         I915_WRITE(HBLANK(pipe),
4111                    (adjusted_mode->crtc_hblank_start - 1) |
4112                    ((adjusted_mode->crtc_hblank_end - 1) << 16));
4113         I915_WRITE(HSYNC(pipe),
4114                    (adjusted_mode->crtc_hsync_start - 1) |
4115                    ((adjusted_mode->crtc_hsync_end - 1) << 16));
4116
4117         I915_WRITE(VTOTAL(pipe),
4118                    (adjusted_mode->crtc_vdisplay - 1) |
4119                    ((adjusted_mode->crtc_vtotal - 1) << 16));
4120         I915_WRITE(VBLANK(pipe),
4121                    (adjusted_mode->crtc_vblank_start - 1) |
4122                    ((adjusted_mode->crtc_vblank_end - 1) << 16));
4123         I915_WRITE(VSYNC(pipe),
4124                    (adjusted_mode->crtc_vsync_start - 1) |
4125                    ((adjusted_mode->crtc_vsync_end - 1) << 16));
4126
4127         /* pipesrc and dspsize control the size that is scaled from,
4128          * which should always be the user's requested size.
4129          */
4130         I915_WRITE(DSPSIZE(plane),
4131                    ((mode->vdisplay - 1) << 16) |
4132                    (mode->hdisplay - 1));
4133         I915_WRITE(DSPPOS(plane), 0);
4134         I915_WRITE(PIPESRC(pipe),
4135                    ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
4136
4137         I915_WRITE(PIPECONF(pipe), pipeconf);
4138         POSTING_READ(PIPECONF(pipe));
4139         intel_enable_pipe(dev_priv, pipe, false);
4140
4141         intel_wait_for_vblank(dev, pipe);
4142
4143         I915_WRITE(DSPCNTR(plane), dspcntr);
4144         POSTING_READ(DSPCNTR(plane));
4145
4146         ret = intel_pipe_set_base(crtc, x, y, old_fb);
4147
4148         intel_update_watermarks(dev);
4149
4150         return ret;
4151 }
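
/*
 * Worked example (illustrative note, not part of the original source): the
 * timing registers written above pack two values per register, one per
 * half-word, each stored minus one.  For a CEA 1920x1080@60 mode
 * (hdisplay 1920, htotal 2200, vdisplay 1080) that gives
 * HTOTAL = (1920 - 1) | ((2200 - 1) << 16) = 0x0897077f and
 * PIPESRC = ((1920 - 1) << 16) | (1080 - 1) = 0x077f0437.
 */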
4152
4153 /*
4154  * Initialize reference clocks when the driver loads
4155  */
4156 void ironlake_init_pch_refclk(struct drm_device *dev)
4157 {
4158         struct drm_i915_private *dev_priv = dev->dev_private;
4159         struct drm_mode_config *mode_config = &dev->mode_config;
4160         struct intel_encoder *encoder;
4161         u32 temp;
4162         bool has_lvds = false;
4163         bool has_cpu_edp = false;
4164         bool has_pch_edp = false;
4165         bool has_panel = false;
4166         bool has_ck505 = false;
4167         bool can_ssc = false;
4168
4169         /* We need to take the global config into account */
4170         list_for_each_entry(encoder, &mode_config->encoder_list,
4171                             base.head) {
4172                 switch (encoder->type) {
4173                 case INTEL_OUTPUT_LVDS:
4174                         has_panel = true;
4175                         has_lvds = true;
4176                         break;
4177                 case INTEL_OUTPUT_EDP:
4178                         has_panel = true;
4179                         if (intel_encoder_is_pch_edp(&encoder->base))
4180                                 has_pch_edp = true;
4181                         else
4182                                 has_cpu_edp = true;
4183                         break;
4184                 }
4185         }
4186
4187         if (HAS_PCH_IBX(dev)) {
4188                 has_ck505 = dev_priv->display_clock_mode;
4189                 can_ssc = has_ck505;
4190         } else {
4191                 has_ck505 = false;
4192                 can_ssc = true;
4193         }
4194
4195         DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
4196                       has_panel, has_lvds, has_pch_edp, has_cpu_edp,
4197                       has_ck505);
4198
4199         /* Ironlake: try to set up the display reference clock before
4200          * enabling the DPLL. This is only under the driver's control
4201          * after PCH B stepping; earlier chipset steppings should
4202          * ignore this setting.
4203          */
4204         temp = I915_READ(PCH_DREF_CONTROL);
4205         /* Always enable nonspread source */
4206         temp &= ~DREF_NONSPREAD_SOURCE_MASK;
4207
4208         if (has_ck505)
4209                 temp |= DREF_NONSPREAD_CK505_ENABLE;
4210         else
4211                 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
4212
4213         if (has_panel) {
4214                 temp &= ~DREF_SSC_SOURCE_MASK;
4215                 temp |= DREF_SSC_SOURCE_ENABLE;
4216
4217                 /* SSC must be turned on before enabling the CPU output */
4218                 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
4219                         DRM_DEBUG_KMS("Using SSC on panel\n");
4220                         temp |= DREF_SSC1_ENABLE;
4221                 } else
4222                         temp &= ~DREF_SSC1_ENABLE;
4223
4224                 /* Get SSC going before enabling the outputs */
4225                 I915_WRITE(PCH_DREF_CONTROL, temp);
4226                 POSTING_READ(PCH_DREF_CONTROL);
4227                 udelay(200);
4228
4229                 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4230
4231                 /* Enable CPU source on CPU attached eDP */
4232                 if (has_cpu_edp) {
4233                         if (intel_panel_use_ssc(dev_priv) && can_ssc) {
4234                                 DRM_DEBUG_KMS("Using SSC on eDP\n");
4235                                 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
4236                         }
4237                         else
4238                                 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
4239                 } else
4240                         temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4241
4242                 I915_WRITE(PCH_DREF_CONTROL, temp);
4243                 POSTING_READ(PCH_DREF_CONTROL);
4244                 udelay(200);
4245         } else {
4246                 DRM_DEBUG_KMS("Disabling SSC entirely\n");
4247
4248                 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4249
4250                 /* Turn off CPU output */
4251                 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4252
4253                 I915_WRITE(PCH_DREF_CONTROL, temp);
4254                 POSTING_READ(PCH_DREF_CONTROL);
4255                 udelay(200);
4256
4257                 /* Turn off the SSC source */
4258                 temp &= ~DREF_SSC_SOURCE_MASK;
4259                 temp |= DREF_SSC_SOURCE_DISABLE;
4260
4261                 /* Turn off SSC1 */
4262                 temp &= ~ DREF_SSC1_ENABLE;
4263
4264                 I915_WRITE(PCH_DREF_CONTROL, temp);
4265                 POSTING_READ(PCH_DREF_CONTROL);
4266                 udelay(200);
4267         }
4268 }
4269
4270 static int ironlake_get_refclk(struct drm_crtc *crtc)
4271 {
4272         struct drm_device *dev = crtc->dev;
4273         struct drm_i915_private *dev_priv = dev->dev_private;
4274         struct intel_encoder *encoder;
4275         struct drm_mode_config *mode_config = &dev->mode_config;
4276         struct intel_encoder *edp_encoder = NULL;
4277         int num_connectors = 0;
4278         bool is_lvds = false;
4279
4280         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4281                 if (encoder->base.crtc != crtc)
4282                         continue;
4283
4284                 switch (encoder->type) {
4285                 case INTEL_OUTPUT_LVDS:
4286                         is_lvds = true;
4287                         break;
4288                 case INTEL_OUTPUT_EDP:
4289                         edp_encoder = encoder;
4290                         break;
4291                 }
4292                 num_connectors++;
4293         }
4294
4295         if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4296                 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4297                               dev_priv->lvds_ssc_freq);
4298                 return dev_priv->lvds_ssc_freq * 1000;
4299         }
4300
4301         return 120000;
4302 }
4303
4304 static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4305                                   struct drm_display_mode *mode,
4306                                   struct drm_display_mode *adjusted_mode,
4307                                   int x, int y,
4308                                   struct drm_framebuffer *old_fb)
4309 {
4310         struct drm_device *dev = crtc->dev;
4311         struct drm_i915_private *dev_priv = dev->dev_private;
4312         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4313         int pipe = intel_crtc->pipe;
4314         int plane = intel_crtc->plane;
4315         int refclk, num_connectors = 0;
4316         intel_clock_t clock, reduced_clock;
4317         u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
4318         bool ok, has_reduced_clock = false, is_sdvo = false;
4319         bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
4320         struct drm_mode_config *mode_config = &dev->mode_config;
4321         struct intel_encoder *encoder, *edp_encoder = NULL;
4322         const intel_limit_t *limit;
4323         int ret;
4324         struct fdi_m_n m_n = {0};
4325         u32 temp;
4326         int target_clock, pixel_multiplier, lane, link_bw, factor;
4327         unsigned int pipe_bpp;
4328         bool dither;
4329         bool is_cpu_edp = false, is_pch_edp = false;
4330
4331         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4332                 if (encoder->base.crtc != crtc)
4333                         continue;
4334
4335                 switch (encoder->type) {
4336                 case INTEL_OUTPUT_LVDS:
4337                         is_lvds = true;
4338                         break;
4339                 case INTEL_OUTPUT_SDVO:
4340                 case INTEL_OUTPUT_HDMI:
4341                         is_sdvo = true;
4342                         if (encoder->needs_tv_clock)
4343                                 is_tv = true;
4344                         break;
4345                 case INTEL_OUTPUT_TVOUT:
4346                         is_tv = true;
4347                         break;
4348                 case INTEL_OUTPUT_ANALOG:
4349                         is_crt = true;
4350                         break;
4351                 case INTEL_OUTPUT_DISPLAYPORT:
4352                         is_dp = true;
4353                         break;
4354                 case INTEL_OUTPUT_EDP:
4355                         is_dp = true;
4356                         if (intel_encoder_is_pch_edp(&encoder->base))
4357                                 is_pch_edp = true;
4358                         else
4359                                 is_cpu_edp = true;
4360                         edp_encoder = encoder;
4361                         break;
4362                 }
4363
4364                 num_connectors++;
4365         }
4366
4367         refclk = ironlake_get_refclk(crtc);
4368
4369         /*
4370          * Returns a set of divisors for the desired target clock with the given
4371          * refclk, or false.  The returned values represent the clock equation:
4372          * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4373          */
4374         limit = intel_limit(crtc, refclk);
4375         ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
4376                              &clock);
4377         if (!ok) {
4378                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4379                 return -EINVAL;
4380         }
4381
4382         /* Ensure that the cursor is valid for the new mode before changing... */
4383         intel_crtc_update_cursor(crtc, true);
4384
4385         if (is_lvds && dev_priv->lvds_downclock_avail) {
4386                 /*
4387                  * Ensure we match the reduced clock's P to the target clock.
4388                  * If the clocks don't match, we can't switch the display clock
4389                  * by using the FP0/FP1. In that case we will disable the LVDS
4390                  * downclock feature.
4391                  */
4392                 has_reduced_clock = limit->find_pll(limit, crtc,
4393                                                     dev_priv->lvds_downclock,
4394                                                     refclk,
4395                                                     &clock,
4396                                                     &reduced_clock);
4397         }
4398         /* SDVO TV has fixed PLL values that depend on its clock range;
4399            this mirrors the VBIOS setting. */
4400         if (is_sdvo && is_tv) {
4401                 if (adjusted_mode->clock >= 100000
4402                     && adjusted_mode->clock < 140500) {
4403                         clock.p1 = 2;
4404                         clock.p2 = 10;
4405                         clock.n = 3;
4406                         clock.m1 = 16;
4407                         clock.m2 = 8;
4408                 } else if (adjusted_mode->clock >= 140500
4409                            && adjusted_mode->clock <= 200000) {
4410                         clock.p1 = 1;
4411                         clock.p2 = 10;
4412                         clock.n = 6;
4413                         clock.m1 = 12;
4414                         clock.m2 = 8;
4415                 }
4416         }
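             /* Sanity check (informational only): plugging the first fixed
              * setting into the divisor equation quoted above with the
              * 120 MHz refclk gives
              * 120000 * (5 * (16 + 2) + (8 + 2)) / (3 + 2) / 2 / 10 = 120000 kHz,
              * which sits inside the 100000-140500 kHz range it serves.
              */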
4417
4418         /* FDI link */
4419         pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4420         lane = 0;
4421         /* CPU eDP doesn't require FDI link, so just set DP M/N
4422            according to current link config */
4423         if (is_cpu_edp) {
4424                 target_clock = mode->clock;
4425                 intel_edp_link_config(edp_encoder, &lane, &link_bw);
4426         } else {
4427                 /* [e]DP over FDI requires target mode clock
4428                    instead of link clock */
4429                 if (is_dp)
4430                         target_clock = mode->clock;
4431                 else
4432                         target_clock = adjusted_mode->clock;
4433
4434                 /* FDI is a binary signal running at ~2.7GHz, encoding
4435                  * each output octet as 10 bits. The actual frequency
4436                  * is stored as a divider into a 100MHz clock, and the
4437                  * mode pixel clock is stored in units of 1KHz.
4438                  * Hence the bw of each lane in terms of the mode signal
4439                  * is:
4440                  */
4441                 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
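                     /* For example, a stored divider of 27 corresponds to a
                      * 2.7 GHz FDI bit rate, i.e. 27 * 100 MHz / 1 kHz / 10 =
                      * 270000 kHz of mode-clock bandwidth per lane.
                      */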
4442         }
4443
4444         /* determine panel color depth */
4445         temp = I915_READ(PIPECONF(pipe));
4446         temp &= ~PIPE_BPC_MASK;
4447         dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
4448         switch (pipe_bpp) {
4449         case 18:
4450                 temp |= PIPE_6BPC;
4451                 break;
4452         case 24:
4453                 temp |= PIPE_8BPC;
4454                 break;
4455         case 30:
4456                 temp |= PIPE_10BPC;
4457                 break;
4458         case 36:
4459                 temp |= PIPE_12BPC;
4460                 break;
4461         default:
4462                 WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
4463                         pipe_bpp);
4464                 temp |= PIPE_8BPC;
4465                 pipe_bpp = 24;
4466                 break;
4467         }
4468
4469         intel_crtc->bpp = pipe_bpp;
4470         I915_WRITE(PIPECONF(pipe), temp);
4471
4472         if (!lane) {
4473                 /*
4474                  * Account for spread spectrum to avoid
4475                  * oversubscribing the link. Max center spread
4476                  * is 2.5%; use 5% for safety's sake.
4477                  */
4478                 u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
4479                 lane = bps / (link_bw * 8) + 1;
4480         }
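             /* Worked example: a 148500 kHz mode at 24 bpp needs
              * 148500 * 24 * 21 / 20 = 3742200 kbps; with link_bw = 270000
              * each lane carries 270000 * 8 = 2160000 kbps, so the above
              * works out to 3742200 / 2160000 + 1 = 2 FDI lanes.
              */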
4481
4482         intel_crtc->fdi_lanes = lane;
4483
4484         if (pixel_multiplier > 1)
4485                 link_bw *= pixel_multiplier;
4486         ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
4487                              &m_n);
4488
4489         fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
4490         if (has_reduced_clock)
4491                 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
4492                         reduced_clock.m2;
4493
4494         /* Enable autotuning of the PLL clock (if permissible) */
4495         factor = 21;
4496         if (is_lvds) {
4497                 if ((intel_panel_use_ssc(dev_priv) &&
4498                      dev_priv->lvds_ssc_freq == 100) ||
4499                     (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
4500                         factor = 25;
4501         } else if (is_sdvo && is_tv)
4502                 factor = 20;
4503
4504         if (clock.m < factor * clock.n)
4505                 fp |= FP_CB_TUNE;
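             /* i.e. FP_CB_TUNE is requested whenever the m/n ratio falls
              * below the factor chosen above (21 by default, 25 for the
              * LVDS cases checked, 20 for SDVO TV).
              */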
4506
4507         dpll = 0;
4508
4509         if (is_lvds)
4510                 dpll |= DPLLB_MODE_LVDS;
4511         else
4512                 dpll |= DPLLB_MODE_DAC_SERIAL;
4513         if (is_sdvo) {
4514                 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4515                 if (pixel_multiplier > 1) {
4516                         dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
4517                 }
4518                 dpll |= DPLL_DVO_HIGH_SPEED;
4519         }
4520         if (is_dp && !is_cpu_edp)
4521                 dpll |= DPLL_DVO_HIGH_SPEED;
4522
4523         /* compute bitmask from p1 value */
4524         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4525         /* also FPA1 */
4526         dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4527
4528         switch (clock.p2) {
4529         case 5:
4530                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4531                 break;
4532         case 7:
4533                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4534                 break;
4535         case 10:
4536                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4537                 break;
4538         case 14:
4539                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4540                 break;
4541         }
4542
4543         if (is_sdvo && is_tv)
4544                 dpll |= PLL_REF_INPUT_TVCLKINBC;
4545         else if (is_tv)
4546                 /* XXX: just matching BIOS for now */
4547                 /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
4548                 dpll |= 3;
4549         else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4550                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4551         else
4552                 dpll |= PLL_REF_INPUT_DREFCLK;
4553
4554         /* setup pipeconf */
4555         pipeconf = I915_READ(PIPECONF(pipe));
4556
4557         /* Set up the display plane register */
4558         dspcntr = DISPPLANE_GAMMA_ENABLE;
4559
4560         DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
4561         drm_mode_debug_printmodeline(mode);
4562
4563         /* CPU eDP is the only output that doesn't need a PCH PLL of its own on
4564          * pre-Haswell/LPT generation */
4565         if (HAS_PCH_LPT(dev)) {
4566                 DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
4567                                 pipe);
4568         } else if (!is_cpu_edp) {
4569                 struct intel_pch_pll *pll;
4570
4571                 pll = intel_get_pch_pll(intel_crtc, dpll, fp);
4572                 if (pll == NULL) {
4573                         DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
4574                                          pipe);
4575                         return -EINVAL;
4576                 }
4577         } else
4578                 intel_put_pch_pll(intel_crtc);
4579
4580         /* The LVDS pin pair needs to be on before the DPLLs are enabled.
4581          * This is an exception to the general rule that mode_set doesn't turn
4582          * things on.
4583          */
4584         if (is_lvds) {
4585                 temp = I915_READ(PCH_LVDS);
4586                 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
4587                 if (HAS_PCH_CPT(dev)) {
4588                         temp &= ~PORT_TRANS_SEL_MASK;
4589                         temp |= PORT_TRANS_SEL_CPT(pipe);
4590                 } else {
4591                         if (pipe == 1)
4592                                 temp |= LVDS_PIPEB_SELECT;
4593                         else
4594                                 temp &= ~LVDS_PIPEB_SELECT;
4595                 }
4596
4597                 /* set the corresponding LVDS_BORDER bit */
4598                 temp |= dev_priv->lvds_border_bits;
4599                 /* Set the B0-B3 data pairs corresponding to whether we're going to
4600                  * set the DPLLs for dual-channel mode or not.
4601                  */
4602                 if (clock.p2 == 7)
4603                         temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
4604                 else
4605                         temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
4606
4607                 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
4608                  * appropriately here, but we need to look more thoroughly into how
4609                  * panels behave in the two modes.
4610                  */
4611                 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
4612                 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
4613                         temp |= LVDS_HSYNC_POLARITY;
4614                 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
4615                         temp |= LVDS_VSYNC_POLARITY;
4616                 I915_WRITE(PCH_LVDS, temp);
4617         }
4618
4619         pipeconf &= ~PIPECONF_DITHER_EN;
4620         pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
4621         if ((is_lvds && dev_priv->lvds_dither) || dither) {
4622                 pipeconf |= PIPECONF_DITHER_EN;
4623                 pipeconf |= PIPECONF_DITHER_TYPE_SP;
4624         }
4625         if (is_dp && !is_cpu_edp) {
4626                 intel_dp_set_m_n(crtc, mode, adjusted_mode);
4627         } else {
4628                 /* For non-DP output, clear any trans DP clock recovery setting. */
4629                 I915_WRITE(TRANSDATA_M1(pipe), 0);
4630                 I915_WRITE(TRANSDATA_N1(pipe), 0);
4631                 I915_WRITE(TRANSDPLINK_M1(pipe), 0);
4632                 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
4633         }
4634
4635         if (intel_crtc->pch_pll) {
4636                 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
4637
4638                 /* Wait for the clocks to stabilize. */
4639                 POSTING_READ(intel_crtc->pch_pll->pll_reg);
4640                 udelay(150);
4641
4642                 /* The pixel multiplier can only be updated once the
4643                  * DPLL is enabled and the clocks are stable.
4644                  *
4645                  * So write it again.
4646                  */
4647                 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
4648         }
4649
4650         intel_crtc->lowfreq_avail = false;
4651         if (intel_crtc->pch_pll) {
4652                 if (is_lvds && has_reduced_clock && i915_powersave) {
4653                         I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
4654                         intel_crtc->lowfreq_avail = true;
4655                         if (HAS_PIPE_CXSR(dev)) {
4656                                 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4657                                 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4658                         }
4659                 } else {
4660                         I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
4661                         if (HAS_PIPE_CXSR(dev)) {
4662                                 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4663                                 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4664                         }
4665                 }
4666         }
4667
4668         pipeconf &= ~PIPECONF_INTERLACE_MASK;
4669         if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4670                 pipeconf |= PIPECONF_INTERLACED_ILK;
4671                 /* the chip adds 2 halflines automatically */
4672                 adjusted_mode->crtc_vtotal -= 1;
4673                 adjusted_mode->crtc_vblank_end -= 1;
4674                 I915_WRITE(VSYNCSHIFT(pipe),
4675                            adjusted_mode->crtc_hsync_start
4676                            - adjusted_mode->crtc_htotal/2);
4677         } else {
4678                 pipeconf |= PIPECONF_PROGRESSIVE;
4679                 I915_WRITE(VSYNCSHIFT(pipe), 0);
4680         }
4681
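             /* Each timing register packs two zero-based fields: the
              * active/start count in the low 16 bits and the total/end count
              * in the high 16 bits.  For instance, a 1920-wide mode with an
              * htotal of 2200 programs HTOTAL as (1920 - 1) | ((2200 - 1) << 16).
              */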
4682         I915_WRITE(HTOTAL(pipe),
4683                    (adjusted_mode->crtc_hdisplay - 1) |
4684                    ((adjusted_mode->crtc_htotal - 1) << 16));
4685         I915_WRITE(HBLANK(pipe),
4686                    (adjusted_mode->crtc_hblank_start - 1) |
4687                    ((adjusted_mode->crtc_hblank_end - 1) << 16));
4688         I915_WRITE(HSYNC(pipe),
4689                    (adjusted_mode->crtc_hsync_start - 1) |
4690                    ((adjusted_mode->crtc_hsync_end - 1) << 16));
4691
4692         I915_WRITE(VTOTAL(pipe),
4693                    (adjusted_mode->crtc_vdisplay - 1) |
4694                    ((adjusted_mode->crtc_vtotal - 1) << 16));
4695         I915_WRITE(VBLANK(pipe),
4696                    (adjusted_mode->crtc_vblank_start - 1) |
4697                    ((adjusted_mode->crtc_vblank_end - 1) << 16));
4698         I915_WRITE(VSYNC(pipe),
4699                    (adjusted_mode->crtc_vsync_start - 1) |
4700                    ((adjusted_mode->crtc_vsync_end - 1) << 16));
4701
4702         /* pipesrc controls the size that is scaled from, which should
4703          * always be the user's requested size.
4704          */
4705         I915_WRITE(PIPESRC(pipe),
4706                    ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
4707
4708         I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
4709         I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
4710         I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
4711         I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
4712
4713         if (is_cpu_edp)
4714                 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
4715
4716         I915_WRITE(PIPECONF(pipe), pipeconf);
4717         POSTING_READ(PIPECONF(pipe));
4718
4719         intel_wait_for_vblank(dev, pipe);
4720
4721         I915_WRITE(DSPCNTR(plane), dspcntr);
4722         POSTING_READ(DSPCNTR(plane));
4723
4724         ret = intel_pipe_set_base(crtc, x, y, old_fb);
4725
4726         intel_update_watermarks(dev);
4727
4728         intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
4729
4730         return ret;
4731 }
4732
4733 static int intel_crtc_mode_set(struct drm_crtc *crtc,
4734                                struct drm_display_mode *mode,
4735                                struct drm_display_mode *adjusted_mode,
4736                                int x, int y,
4737                                struct drm_framebuffer *old_fb)
4738 {
4739         struct drm_device *dev = crtc->dev;
4740         struct drm_i915_private *dev_priv = dev->dev_private;
4741         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4742         int pipe = intel_crtc->pipe;
4743         int ret;
4744
4745         drm_vblank_pre_modeset(dev, pipe);
4746
4747         ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
4748                                               x, y, old_fb);
4749         drm_vblank_post_modeset(dev, pipe);
4750
4751         if (ret)
4752                 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
4753         else
4754                 intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
4755
4756         return ret;
4757 }
4758
4759 static bool intel_eld_uptodate(struct drm_connector *connector,
4760                                int reg_eldv, uint32_t bits_eldv,
4761                                int reg_elda, uint32_t bits_elda,
4762                                int reg_edid)
4763 {
4764         struct drm_i915_private *dev_priv = connector->dev->dev_private;
4765         uint8_t *eld = connector->eld;
4766         uint32_t i;
4767
4768         i = I915_READ(reg_eldv);
4769         i &= bits_eldv;
4770
4771         if (!eld[0])
4772                 return !i;
4773
4774         if (!i)
4775                 return false;
4776
4777         i = I915_READ(reg_elda);
4778         i &= ~bits_elda;
4779         I915_WRITE(reg_elda, i);
4780
4781         for (i = 0; i < eld[2]; i++)
4782                 if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
4783                         return false;
4784
4785         return true;
4786 }
4787
4788 static void g4x_write_eld(struct drm_connector *connector,
4789                           struct drm_crtc *crtc)
4790 {
4791         struct drm_i915_private *dev_priv = connector->dev->dev_private;
4792         uint8_t *eld = connector->eld;
4793         uint32_t eldv;
4794         uint32_t len;
4795         uint32_t i;
4796
4797         i = I915_READ(G4X_AUD_VID_DID);
4798
4799         if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
4800                 eldv = G4X_ELDV_DEVCL_DEVBLC;
4801         else
4802                 eldv = G4X_ELDV_DEVCTG;
4803
4804         if (intel_eld_uptodate(connector,
4805                                G4X_AUD_CNTL_ST, eldv,
4806                                G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
4807                                G4X_HDMIW_HDMIEDID))
4808                 return;
4809
4810         i = I915_READ(G4X_AUD_CNTL_ST);
4811         i &= ~(eldv | G4X_ELD_ADDR);
4812         len = (i >> 9) & 0x1f;          /* ELD buffer size */
4813         I915_WRITE(G4X_AUD_CNTL_ST, i);
4814
4815         if (!eld[0])
4816                 return;
4817
4818         len = min_t(uint8_t, eld[2], len);
4819         DRM_DEBUG_DRIVER("ELD size %d\n", len);
4820         for (i = 0; i < len; i++)
4821                 I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
4822
4823         i = I915_READ(G4X_AUD_CNTL_ST);
4824         i |= eldv;
4825         I915_WRITE(G4X_AUD_CNTL_ST, i);
4826 }
4827
4828 static void ironlake_write_eld(struct drm_connector *connector,
4829                                      struct drm_crtc *crtc)
4830 {
4831         struct drm_i915_private *dev_priv = connector->dev->dev_private;
4832         uint8_t *eld = connector->eld;
4833         uint32_t eldv;
4834         uint32_t i;
4835         int len;
4836         int hdmiw_hdmiedid;
4837         int aud_config;
4838         int aud_cntl_st;
4839         int aud_cntrl_st2;
4840
4841         if (HAS_PCH_IBX(connector->dev)) {
4842                 hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
4843                 aud_config = IBX_AUD_CONFIG_A;
4844                 aud_cntl_st = IBX_AUD_CNTL_ST_A;
4845                 aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
4846         } else {
4847                 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
4848                 aud_config = CPT_AUD_CONFIG_A;
4849                 aud_cntl_st = CPT_AUD_CNTL_ST_A;
4850                 aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
4851         }
4852
4853         i = to_intel_crtc(crtc)->pipe;
4854         hdmiw_hdmiedid += i * 0x100;
4855         aud_cntl_st += i * 0x100;
4856         aud_config += i * 0x100;
4857
4858         DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
4859
4860         i = I915_READ(aud_cntl_st);
4861         i = (i >> 29) & 0x3;            /* DIP_Port_Select, 0x1 = PortB */
4862         if (!i) {
4863                 DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
4864                 /* operate blindly on all ports */
4865                 eldv = IBX_ELD_VALIDB;
4866                 eldv |= IBX_ELD_VALIDB << 4;
4867                 eldv |= IBX_ELD_VALIDB << 8;
4868         } else {
4869                 DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
4870                 eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
4871         }
4872
4873         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
4874                 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
4875                 eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
4876                 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
4877         } else
4878                 I915_WRITE(aud_config, 0);
4879
4880         if (intel_eld_uptodate(connector,
4881                                aud_cntrl_st2, eldv,
4882                                aud_cntl_st, IBX_ELD_ADDRESS,
4883                                hdmiw_hdmiedid))
4884                 return;
4885
4886         i = I915_READ(aud_cntrl_st2);
4887         i &= ~eldv;
4888         I915_WRITE(aud_cntrl_st2, i);
4889
4890         if (!eld[0])
4891                 return;
4892
4893         i = I915_READ(aud_cntl_st);
4894         i &= ~IBX_ELD_ADDRESS;
4895         I915_WRITE(aud_cntl_st, i);
4896
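             /* eld[2] is the baseline ELD length in 4-byte units, hence the
              * cap of 21 dwords (84 bytes) and the dword-sized writes below.
              */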
4897         len = min_t(uint8_t, eld[2], 21);       /* 84 bytes of hw ELD buffer */
4898         DRM_DEBUG_DRIVER("ELD size %d\n", len);
4899         for (i = 0; i < len; i++)
4900                 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
4901
4902         i = I915_READ(aud_cntrl_st2);
4903         i |= eldv;
4904         I915_WRITE(aud_cntrl_st2, i);
4905 }
4906
4907 void intel_write_eld(struct drm_encoder *encoder,
4908                      struct drm_display_mode *mode)
4909 {
4910         struct drm_crtc *crtc = encoder->crtc;
4911         struct drm_connector *connector;
4912         struct drm_device *dev = encoder->dev;
4913         struct drm_i915_private *dev_priv = dev->dev_private;
4914
4915         connector = drm_select_eld(encoder, mode);
4916         if (!connector)
4917                 return;
4918
4919         DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
4920                          connector->base.id,
4921                          drm_get_connector_name(connector),
4922                          connector->encoder->base.id,
4923                          drm_get_encoder_name(connector->encoder));
4924
4925         connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
4926
4927         if (dev_priv->display.write_eld)
4928                 dev_priv->display.write_eld(connector, crtc);
4929 }
4930
4931 /** Loads the palette/gamma unit for the CRTC with the prepared values */
4932 void intel_crtc_load_lut(struct drm_crtc *crtc)
4933 {
4934         struct drm_device *dev = crtc->dev;
4935         struct drm_i915_private *dev_priv = dev->dev_private;
4936         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4937         int palreg = PALETTE(intel_crtc->pipe);
4938         int i;
4939
4940         /* The clocks have to be on to load the palette. */
4941         if (!crtc->enabled || !intel_crtc->active)
4942                 return;
4943
4944         /* use legacy palette for Ironlake */
4945         if (HAS_PCH_SPLIT(dev))
4946                 palreg = LGC_PALETTE(intel_crtc->pipe);
4947
4948         for (i = 0; i < 256; i++) {
4949                 I915_WRITE(palreg + 4 * i,
4950                            (intel_crtc->lut_r[i] << 16) |
4951                            (intel_crtc->lut_g[i] << 8) |
4952                            intel_crtc->lut_b[i]);
4953         }
4954 }
4955
4956 static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
4957 {
4958         struct drm_device *dev = crtc->dev;
4959         struct drm_i915_private *dev_priv = dev->dev_private;
4960         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4961         bool visible = base != 0;
4962         u32 cntl;
4963
4964         if (intel_crtc->cursor_visible == visible)
4965                 return;
4966
4967         cntl = I915_READ(_CURACNTR);
4968         if (visible) {
4969                 /* On these chipsets we can only modify the base whilst
4970                  * the cursor is disabled.
4971                  */
4972                 I915_WRITE(_CURABASE, base);
4973
4974                 cntl &= ~(CURSOR_FORMAT_MASK);
4975                 /* XXX width must be 64, stride 256 => 0x00 << 28 */
4976                 cntl |= CURSOR_ENABLE |
4977                         CURSOR_GAMMA_ENABLE |
4978                         CURSOR_FORMAT_ARGB;
4979         } else
4980                 cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
4981         I915_WRITE(_CURACNTR, cntl);
4982
4983         intel_crtc->cursor_visible = visible;
4984 }
4985
4986 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
4987 {
4988         struct drm_device *dev = crtc->dev;
4989         struct drm_i915_private *dev_priv = dev->dev_private;
4990         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4991         int pipe = intel_crtc->pipe;
4992         bool visible = base != 0;
4993
4994         if (intel_crtc->cursor_visible != visible) {
4995                 uint32_t cntl = I915_READ(CURCNTR(pipe));
4996                 if (base) {
4997                         cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
4998                         cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
4999                         cntl |= pipe << 28; /* Connect to correct pipe */
5000                 } else {
5001                         cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
5002                         cntl |= CURSOR_MODE_DISABLE;
5003                 }
5004                 I915_WRITE(CURCNTR(pipe), cntl);
5005
5006                 intel_crtc->cursor_visible = visible;
5007         }
5008         /* and commit changes on next vblank */
5009         I915_WRITE(CURBASE(pipe), base);
5010 }
5011
5012 static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
5013 {
5014         struct drm_device *dev = crtc->dev;
5015         struct drm_i915_private *dev_priv = dev->dev_private;
5016         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5017         int pipe = intel_crtc->pipe;
5018         bool visible = base != 0;
5019
5020         if (intel_crtc->cursor_visible != visible) {
5021                 uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
5022                 if (base) {
5023                         cntl &= ~CURSOR_MODE;
5024                         cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
5025                 } else {
5026                         cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
5027                         cntl |= CURSOR_MODE_DISABLE;
5028                 }
5029                 I915_WRITE(CURCNTR_IVB(pipe), cntl);
5030
5031                 intel_crtc->cursor_visible = visible;
5032         }
5033         /* and commit changes on next vblank */
5034         I915_WRITE(CURBASE_IVB(pipe), base);
5035 }
5036
5037 /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
5038 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
5039                                      bool on)
5040 {
5041         struct drm_device *dev = crtc->dev;
5042         struct drm_i915_private *dev_priv = dev->dev_private;
5043         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5044         int pipe = intel_crtc->pipe;
5045         int x = intel_crtc->cursor_x;
5046         int y = intel_crtc->cursor_y;
5047         u32 base, pos;
5048         bool visible;
5049
5050         pos = 0;
5051
5052         if (on && crtc->enabled && crtc->fb) {
5053                 base = intel_crtc->cursor_addr;
5054                 if (x > (int) crtc->fb->width)
5055                         base = 0;
5056
5057                 if (y > (int) crtc->fb->height)
5058                         base = 0;
5059         } else
5060                 base = 0;
5061
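             /* The cursor position register takes sign/magnitude coordinates:
              * e.g. x == -10 is programmed as CURSOR_POS_SIGN plus a
              * magnitude of 10 in the X field, and the cursor is disabled
              * (base = 0) once it has moved fully off-screen.
              */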
5062         if (x < 0) {
5063                 if (x + intel_crtc->cursor_width < 0)
5064                         base = 0;
5065
5066                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
5067                 x = -x;
5068         }
5069         pos |= x << CURSOR_X_SHIFT;
5070
5071         if (y < 0) {
5072                 if (y + intel_crtc->cursor_height < 0)
5073                         base = 0;
5074
5075                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
5076                 y = -y;
5077         }
5078         pos |= y << CURSOR_Y_SHIFT;
5079
5080         visible = base != 0;
5081         if (!visible && !intel_crtc->cursor_visible)
5082                 return;
5083
5084         if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
5085                 I915_WRITE(CURPOS_IVB(pipe), pos);
5086                 ivb_update_cursor(crtc, base);
5087         } else {
5088                 I915_WRITE(CURPOS(pipe), pos);
5089                 if (IS_845G(dev) || IS_I865G(dev))
5090                         i845_update_cursor(crtc, base);
5091                 else
5092                         i9xx_update_cursor(crtc, base);
5093         }
5094 }
5095
5096 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
5097                                  struct drm_file *file,
5098                                  uint32_t handle,
5099                                  uint32_t width, uint32_t height)
5100 {
5101         struct drm_device *dev = crtc->dev;
5102         struct drm_i915_private *dev_priv = dev->dev_private;
5103         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5104         struct drm_i915_gem_object *obj;
5105         uint32_t addr;
5106         int ret;
5107
5108         DRM_DEBUG_KMS("\n");
5109
5110         /* if we want to turn off the cursor, ignore width and height */
5111         if (!handle) {
5112                 DRM_DEBUG_KMS("cursor off\n");
5113                 addr = 0;
5114                 obj = NULL;
5115                 mutex_lock(&dev->struct_mutex);
5116                 goto finish;
5117         }
5118
5119         /* Currently we only support 64x64 cursors */
5120         if (width != 64 || height != 64) {
5121                 DRM_ERROR("we currently only support 64x64 cursors\n");
5122                 return -EINVAL;
5123         }
5124
5125         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
5126         if (&obj->base == NULL)
5127                 return -ENOENT;
5128
5129         if (obj->base.size < width * height * 4) {
5130                 DRM_ERROR("buffer is too small\n");
5131                 ret = -ENOMEM;
5132                 goto fail;
5133         }
5134
5135         /* we only need to pin inside GTT if cursor is non-phy */
5136         mutex_lock(&dev->struct_mutex);
5137         if (!dev_priv->info->cursor_needs_physical) {
5138                 if (obj->tiling_mode) {
5139                         DRM_ERROR("cursor cannot be tiled\n");
5140                         ret = -EINVAL;
5141                         goto fail_locked;
5142                 }
5143
5144                 ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
5145                 if (ret) {
5146                         DRM_ERROR("failed to move cursor bo into the GTT\n");
5147                         goto fail_locked;
5148                 }
5149
5150                 ret = i915_gem_object_put_fence(obj);
5151                 if (ret) {
5152                         DRM_ERROR("failed to release fence for cursor\n");
5153                         goto fail_unpin;
5154                 }
5155
5156                 addr = obj->gtt_offset;
5157         } else {
5158                 int align = IS_I830(dev) ? 16 * 1024 : 256;
5159                 ret = i915_gem_attach_phys_object(dev, obj,
5160                                                   (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
5161                                                   align);
5162                 if (ret) {
5163                         DRM_ERROR("failed to attach phys object\n");
5164                         goto fail_locked;
5165                 }
5166                 addr = obj->phys_obj->handle->busaddr;
5167         }
5168
5169         if (IS_GEN2(dev))
5170                 I915_WRITE(CURSIZE, (height << 12) | width);
5171
5172  finish:
5173         if (intel_crtc->cursor_bo) {
5174                 if (dev_priv->info->cursor_needs_physical) {
5175                         if (intel_crtc->cursor_bo != obj)
5176                                 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
5177                 } else
5178                         i915_gem_object_unpin(intel_crtc->cursor_bo);
5179                 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
5180         }
5181
5182         mutex_unlock(&dev->struct_mutex);
5183
5184         intel_crtc->cursor_addr = addr;
5185         intel_crtc->cursor_bo = obj;
5186         intel_crtc->cursor_width = width;
5187         intel_crtc->cursor_height = height;
5188
5189         intel_crtc_update_cursor(crtc, true);
5190
5191         return 0;
5192 fail_unpin:
5193         i915_gem_object_unpin(obj);
5194 fail_locked:
5195         mutex_unlock(&dev->struct_mutex);
5196 fail:
5197         drm_gem_object_unreference_unlocked(&obj->base);
5198         return ret;
5199 }
5200
5201 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
5202 {
5203         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5204
5205         intel_crtc->cursor_x = x;
5206         intel_crtc->cursor_y = y;
5207
5208         intel_crtc_update_cursor(crtc, true);
5209
5210         return 0;
5211 }
5212
5213 /** Sets the color ramps on behalf of RandR */
5214 void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
5215                                  u16 blue, int regno)
5216 {
5217         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5218
5219         intel_crtc->lut_r[regno] = red >> 8;
5220         intel_crtc->lut_g[regno] = green >> 8;
5221         intel_crtc->lut_b[regno] = blue >> 8;
5222 }
5223
5224 void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
5225                              u16 *blue, int regno)
5226 {
5227         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5228
5229         *red = intel_crtc->lut_r[regno] << 8;
5230         *green = intel_crtc->lut_g[regno] << 8;
5231         *blue = intel_crtc->lut_b[regno] << 8;
5232 }
5233
5234 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
5235                                  u16 *blue, uint32_t start, uint32_t size)
5236 {
5237         int end = (start + size > 256) ? 256 : start + size, i;
5238         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5239
5240         for (i = start; i < end; i++) {
5241                 intel_crtc->lut_r[i] = red[i] >> 8;
5242                 intel_crtc->lut_g[i] = green[i] >> 8;
5243                 intel_crtc->lut_b[i] = blue[i] >> 8;
5244         }
5245
5246         intel_crtc_load_lut(crtc);
5247 }
5248
5249 /**
5250  * Get a pipe with a simple mode set on it for doing load-based monitor
5251  * detection.
5252  *
5253  * It will be up to the load-detect code to adjust the pipe as appropriate for
5254  * its requirements.  The pipe will be connected to no other encoders.
5255  *
5256  * Currently this code will only succeed if there is a pipe with no encoders
5257  * configured for it.  In the future, it could choose to temporarily disable
5258  * some outputs to free up a pipe for its use.
5259  *
5260  * \return crtc, or NULL if no pipes are available.
5261  */
5262
5263 /* VESA 640x480x72Hz mode to set on the pipe */
5264 static struct drm_display_mode load_detect_mode = {
5265         DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
5266                  704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
5267 };
5268
5269 static struct drm_framebuffer *
5270 intel_framebuffer_create(struct drm_device *dev,
5271                          struct drm_mode_fb_cmd2 *mode_cmd,
5272                          struct drm_i915_gem_object *obj)
5273 {
5274         struct intel_framebuffer *intel_fb;
5275         int ret;
5276
5277         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
5278         if (!intel_fb) {
5279                 drm_gem_object_unreference_unlocked(&obj->base);
5280                 return ERR_PTR(-ENOMEM);
5281         }
5282
5283         ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
5284         if (ret) {
5285                 drm_gem_object_unreference_unlocked(&obj->base);
5286                 kfree(intel_fb);
5287                 return ERR_PTR(ret);
5288         }
5289
5290         return &intel_fb->base;
5291 }
5292
5293 static u32
5294 intel_framebuffer_pitch_for_width(int width, int bpp)
5295 {
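             /* e.g. a 640-pixel-wide, 32 bpp buffer needs
              * DIV_ROUND_UP(640 * 32, 8) = 2560 bytes per scanline, which is
              * already a multiple of 64.
              */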
5296         u32 pitch = DIV_ROUND_UP(width * bpp, 8);
5297         return ALIGN(pitch, 64);
5298 }
5299
5300 static u32
5301 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
5302 {
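             /* Continuing the example above, the 640x480 load-detect mode at
              * 32 bpp needs ALIGN(2560 * 480, PAGE_SIZE) = 1228800 bytes,
              * i.e. exactly 300 pages with 4 KiB pages.
              */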
5303         u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
5304         return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
5305 }
5306
5307 static struct drm_framebuffer *
5308 intel_framebuffer_create_for_mode(struct drm_device *dev,
5309                                   struct drm_display_mode *mode,
5310                                   int depth, int bpp)
5311 {
5312         struct drm_i915_gem_object *obj;
5313         struct drm_mode_fb_cmd2 mode_cmd;
5314
5315         obj = i915_gem_alloc_object(dev,
5316                                     intel_framebuffer_size_for_mode(mode, bpp));
5317         if (obj == NULL)
5318                 return ERR_PTR(-ENOMEM);
5319
5320         mode_cmd.width = mode->hdisplay;
5321         mode_cmd.height = mode->vdisplay;
5322         mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
5323                                                                 bpp);
5324         mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
5325
5326         return intel_framebuffer_create(dev, &mode_cmd, obj);
5327 }
5328
5329 static struct drm_framebuffer *
5330 mode_fits_in_fbdev(struct drm_device *dev,
5331                    struct drm_display_mode *mode)
5332 {
5333         struct drm_i915_private *dev_priv = dev->dev_private;
5334         struct drm_i915_gem_object *obj;
5335         struct drm_framebuffer *fb;
5336
5337         if (dev_priv->fbdev == NULL)
5338                 return NULL;
5339
5340         obj = dev_priv->fbdev->ifb.obj;
5341         if (obj == NULL)
5342                 return NULL;
5343
5344         fb = &dev_priv->fbdev->ifb.base;
5345         if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
5346                                                                fb->bits_per_pixel))
5347                 return NULL;
5348
5349         if (obj->base.size < mode->vdisplay * fb->pitches[0])
5350                 return NULL;
5351
5352         return fb;
5353 }
5354
5355 bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
5356                                 struct drm_connector *connector,
5357                                 struct drm_display_mode *mode,
5358                                 struct intel_load_detect_pipe *old)
5359 {
5360         struct intel_crtc *intel_crtc;
5361         struct drm_crtc *possible_crtc;
5362         struct drm_encoder *encoder = &intel_encoder->base;
5363         struct drm_crtc *crtc = NULL;
5364         struct drm_device *dev = encoder->dev;
5365         struct drm_framebuffer *old_fb;
5366         int i = -1;
5367
5368         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5369                       connector->base.id, drm_get_connector_name(connector),
5370                       encoder->base.id, drm_get_encoder_name(encoder));
5371
5372         /*
5373          * Algorithm gets a little messy:
5374          *
5375          *   - if the connector already has an assigned crtc, use it (but make
5376          *     sure it's on first)
5377          *
5378          *   - try to find the first unused crtc that can drive this connector,
5379          *     and use that if we find one
5380          */
5381
5382         /* See if we already have a CRTC for this connector */
5383         if (encoder->crtc) {
5384                 crtc = encoder->crtc;
5385
5386                 intel_crtc = to_intel_crtc(crtc);
5387                 old->dpms_mode = intel_crtc->dpms_mode;
5388                 old->load_detect_temp = false;
5389
5390                 /* Make sure the crtc and connector are running */
5391                 if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
5392                         struct drm_encoder_helper_funcs *encoder_funcs;
5393                         struct drm_crtc_helper_funcs *crtc_funcs;
5394
5395                         crtc_funcs = crtc->helper_private;
5396                         crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
5397
5398                         encoder_funcs = encoder->helper_private;
5399                         encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
5400                 }
5401
5402                 return true;
5403         }
5404
5405         /* Find an unused one (if possible) */
5406         list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
5407                 i++;
5408                 if (!(encoder->possible_crtcs & (1 << i)))
5409                         continue;
5410                 if (!possible_crtc->enabled) {
5411                         crtc = possible_crtc;
5412                         break;
5413                 }
5414         }
5415
5416         /*
5417          * If we didn't find an unused CRTC, don't use any.
5418          */
5419         if (!crtc) {
5420                 DRM_DEBUG_KMS("no pipe available for load-detect\n");
5421                 return false;
5422         }
5423
5424         encoder->crtc = crtc;
5425         connector->encoder = encoder;
5426
5427         intel_crtc = to_intel_crtc(crtc);
5428         old->dpms_mode = intel_crtc->dpms_mode;
5429         old->load_detect_temp = true;
5430         old->release_fb = NULL;
5431
5432         if (!mode)
5433                 mode = &load_detect_mode;
5434
5435         old_fb = crtc->fb;
5436
5437         /* We need a framebuffer large enough to accommodate all accesses
5438          * that the plane may generate whilst we perform load detection.
5439          * We cannot rely on the fbcon either being present (we get called
5440          * during its initialisation to detect all boot displays, or it may
5441          * not even exist) or being large enough to satisfy the
5442          * requested mode.
5443          */
5444         crtc->fb = mode_fits_in_fbdev(dev, mode);
5445         if (crtc->fb == NULL) {
5446                 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
5447                 crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
5448                 old->release_fb = crtc->fb;
5449         } else
5450                 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
5451         if (IS_ERR(crtc->fb)) {
5452                 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
5453                 crtc->fb = old_fb;
5454                 return false;
5455         }
5456
5457         if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
5458                 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
5459                 if (old->release_fb)
5460                         old->release_fb->funcs->destroy(old->release_fb);
5461                 crtc->fb = old_fb;
5462                 return false;
5463         }
5464
5465         /* let the connector get through one full cycle before testing */
5466         intel_wait_for_vblank(dev, intel_crtc->pipe);
5467
5468         return true;
5469 }
5470
5471 void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
5472                                     struct drm_connector *connector,
5473                                     struct intel_load_detect_pipe *old)
5474 {
5475         struct drm_encoder *encoder = &intel_encoder->base;
5476         struct drm_device *dev = encoder->dev;
5477         struct drm_crtc *crtc = encoder->crtc;
5478         struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
5479         struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
5480
5481         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5482                       connector->base.id, drm_get_connector_name(connector),
5483                       encoder->base.id, drm_get_encoder_name(encoder));
5484
5485         if (old->load_detect_temp) {
5486                 connector->encoder = NULL;
5487                 drm_helper_disable_unused_functions(dev);
5488
5489                 if (old->release_fb)
5490                         old->release_fb->funcs->destroy(old->release_fb);
5491
5492                 return;
5493         }
5494
5495         /* Switch crtc and encoder back off if necessary */
5496         if (old->dpms_mode != DRM_MODE_DPMS_ON) {
5497                 encoder_funcs->dpms(encoder, old->dpms_mode);
5498                 crtc_funcs->dpms(crtc, old->dpms_mode);
5499         }
5500 }
5501
5502 /* Returns the clock of the currently programmed mode of the given pipe. */
5503 static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
5504 {
5505         struct drm_i915_private *dev_priv = dev->dev_private;
5506         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5507         int pipe = intel_crtc->pipe;
5508         u32 dpll = I915_READ(DPLL(pipe));
5509         u32 fp;
5510         intel_clock_t clock;
5511
5512         if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
5513                 fp = I915_READ(FP0(pipe));
5514         else
5515                 fp = I915_READ(FP1(pipe));
5516
5517         clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
5518         if (IS_PINEVIEW(dev)) {
5519                 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
5520                 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
5521         } else {
5522                 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
5523                 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
5524         }
5525
5526         if (!IS_GEN2(dev)) {
5527                 if (IS_PINEVIEW(dev))
5528                         clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
5529                                 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
5530                 else
5531                         clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
5532                                DPLL_FPA01_P1_POST_DIV_SHIFT);
5533
5534                 switch (dpll & DPLL_MODE_MASK) {
5535                 case DPLLB_MODE_DAC_SERIAL:
5536                         clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
5537                                 5 : 10;
5538                         break;
5539                 case DPLLB_MODE_LVDS:
5540                         clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
5541                                 7 : 14;
5542                         break;
5543                 default:
5544                         DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
5545                                   "mode\n", (int)(dpll & DPLL_MODE_MASK));
5546                         return 0;
5547                 }
5548
5549                 /* XXX: Handle the 100MHz refclk */
5550                 intel_clock(dev, 96000, &clock);
5551         } else {
5552                 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
5553
5554                 if (is_lvds) {
5555                         clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
5556                                        DPLL_FPA01_P1_POST_DIV_SHIFT);
5557                         clock.p2 = 14;
5558
5559                         if ((dpll & PLL_REF_INPUT_MASK) ==
5560                             PLLB_REF_INPUT_SPREADSPECTRUMIN) {
5561                                 /* XXX: might not be 66MHz */
5562                                 intel_clock(dev, 66000, &clock);
5563                         } else
5564                                 intel_clock(dev, 48000, &clock);
5565                 } else {
5566                         if (dpll & PLL_P1_DIVIDE_BY_TWO)
5567                                 clock.p1 = 2;
5568                         else {
5569                                 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
5570                                             DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
5571                         }
5572                         if (dpll & PLL_P2_DIVIDE_BY_4)
5573                                 clock.p2 = 4;
5574                         else
5575                                 clock.p2 = 2;
5576
5577                         intel_clock(dev, 48000, &clock);
5578                 }
5579         }
5580
5581         /* XXX: It would be nice to validate the clocks, but we can't reuse
5582          * i830PllIsValid() because it relies on the xf86_config connector
5583          * configuration being accurate, which it isn't necessarily.
5584          */
5585
5586         return clock.dot;
5587 }
5588
5589 /** Returns the currently programmed mode of the given pipe. */
5590 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
5591                                              struct drm_crtc *crtc)
5592 {
5593         struct drm_i915_private *dev_priv = dev->dev_private;
5594         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5595         int pipe = intel_crtc->pipe;
5596         struct drm_display_mode *mode;
5597         int htot = I915_READ(HTOTAL(pipe));
5598         int hsync = I915_READ(HSYNC(pipe));
5599         int vtot = I915_READ(VTOTAL(pipe));
5600         int vsync = I915_READ(VSYNC(pipe));
5601
5602         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
5603         if (!mode)
5604                 return NULL;
5605
5606         mode->clock = intel_crtc_clock_get(dev, crtc);
5607         mode->hdisplay = (htot & 0xffff) + 1;
5608         mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
5609         mode->hsync_start = (hsync & 0xffff) + 1;
5610         mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
5611         mode->vdisplay = (vtot & 0xffff) + 1;
5612         mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
5613         mode->vsync_start = (vsync & 0xffff) + 1;
5614         mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
5615
5616         drm_mode_set_name(mode);
5617
5618         return mode;
5619 }
5620
5621 #define GPU_IDLE_TIMEOUT 500 /* ms */
5622
5623 /* When this timer fires, we've been idle for a while */
5624 static void intel_gpu_idle_timer(unsigned long arg)
5625 {
5626         struct drm_device *dev = (struct drm_device *)arg;
5627         drm_i915_private_t *dev_priv = dev->dev_private;
5628
5629         if (!list_empty(&dev_priv->mm.active_list)) {
5630                 /* Still processing requests, so just re-arm the timer. */
5631                 mod_timer(&dev_priv->idle_timer, jiffies +
5632                           msecs_to_jiffies(GPU_IDLE_TIMEOUT));
5633                 return;
5634         }
5635
5636         dev_priv->busy = false;
5637         queue_work(dev_priv->wq, &dev_priv->idle_work);
5638 }
5639
5640 #define CRTC_IDLE_TIMEOUT 1000 /* ms */
5641
5642 static void intel_crtc_idle_timer(unsigned long arg)
5643 {
5644         struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
5645         struct drm_crtc *crtc = &intel_crtc->base;
5646         drm_i915_private_t *dev_priv = crtc->dev->dev_private;
5647         struct intel_framebuffer *intel_fb;
5648
5649         intel_fb = to_intel_framebuffer(crtc->fb);
5650         if (intel_fb && intel_fb->obj->active) {
5651                 /* The framebuffer is still being accessed by the GPU. */
5652                 mod_timer(&intel_crtc->idle_timer, jiffies +
5653                           msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
5654                 return;
5655         }
5656
5657         intel_crtc->busy = false;
5658         queue_work(dev_priv->wq, &dev_priv->idle_work);
5659 }
5660
5661 static void intel_increase_pllclock(struct drm_crtc *crtc)
5662 {
5663         struct drm_device *dev = crtc->dev;
5664         drm_i915_private_t *dev_priv = dev->dev_private;
5665         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5666         int pipe = intel_crtc->pipe;
5667         int dpll_reg = DPLL(pipe);
5668         int dpll;
5669
5670         if (HAS_PCH_SPLIT(dev))
5671                 return;
5672
5673         if (!dev_priv->lvds_downclock_avail)
5674                 return;
5675
5676         dpll = I915_READ(dpll_reg);
5677         if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
5678                 DRM_DEBUG_DRIVER("upclocking LVDS\n");
5679
5680                 assert_panel_unlocked(dev_priv, pipe);
5681
5682                 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
5683                 I915_WRITE(dpll_reg, dpll);
5684                 intel_wait_for_vblank(dev, pipe);
5685
5686                 dpll = I915_READ(dpll_reg);
5687                 if (dpll & DISPLAY_RATE_SELECT_FPA1)
5688                         DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
5689         }
5690
5691         /* Schedule downclock */
5692         mod_timer(&intel_crtc->idle_timer, jiffies +
5693                   msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
5694 }
5695
5696 static void intel_decrease_pllclock(struct drm_crtc *crtc)
5697 {
5698         struct drm_device *dev = crtc->dev;
5699         drm_i915_private_t *dev_priv = dev->dev_private;
5700         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5701
5702         if (HAS_PCH_SPLIT(dev))
5703                 return;
5704
5705         if (!dev_priv->lvds_downclock_avail)
5706                 return;
5707
5708         /*
5709          * Since this is called by a timer, we should never get here in
5710          * the manual case.
5711          */
5712         if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
5713                 int pipe = intel_crtc->pipe;
5714                 int dpll_reg = DPLL(pipe);
5715                 int dpll;
5716
5717                 DRM_DEBUG_DRIVER("downclocking LVDS\n");
5718
5719                 assert_panel_unlocked(dev_priv, pipe);
5720
5721                 dpll = I915_READ(dpll_reg);
5722                 dpll |= DISPLAY_RATE_SELECT_FPA1;
5723                 I915_WRITE(dpll_reg, dpll);
5724                 intel_wait_for_vblank(dev, pipe);
5725                 dpll = I915_READ(dpll_reg);
5726                 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
5727                         DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
5728         }
5729
5730 }
5731
5732 /**
5733  * intel_idle_update - adjust clocks for idleness
5734  * @work: work struct
5735  *
5736  * Either the GPU or display (or both) went idle.  Check the busy status
5737  * here and adjust the CRTC and GPU clocks as necessary.
5738  */
5739 static void intel_idle_update(struct work_struct *work)
5740 {
5741         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
5742                                                     idle_work);
5743         struct drm_device *dev = dev_priv->dev;
5744         struct drm_crtc *crtc;
5745         struct intel_crtc *intel_crtc;
5746
5747         if (!i915_powersave)
5748                 return;
5749
5750         mutex_lock(&dev->struct_mutex);
5751
5752         i915_update_gfx_val(dev_priv);
5753
5754         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5755                 /* Skip inactive CRTCs */
5756                 if (!crtc->fb)
5757                         continue;
5758
5759                 intel_crtc = to_intel_crtc(crtc);
5760                 if (!intel_crtc->busy)
5761                         intel_decrease_pllclock(crtc);
5762         }
5763
5764
5765         mutex_unlock(&dev->struct_mutex);
5766 }
5767
5768 /**
5769  * intel_mark_busy - mark the GPU and possibly the display busy
5770  * @dev: drm device
5771  * @obj: object we're operating on
5772  *
5773  * Callers can use this function to indicate that the GPU is busy processing
5774  * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
5775  * buffer), we'll also mark the display as busy, so we know to increase its
5776  * clock frequency.
5777  */
5778 void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
5779 {
5780         drm_i915_private_t *dev_priv = dev->dev_private;
5781         struct drm_crtc *crtc = NULL;
5782         struct intel_framebuffer *intel_fb;
5783         struct intel_crtc *intel_crtc;
5784
5785         if (!drm_core_check_feature(dev, DRIVER_MODESET))
5786                 return;
5787
5788         if (!dev_priv->busy) {
5789                 intel_sanitize_pm(dev);
5790                 dev_priv->busy = true;
5791         } else
5792                 mod_timer(&dev_priv->idle_timer, jiffies +
5793                           msecs_to_jiffies(GPU_IDLE_TIMEOUT));
5794
5795         if (obj == NULL)
5796                 return;
5797
5798         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5799                 if (!crtc->fb)
5800                         continue;
5801
5802                 intel_crtc = to_intel_crtc(crtc);
5803                 intel_fb = to_intel_framebuffer(crtc->fb);
5804                 if (intel_fb->obj == obj) {
5805                         if (!intel_crtc->busy) {
5806                                 /* Non-busy -> busy, upclock */
5807                                 intel_increase_pllclock(crtc);
5808                                 intel_crtc->busy = true;
5809                         } else {
5810                                 /* Busy -> busy, put off timer */
5811                                 mod_timer(&intel_crtc->idle_timer, jiffies +
5812                                           msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
5813                         }
5814                 }
5815         }
5816 }
5817
5818 static void intel_crtc_destroy(struct drm_crtc *crtc)
5819 {
5820         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5821         struct drm_device *dev = crtc->dev;
5822         struct intel_unpin_work *work;
5823         unsigned long flags;
5824
5825         spin_lock_irqsave(&dev->event_lock, flags);
5826         work = intel_crtc->unpin_work;
5827         intel_crtc->unpin_work = NULL;
5828         spin_unlock_irqrestore(&dev->event_lock, flags);
5829
5830         if (work) {
5831                 cancel_work_sync(&work->work);
5832                 kfree(work);
5833         }
5834
5835         drm_crtc_cleanup(crtc);
5836
5837         kfree(intel_crtc);
5838 }
5839
5840 static void intel_unpin_work_fn(struct work_struct *__work)
5841 {
5842         struct intel_unpin_work *work =
5843                 container_of(__work, struct intel_unpin_work, work);
5844
5845         mutex_lock(&work->dev->struct_mutex);
5846         intel_unpin_fb_obj(work->old_fb_obj);
5847         drm_gem_object_unreference(&work->pending_flip_obj->base);
5848         drm_gem_object_unreference(&work->old_fb_obj->base);
5849
5850         intel_update_fbc(work->dev);
5851         mutex_unlock(&work->dev->struct_mutex);
5852         kfree(work);
5853 }
5854
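/*
 * Common helper for the vblank-driven flip completion paths below: send
 * the pending vblank event (adjusting the timestamp if the interrupt
 * raced with the vblank bookkeeping), drop the vblank reference, clear
 * the plane's pending_flip bit and hand the old framebuffer over to the
 * unpin worker.
 */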
5855 static void do_intel_finish_page_flip(struct drm_device *dev,
5856                                       struct drm_crtc *crtc)
5857 {
5858         drm_i915_private_t *dev_priv = dev->dev_private;
5859         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5860         struct intel_unpin_work *work;
5861         struct drm_i915_gem_object *obj;
5862         struct drm_pending_vblank_event *e;
5863         struct timeval tnow, tvbl;
5864         unsigned long flags;
5865
5866         /* Ignore early vblank irqs */
5867         if (intel_crtc == NULL)
5868                 return;
5869
5870         do_gettimeofday(&tnow);
5871
5872         spin_lock_irqsave(&dev->event_lock, flags);
5873         work = intel_crtc->unpin_work;
5874         if (work == NULL || !work->pending) {
5875                 spin_unlock_irqrestore(&dev->event_lock, flags);
5876                 return;
5877         }
5878
5879         intel_crtc->unpin_work = NULL;
5880
5881         if (work->event) {
5882                 e = work->event;
5883                 e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
5884
5885                 /* Were we called before the vblank count and timestamps
5886                  * had been updated for the vblank interval of flip
5887                  * completion? If so, increment the vblank count and add
5888                  * one videorefresh duration to the returned timestamp to
5889                  * account for this. We assume this happened if we get
5890                  * called more than 0.9 frame durations after the last
5891                  * timestamped vblank.
5892                  *
5893                  * This calculation cannot be used with vrefresh rates
5894                  * below 5 Hz (10 Hz to be on the safe side) without
5895                  * promoting to 64-bit integers.
5896                  */
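                /*
                 * Illustrative example: for a 60 Hz mode, framedur_ns is
                 * roughly 16.67 ms, so the check below fires when more
                 * than about 15 ms (0.9 frame durations) have passed
                 * since the last timestamped vblank.
                 */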
5897                 if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
5898                     9 * crtc->framedur_ns) {
5899                         e->event.sequence++;
5900                         tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
5901                                              crtc->framedur_ns);
5902                 }
5903
5904                 e->event.tv_sec = tvbl.tv_sec;
5905                 e->event.tv_usec = tvbl.tv_usec;
5906
5907                 list_add_tail(&e->base.link,
5908                               &e->base.file_priv->event_list);
5909                 wake_up_interruptible(&e->base.file_priv->event_wait);
5910         }
5911
5912         drm_vblank_put(dev, intel_crtc->pipe);
5913
5914         spin_unlock_irqrestore(&dev->event_lock, flags);
5915
5916         obj = work->old_fb_obj;
5917
5918         atomic_clear_mask(1 << intel_crtc->plane,
5919                           &obj->pending_flip.counter);
5920         if (atomic_read(&obj->pending_flip) == 0)
5921                 wake_up(&dev_priv->pending_flip_queue);
5922
5923         schedule_work(&work->work);
5924
5925         trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
5926 }
5927
5928 void intel_finish_page_flip(struct drm_device *dev, int pipe)
5929 {
5930         drm_i915_private_t *dev_priv = dev->dev_private;
5931         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
5932
5933         do_intel_finish_page_flip(dev, crtc);
5934 }
5935
5936 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
5937 {
5938         drm_i915_private_t *dev_priv = dev->dev_private;
5939         struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
5940
5941         do_intel_finish_page_flip(dev, crtc);
5942 }
5943
5944 void intel_prepare_page_flip(struct drm_device *dev, int plane)
5945 {
5946         drm_i915_private_t *dev_priv = dev->dev_private;
5947         struct intel_crtc *intel_crtc =
5948                 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
5949         unsigned long flags;
5950
5951         spin_lock_irqsave(&dev->event_lock, flags);
5952         if (intel_crtc->unpin_work) {
5953                 if ((++intel_crtc->unpin_work->pending) > 1)
5954                         DRM_ERROR("Prepared flip multiple times\n");
5955         } else {
5956                 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
5957         }
5958         spin_unlock_irqrestore(&dev->event_lock, flags);
5959 }
5960
5961 static int intel_gen2_queue_flip(struct drm_device *dev,
5962                                  struct drm_crtc *crtc,
5963                                  struct drm_framebuffer *fb,
5964                                  struct drm_i915_gem_object *obj)
5965 {
5966         struct drm_i915_private *dev_priv = dev->dev_private;
5967         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5968         unsigned long offset;
5969         u32 flip_mask;
5970         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
5971         int ret;
5972
5973         ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
5974         if (ret)
5975                 goto err;
5976
5977         /* Offset into the new buffer for cases of shared fbs between CRTCs */
5978         offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
5979
5980         ret = intel_ring_begin(ring, 6);
5981         if (ret)
5982                 goto err_unpin;
5983
5984         /* Can't queue multiple flips, so wait for the previous
5985          * one to finish before executing the next.
5986          */
5987         if (intel_crtc->plane)
5988                 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
5989         else
5990                 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
5991         intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
5992         intel_ring_emit(ring, MI_NOOP);
5993         intel_ring_emit(ring, MI_DISPLAY_FLIP |
5994                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5995         intel_ring_emit(ring, fb->pitches[0]);
5996         intel_ring_emit(ring, obj->gtt_offset + offset);
5997         intel_ring_emit(ring, 0); /* aux display base address, unused */
5998         intel_ring_advance(ring);
5999         return 0;
6000
6001 err_unpin:
6002         intel_unpin_fb_obj(obj);
6003 err:
6004         return ret;
6005 }
6006
6007 static int intel_gen3_queue_flip(struct drm_device *dev,
6008                                  struct drm_crtc *crtc,
6009                                  struct drm_framebuffer *fb,
6010                                  struct drm_i915_gem_object *obj)
6011 {
6012         struct drm_i915_private *dev_priv = dev->dev_private;
6013         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6014         unsigned long offset;
6015         u32 flip_mask;
6016         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
6017         int ret;
6018
6019         ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6020         if (ret)
6021                 goto err;
6022
6023         /* Offset into the new buffer for cases of shared fbs between CRTCs */
6024         offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
6025
6026         ret = intel_ring_begin(ring, 6);
6027         if (ret)
6028                 goto err_unpin;
6029
6030         if (intel_crtc->plane)
6031                 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6032         else
6033                 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6034         intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
6035         intel_ring_emit(ring, MI_NOOP);
6036         intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
6037                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6038         intel_ring_emit(ring, fb->pitches[0]);
6039         intel_ring_emit(ring, obj->gtt_offset + offset);
6040         intel_ring_emit(ring, MI_NOOP);
6041
6042         intel_ring_advance(ring);
6043         return 0;
6044
6045 err_unpin:
6046         intel_unpin_fb_obj(obj);
6047 err:
6048         return ret;
6049 }
6050
6051 static int intel_gen4_queue_flip(struct drm_device *dev,
6052                                  struct drm_crtc *crtc,
6053                                  struct drm_framebuffer *fb,
6054                                  struct drm_i915_gem_object *obj)
6055 {
6056         struct drm_i915_private *dev_priv = dev->dev_private;
6057         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6058         uint32_t pf, pipesrc;
6059         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
6060         int ret;
6061
6062         ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6063         if (ret)
6064                 goto err;
6065
6066         ret = intel_ring_begin(ring, 4);
6067         if (ret)
6068                 goto err_unpin;
6069
6070         /* i965+ uses the linear or tiled offsets from the
6071          * Display Registers (which do not change across a page-flip)
6072          * so we need only reprogram the base address.
6073          */
6074         intel_ring_emit(ring, MI_DISPLAY_FLIP |
6075                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6076         intel_ring_emit(ring, fb->pitches[0]);
6077         intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode);
6078
6079         /* XXX Enabling the panel-fitter across page-flip is so far
6080          * untested on non-native modes, so ignore it for now.
6081          * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
6082          */
6083         pf = 0;
6084         pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6085         intel_ring_emit(ring, pf | pipesrc);
6086         intel_ring_advance(ring);
6087         return 0;
6088
6089 err_unpin:
6090         intel_unpin_fb_obj(obj);
6091 err:
6092         return ret;
6093 }
6094
6095 static int intel_gen6_queue_flip(struct drm_device *dev,
6096                                  struct drm_crtc *crtc,
6097                                  struct drm_framebuffer *fb,
6098                                  struct drm_i915_gem_object *obj)
6099 {
6100         struct drm_i915_private *dev_priv = dev->dev_private;
6101         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6102         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
6103         uint32_t pf, pipesrc;
6104         int ret;
6105
6106         ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6107         if (ret)
6108                 goto err;
6109
6110         ret = intel_ring_begin(ring, 4);
6111         if (ret)
6112                 goto err_unpin;
6113
6114         intel_ring_emit(ring, MI_DISPLAY_FLIP |
6115                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6116         intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
6117         intel_ring_emit(ring, obj->gtt_offset);
6118
6119         /* Contrary to the suggestions in the documentation,
6120          * "Enable Panel Fitter" does not seem to be required when page
6121          * flipping with a non-native mode, and, worse, causes a normal
6122          * modeset to fail.
6123          * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
6124          */
6125         pf = 0;
6126         pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6127         intel_ring_emit(ring, pf | pipesrc);
6128         intel_ring_advance(ring);
6129         return 0;
6130
6131 err_unpin:
6132         intel_unpin_fb_obj(obj);
6133 err:
6134         return ret;
6135 }
6136
6137 /*
6138  * On gen7 we currently use the blit ring because (in early silicon at least)
6139  * the render ring doesn't give us interrupts for page flip completion, which
6140  * means clients will hang after the first flip is queued.  Fortunately the
6141  * blit ring generates interrupts properly, so use it instead.
6142  */
6143 static int intel_gen7_queue_flip(struct drm_device *dev,
6144                                  struct drm_crtc *crtc,
6145                                  struct drm_framebuffer *fb,
6146                                  struct drm_i915_gem_object *obj)
6147 {
6148         struct drm_i915_private *dev_priv = dev->dev_private;
6149         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6150         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
6151         int ret;
6152
6153         ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6154         if (ret)
6155                 goto err;
6156
6157         ret = intel_ring_begin(ring, 4);
6158         if (ret)
6159                 goto err_unpin;
6160
6161         intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
6162         intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
6163         intel_ring_emit(ring, (obj->gtt_offset));
6164         intel_ring_emit(ring, (MI_NOOP));
6165         intel_ring_advance(ring);
6166         return 0;
6167
6168 err_unpin:
6169         intel_unpin_fb_obj(obj);
6170 err:
6171         return ret;
6172 }
6173
6174 static int intel_default_queue_flip(struct drm_device *dev,
6175                                     struct drm_crtc *crtc,
6176                                     struct drm_framebuffer *fb,
6177                                     struct drm_i915_gem_object *obj)
6178 {
6179         return -ENODEV;
6180 }
6181
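/*
 * Top-level page flip entry point (drm_crtc_funcs.page_flip): allocate
 * the unpin work, take a vblank reference, pin the new framebuffer and
 * queue the flip through the per-generation queue_flip hook, then let
 * the vblank handler above complete it.  The old framebuffer stays
 * pinned until the unpin worker runs.
 */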
6182 static int intel_crtc_page_flip(struct drm_crtc *crtc,
6183                                 struct drm_framebuffer *fb,
6184                                 struct drm_pending_vblank_event *event)
6185 {
6186         struct drm_device *dev = crtc->dev;
6187         struct drm_i915_private *dev_priv = dev->dev_private;
6188         struct intel_framebuffer *intel_fb;
6189         struct drm_i915_gem_object *obj;
6190         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6191         struct intel_unpin_work *work;
6192         unsigned long flags;
6193         int ret;
6194
6195         work = kzalloc(sizeof *work, GFP_KERNEL);
6196         if (work == NULL)
6197                 return -ENOMEM;
6198
6199         work->event = event;
6200         work->dev = crtc->dev;
6201         intel_fb = to_intel_framebuffer(crtc->fb);
6202         work->old_fb_obj = intel_fb->obj;
6203         INIT_WORK(&work->work, intel_unpin_work_fn);
6204
6205         ret = drm_vblank_get(dev, intel_crtc->pipe);
6206         if (ret)
6207                 goto free_work;
6208
6209         /* We borrow the event spin lock for protecting unpin_work */
6210         spin_lock_irqsave(&dev->event_lock, flags);
6211         if (intel_crtc->unpin_work) {
6212                 spin_unlock_irqrestore(&dev->event_lock, flags);
6213                 kfree(work);
6214                 drm_vblank_put(dev, intel_crtc->pipe);
6215
6216                 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
6217                 return -EBUSY;
6218         }
6219         intel_crtc->unpin_work = work;
6220         spin_unlock_irqrestore(&dev->event_lock, flags);
6221
6222         intel_fb = to_intel_framebuffer(fb);
6223         obj = intel_fb->obj;
6224
6225         mutex_lock(&dev->struct_mutex);
6226
6227         /* Reference the objects for the scheduled work. */
6228         drm_gem_object_reference(&work->old_fb_obj->base);
6229         drm_gem_object_reference(&obj->base);
6230
6231         crtc->fb = fb;
6232
6233         work->pending_flip_obj = obj;
6234
6235         work->enable_stall_check = true;
6236
6237         /* Block clients from rendering to the new back buffer until
6238          * the flip occurs and the object is no longer visible.
6239          */
6240         atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
6241
6242         ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
6243         if (ret)
6244                 goto cleanup_pending;
6245
6246         intel_disable_fbc(dev);
6247         intel_mark_busy(dev, obj);
6248         mutex_unlock(&dev->struct_mutex);
6249
6250         trace_i915_flip_request(intel_crtc->plane, obj);
6251
6252         return 0;
6253
6254 cleanup_pending:
6255         atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
6256         drm_gem_object_unreference(&work->old_fb_obj->base);
6257         drm_gem_object_unreference(&obj->base);
6258         mutex_unlock(&dev->struct_mutex);
6259
6260         spin_lock_irqsave(&dev->event_lock, flags);
6261         intel_crtc->unpin_work = NULL;
6262         spin_unlock_irqrestore(&dev->event_lock, flags);
6263
6264         drm_vblank_put(dev, intel_crtc->pipe);
6265 free_work:
6266         kfree(work);
6267
6268         return ret;
6269 }
6270
6271 static void intel_sanitize_modesetting(struct drm_device *dev,
6272                                        int pipe, int plane)
6273 {
6274         struct drm_i915_private *dev_priv = dev->dev_private;
6275         u32 reg, val;
6276         int i;
6277
6278         /* Clear any frame start delays left enabled by the BIOS for debugging */
6279         for_each_pipe(i) {
6280                 reg = PIPECONF(i);
6281                 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
6282         }
6283
6284         if (HAS_PCH_SPLIT(dev))
6285                 return;
6286
6287         /* Who knows what state these registers were left in by the BIOS or
6288          * grub?
6289          *
6290          * If we leave the registers in a conflicting state (e.g. with the
6291          * display plane reading from a different pipe than the one we intend
6292          * to use) then when we attempt to tear down the active mode, we will
6293          * not disable the pipes and planes in the correct order -- leaving
6294          * a plane reading from a disabled pipe and possibly leading to
6295          * undefined behaviour.
6296          */
6297
6298         reg = DSPCNTR(plane);
6299         val = I915_READ(reg);
6300
6301         if ((val & DISPLAY_PLANE_ENABLE) == 0)
6302                 return;
6303         if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
6304                 return;
6305
6306         /* This display plane is active and attached to the other CPU pipe. */
6307         pipe = !pipe;
6308
6309         /* Disable the plane and wait for it to stop reading from the pipe. */
6310         intel_disable_plane(dev_priv, plane, pipe);
6311         intel_disable_pipe(dev_priv, pipe);
6312 }
6313
6314 static void intel_crtc_reset(struct drm_crtc *crtc)
6315 {
6316         struct drm_device *dev = crtc->dev;
6317         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6318
6319         /* Reset flags back to the 'unknown' status so that they
6320          * will be correctly set on the initial modeset.
6321          */
6322         intel_crtc->dpms_mode = -1;
6323
6324         /* We need to fix up any BIOS configuration that conflicts with
6325          * our expectations.
6326          */
6327         intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
6328 }
6329
6330 static struct drm_crtc_helper_funcs intel_helper_funcs = {
6331         .dpms = intel_crtc_dpms,
6332         .mode_fixup = intel_crtc_mode_fixup,
6333         .mode_set = intel_crtc_mode_set,
6334         .mode_set_base = intel_pipe_set_base,
6335         .mode_set_base_atomic = intel_pipe_set_base_atomic,
6336         .load_lut = intel_crtc_load_lut,
6337         .disable = intel_crtc_disable,
6338 };
6339
6340 static const struct drm_crtc_funcs intel_crtc_funcs = {
6341         .reset = intel_crtc_reset,
6342         .cursor_set = intel_crtc_cursor_set,
6343         .cursor_move = intel_crtc_cursor_move,
6344         .gamma_set = intel_crtc_gamma_set,
6345         .set_config = drm_crtc_helper_set_config,
6346         .destroy = intel_crtc_destroy,
6347         .page_flip = intel_crtc_page_flip,
6348 };
6349
6350 static void intel_pch_pll_init(struct drm_device *dev)
6351 {
6352         drm_i915_private_t *dev_priv = dev->dev_private;
6353         int i;
6354
6355         if (dev_priv->num_pch_pll == 0) {
6356                 DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
6357                 return;
6358         }
6359
6360         for (i = 0; i < dev_priv->num_pch_pll; i++) {
6361                 dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
6362                 dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
6363                 dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
6364         }
6365 }
6366
6367 static void intel_crtc_init(struct drm_device *dev, int pipe)
6368 {
6369         drm_i915_private_t *dev_priv = dev->dev_private;
6370         struct intel_crtc *intel_crtc;
6371         int i;
6372
6373         intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
6374         if (intel_crtc == NULL)
6375                 return;
6376
6377         drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
6378
6379         drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
6380         for (i = 0; i < 256; i++) {
6381                 intel_crtc->lut_r[i] = i;
6382                 intel_crtc->lut_g[i] = i;
6383                 intel_crtc->lut_b[i] = i;
6384         }
6385
6386         /* Swap pipes & planes for FBC on pre-965 */
6387         intel_crtc->pipe = pipe;
6388         intel_crtc->plane = pipe;
6389         if (IS_MOBILE(dev) && IS_GEN3(dev)) {
6390                 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
6391                 intel_crtc->plane = !pipe;
6392         }
6393
6394         BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
6395                dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
6396         dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
6397         dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
6398
6399         intel_crtc_reset(&intel_crtc->base);
6400         intel_crtc->active = true; /* force the pipe off on setup_init_config */
6401         intel_crtc->bpp = 24; /* default for pre-Ironlake */
6402
6403         if (HAS_PCH_SPLIT(dev)) {
6404                 intel_helper_funcs.prepare = ironlake_crtc_prepare;
6405                 intel_helper_funcs.commit = ironlake_crtc_commit;
6406         } else {
6407                 intel_helper_funcs.prepare = i9xx_crtc_prepare;
6408                 intel_helper_funcs.commit = i9xx_crtc_commit;
6409         }
6410
6411         drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
6412
6413         intel_crtc->busy = false;
6414
6415         setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
6416                     (unsigned long)intel_crtc);
6417 }
6418
6419 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
6420                                 struct drm_file *file)
6421 {
6422         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
6423         struct drm_mode_object *drmmode_obj;
6424         struct intel_crtc *crtc;
6425
6426         if (!drm_core_check_feature(dev, DRIVER_MODESET))
6427                 return -ENODEV;
6428
6429         drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
6430                         DRM_MODE_OBJECT_CRTC);
6431
6432         if (!drmmode_obj) {
6433                 DRM_ERROR("no such CRTC id\n");
6434                 return -EINVAL;
6435         }
6436
6437         crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
6438         pipe_from_crtc_id->pipe = crtc->pipe;
6439
6440         return 0;
6441 }
6442
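/*
 * Build a bitmask of encoder indices whose clone_mask intersects the
 * given type mask; used below to fill in each encoder's possible_clones.
 */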
6443 static int intel_encoder_clones(struct drm_device *dev, int type_mask)
6444 {
6445         struct intel_encoder *encoder;
6446         int index_mask = 0;
6447         int entry = 0;
6448
6449         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
6450                 if (type_mask & encoder->clone_mask)
6451                         index_mask |= (1 << entry);
6452                 entry++;
6453         }
6454
6455         return index_mask;
6456 }
6457
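/*
 * eDP port A is only present on mobile parts, must be strapped as
 * detected in DP_A, and on Ironlake may additionally be fused off.
 */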
6458 static bool has_edp_a(struct drm_device *dev)
6459 {
6460         struct drm_i915_private *dev_priv = dev->dev_private;
6461
6462         if (!IS_MOBILE(dev))
6463                 return false;
6464
6465         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
6466                 return false;
6467
6468         if (IS_GEN5(dev) &&
6469             (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
6470                 return false;
6471
6472         return true;
6473 }
6474
6475 static void intel_setup_outputs(struct drm_device *dev)
6476 {
6477         struct drm_i915_private *dev_priv = dev->dev_private;
6478         struct intel_encoder *encoder;
6479         bool dpd_is_edp = false;
6480         bool has_lvds;
6481
6482         has_lvds = intel_lvds_init(dev);
6483         if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
6484                 /* disable the panel fitter on everything but LVDS */
6485                 I915_WRITE(PFIT_CONTROL, 0);
6486         }
6487
6488         if (HAS_PCH_SPLIT(dev)) {
6489                 dpd_is_edp = intel_dpd_is_edp(dev);
6490
6491                 if (has_edp_a(dev))
6492                         intel_dp_init(dev, DP_A);
6493
6494                 if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
6495                         intel_dp_init(dev, PCH_DP_D);
6496         }
6497
6498         intel_crt_init(dev);
6499
6500         if (IS_HASWELL(dev)) {
6501                 int found;
6502
6503                 /* Haswell uses DDI functions to detect digital outputs */
6504                 found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
6505                 /* DDI A only supports eDP */
6506                 if (found)
6507                         intel_ddi_init(dev, PORT_A);
6508
6509                 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
6510                  * register */
6511                 found = I915_READ(SFUSE_STRAP);
6512
6513                 if (found & SFUSE_STRAP_DDIB_DETECTED)
6514                         intel_ddi_init(dev, PORT_B);
6515                 if (found & SFUSE_STRAP_DDIC_DETECTED)
6516                         intel_ddi_init(dev, PORT_C);
6517                 if (found & SFUSE_STRAP_DDID_DETECTED)
6518                         intel_ddi_init(dev, PORT_D);
6519         } else if (HAS_PCH_SPLIT(dev)) {
6520                 int found;
6521
6522                 if (I915_READ(HDMIB) & PORT_DETECTED) {
6523                         /* PCH SDVOB is multiplexed with HDMIB */
6524                         found = intel_sdvo_init(dev, PCH_SDVOB, true);
6525                         if (!found)
6526                                 intel_hdmi_init(dev, HDMIB);
6527                         if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
6528                                 intel_dp_init(dev, PCH_DP_B);
6529                 }
6530
6531                 if (I915_READ(HDMIC) & PORT_DETECTED)
6532                         intel_hdmi_init(dev, HDMIC);
6533
6534                 if (I915_READ(HDMID) & PORT_DETECTED)
6535                         intel_hdmi_init(dev, HDMID);
6536
6537                 if (I915_READ(PCH_DP_C) & DP_DETECTED)
6538                         intel_dp_init(dev, PCH_DP_C);
6539
6540                 if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
6541                         intel_dp_init(dev, PCH_DP_D);
6542
6543         } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
6544                 bool found = false;
6545
6546                 if (I915_READ(SDVOB) & SDVO_DETECTED) {
6547                         DRM_DEBUG_KMS("probing SDVOB\n");
6548                         found = intel_sdvo_init(dev, SDVOB, true);
6549                         if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
6550                                 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
6551                                 intel_hdmi_init(dev, SDVOB);
6552                         }
6553
6554                         if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
6555                                 DRM_DEBUG_KMS("probing DP_B\n");
6556                                 intel_dp_init(dev, DP_B);
6557                         }
6558                 }
6559
6560                 /* Before G4X, SDVOC doesn't have its own detect register */
6561
6562                 if (I915_READ(SDVOB) & SDVO_DETECTED) {
6563                         DRM_DEBUG_KMS("probing SDVOC\n");
6564                         found = intel_sdvo_init(dev, SDVOC, false);
6565                 }
6566
6567                 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
6568
6569                         if (SUPPORTS_INTEGRATED_HDMI(dev)) {
6570                                 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
6571                                 intel_hdmi_init(dev, SDVOC);
6572                         }
6573                         if (SUPPORTS_INTEGRATED_DP(dev)) {
6574                                 DRM_DEBUG_KMS("probing DP_C\n");
6575                                 intel_dp_init(dev, DP_C);
6576                         }
6577                 }
6578
6579                 if (SUPPORTS_INTEGRATED_DP(dev) &&
6580                     (I915_READ(DP_D) & DP_DETECTED)) {
6581                         DRM_DEBUG_KMS("probing DP_D\n");
6582                         intel_dp_init(dev, DP_D);
6583                 }
6584         } else if (IS_GEN2(dev))
6585                 intel_dvo_init(dev);
6586
6587         if (SUPPORTS_TV(dev))
6588                 intel_tv_init(dev);
6589
6590         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
6591                 encoder->base.possible_crtcs = encoder->crtc_mask;
6592                 encoder->base.possible_clones =
6593                         intel_encoder_clones(dev, encoder->clone_mask);
6594         }
6595
6596         /* disable all the possible outputs/crtcs before entering KMS mode */
6597         drm_helper_disable_unused_functions(dev);
6598
6599         if (HAS_PCH_SPLIT(dev))
6600                 ironlake_init_pch_refclk(dev);
6601 }
6602
6603 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
6604 {
6605         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
6606
6607         drm_framebuffer_cleanup(fb);
6608         drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
6609
6610         kfree(intel_fb);
6611 }
6612
6613 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
6614                                                 struct drm_file *file,
6615                                                 unsigned int *handle)
6616 {
6617         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
6618         struct drm_i915_gem_object *obj = intel_fb->obj;
6619
6620         return drm_gem_handle_create(file, &obj->base, handle);
6621 }
6622
6623 static const struct drm_framebuffer_funcs intel_fb_funcs = {
6624         .destroy = intel_user_framebuffer_destroy,
6625         .create_handle = intel_user_framebuffer_create_handle,
6626 };
6627
6628 int intel_framebuffer_init(struct drm_device *dev,
6629                            struct intel_framebuffer *intel_fb,
6630                            struct drm_mode_fb_cmd2 *mode_cmd,
6631                            struct drm_i915_gem_object *obj)
6632 {
6633         int ret;
6634
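        /*
         * Reject framebuffers the display engine cannot scan out from:
         * Y-tiled surfaces are not supported here and the stride must
         * be a multiple of 64 bytes.
         */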
6635         if (obj->tiling_mode == I915_TILING_Y)
6636                 return -EINVAL;
6637
6638         if (mode_cmd->pitches[0] & 63)
6639                 return -EINVAL;
6640
6641         switch (mode_cmd->pixel_format) {
6642         case DRM_FORMAT_RGB332:
6643         case DRM_FORMAT_RGB565:
6644         case DRM_FORMAT_XRGB8888:
6645         case DRM_FORMAT_XBGR8888:
6646         case DRM_FORMAT_ARGB8888:
6647         case DRM_FORMAT_XRGB2101010:
6648         case DRM_FORMAT_ARGB2101010:
6649                 /* RGB formats are common across chipsets */
6650                 break;
6651         case DRM_FORMAT_YUYV:
6652         case DRM_FORMAT_UYVY:
6653         case DRM_FORMAT_YVYU:
6654         case DRM_FORMAT_VYUY:
6655                 break;
6656         default:
6657                 DRM_DEBUG_KMS("unsupported pixel format %u\n",
6658                                 mode_cmd->pixel_format);
6659                 return -EINVAL;
6660         }
6661
6662         ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
6663         if (ret) {
6664                 DRM_ERROR("framebuffer init failed %d\n", ret);
6665                 return ret;
6666         }
6667
6668         drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
6669         intel_fb->obj = obj;
6670         return 0;
6671 }
6672
6673 static struct drm_framebuffer *
6674 intel_user_framebuffer_create(struct drm_device *dev,
6675                               struct drm_file *filp,
6676                               struct drm_mode_fb_cmd2 *mode_cmd)
6677 {
6678         struct drm_i915_gem_object *obj;
6679
6680         obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
6681                                                 mode_cmd->handles[0]));
6682         if (&obj->base == NULL)
6683                 return ERR_PTR(-ENOENT);
6684
6685         return intel_framebuffer_create(dev, mode_cmd, obj);
6686 }
6687
6688 static const struct drm_mode_config_funcs intel_mode_funcs = {
6689         .fb_create = intel_user_framebuffer_create,
6690         .output_poll_changed = intel_fb_output_poll_changed,
6691 };
6692
6693 /* Set up chip specific display functions */
6694 static void intel_init_display(struct drm_device *dev)
6695 {
6696         struct drm_i915_private *dev_priv = dev->dev_private;
6697
6698         /* We always want a DPMS function */
6699         if (HAS_PCH_SPLIT(dev)) {
6700                 dev_priv->display.dpms = ironlake_crtc_dpms;
6701                 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
6702                 dev_priv->display.off = ironlake_crtc_off;
6703                 dev_priv->display.update_plane = ironlake_update_plane;
6704         } else {
6705                 dev_priv->display.dpms = i9xx_crtc_dpms;
6706                 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
6707                 dev_priv->display.off = i9xx_crtc_off;
6708                 dev_priv->display.update_plane = i9xx_update_plane;
6709         }
6710
6711         /* Returns the core display clock speed */
6712         if (IS_VALLEYVIEW(dev))
6713                 dev_priv->display.get_display_clock_speed =
6714                         valleyview_get_display_clock_speed;
6715         else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
6716                 dev_priv->display.get_display_clock_speed =
6717                         i945_get_display_clock_speed;
6718         else if (IS_I915G(dev))
6719                 dev_priv->display.get_display_clock_speed =
6720                         i915_get_display_clock_speed;
6721         else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
6722                 dev_priv->display.get_display_clock_speed =
6723                         i9xx_misc_get_display_clock_speed;
6724         else if (IS_I915GM(dev))
6725                 dev_priv->display.get_display_clock_speed =
6726                         i915gm_get_display_clock_speed;
6727         else if (IS_I865G(dev))
6728                 dev_priv->display.get_display_clock_speed =
6729                         i865_get_display_clock_speed;
6730         else if (IS_I85X(dev))
6731                 dev_priv->display.get_display_clock_speed =
6732                         i855_get_display_clock_speed;
6733         else /* 852, 830 */
6734                 dev_priv->display.get_display_clock_speed =
6735                         i830_get_display_clock_speed;
6736
6737         if (HAS_PCH_SPLIT(dev)) {
6738                 if (IS_GEN5(dev)) {
6739                         dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
6740                         dev_priv->display.write_eld = ironlake_write_eld;
6741                 } else if (IS_GEN6(dev)) {
6742                         dev_priv->display.fdi_link_train = gen6_fdi_link_train;
6743                         dev_priv->display.write_eld = ironlake_write_eld;
6744                 } else if (IS_IVYBRIDGE(dev)) {
6745                         /* FIXME: detect B0+ stepping and use auto training */
6746                         dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
6747                         dev_priv->display.write_eld = ironlake_write_eld;
6748                 } else if (IS_HASWELL(dev)) {
6749                         dev_priv->display.fdi_link_train = hsw_fdi_link_train;
6750                         dev_priv->display.write_eld = ironlake_write_eld;
6751                 } else
6752                         dev_priv->display.update_wm = NULL;
6753         } else if (IS_VALLEYVIEW(dev)) {
6754                 dev_priv->display.force_wake_get = vlv_force_wake_get;
6755                 dev_priv->display.force_wake_put = vlv_force_wake_put;
6756         } else if (IS_G4X(dev)) {
6757                 dev_priv->display.write_eld = g4x_write_eld;
6758         }
6759
6760         /* Default just returns -ENODEV to indicate unsupported */
6761         dev_priv->display.queue_flip = intel_default_queue_flip;
6762
6763         switch (INTEL_INFO(dev)->gen) {
6764         case 2:
6765                 dev_priv->display.queue_flip = intel_gen2_queue_flip;
6766                 break;
6767
6768         case 3:
6769                 dev_priv->display.queue_flip = intel_gen3_queue_flip;
6770                 break;
6771
6772         case 4:
6773         case 5:
6774                 dev_priv->display.queue_flip = intel_gen4_queue_flip;
6775                 break;
6776
6777         case 6:
6778                 dev_priv->display.queue_flip = intel_gen6_queue_flip;
6779                 break;
6780         case 7:
6781                 dev_priv->display.queue_flip = intel_gen7_queue_flip;
6782                 break;
6783         }
6784 }
6785
6786 /*
6787  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
6788  * resume, or other times.  This quirk makes sure that's the case for
6789  * affected systems.
6790  */
6791 static void quirk_pipea_force(struct drm_device *dev)
6792 {
6793         struct drm_i915_private *dev_priv = dev->dev_private;
6794
6795         dev_priv->quirks |= QUIRK_PIPEA_FORCE;
6796         DRM_INFO("applying pipe a force quirk\n");
6797 }
6798
6799 /*
6800  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
6801  */
6802 static void quirk_ssc_force_disable(struct drm_device *dev)
6803 {
6804         struct drm_i915_private *dev_priv = dev->dev_private;
6805         dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
6806         DRM_INFO("applying lvds SSC disable quirk\n");
6807 }
6808
6809 /*
6810  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
6811  * brightness value
6812  */
6813 static void quirk_invert_brightness(struct drm_device *dev)
6814 {
6815         struct drm_i915_private *dev_priv = dev->dev_private;
6816         dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
6817         DRM_INFO("applying inverted panel brightness quirk\n");
6818 }
6819
6820 struct intel_quirk {
6821         int device;
6822         int subsystem_vendor;
6823         int subsystem_device;
6824         void (*hook)(struct drm_device *dev);
6825 };
6826
6827 static struct intel_quirk intel_quirks[] = {
6828         /* HP Mini needs pipe A force quirk (LP: #322104) */
6829         { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
6830
6831         /* Thinkpad R31 needs pipe A force quirk */
6832         { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
6833         /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
6834         { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
6835
6836         /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
6837         { 0x3577,  0x1014, 0x0513, quirk_pipea_force },
6838         /* ThinkPad X40 needs pipe A force quirk */
6839
6840         /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
6841         { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
6842
6843         /* 855 & before need to leave pipe A & dpll A up */
6844         { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
6845         { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
6846
6847         /* Lenovo U160 cannot use SSC on LVDS */
6848         { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
6849
6850         /* Sony Vaio Y cannot use SSC on LVDS */
6851         { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
6852
6853         /* Acer Aspire 5734Z must invert backlight brightness */
6854         { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
6855 };
6856
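/* Apply any quirks whose PCI device and subsystem IDs match this GPU. */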
6857 static void intel_init_quirks(struct drm_device *dev)
6858 {
6859         struct pci_dev *d = dev->pdev;
6860         int i;
6861
6862         for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
6863                 struct intel_quirk *q = &intel_quirks[i];
6864
6865                 if (d->device == q->device &&
6866                     (d->subsystem_vendor == q->subsystem_vendor ||
6867                      q->subsystem_vendor == PCI_ANY_ID) &&
6868                     (d->subsystem_device == q->subsystem_device ||
6869                      q->subsystem_device == PCI_ANY_ID))
6870                         q->hook(dev);
6871         }
6872 }
6873
6874 /* Disable the VGA plane that we never use */
6875 static void i915_disable_vga(struct drm_device *dev)
6876 {
6877         struct drm_i915_private *dev_priv = dev->dev_private;
6878         u8 sr1;
6879         u32 vga_reg;
6880
6881         if (HAS_PCH_SPLIT(dev))
6882                 vga_reg = CPU_VGACNTRL;
6883         else
6884                 vga_reg = VGACNTRL;
6885
6886         vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
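        /* Set the screen-off bit (bit 5) in VGA sequencer register SR01. */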
6887         outb(SR01, VGA_SR_INDEX);
6888         sr1 = inb(VGA_SR_DATA);
6889         outb(sr1 | 1<<5, VGA_SR_DATA);
6890         vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
6891         udelay(300);
6892
6893         I915_WRITE(vga_reg, VGA_DISP_DISABLE);
6894         POSTING_READ(vga_reg);
6895 }
6896
6897 static void ivb_pch_pwm_override(struct drm_device *dev)
6898 {
6899         struct drm_i915_private *dev_priv = dev->dev_private;
6900
6901         /*
6902          * IVB has CPU eDP backlight regs too; set things up to let the
6903          * PCH regs control the backlight
6904          */
6905         I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE);
6906         I915_WRITE(BLC_PWM_CPU_CTL, 0);
6907         I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE | (1<<30));
6908 }
6909
6910 void intel_modeset_init_hw(struct drm_device *dev)
6911 {
6912         struct drm_i915_private *dev_priv = dev->dev_private;
6913
6914         intel_init_clock_gating(dev);
6915
6916         if (IS_IRONLAKE_M(dev)) {
6917                 ironlake_enable_drps(dev);
6918                 ironlake_enable_rc6(dev);
6919                 intel_init_emon(dev);
6920         }
6921
6922         if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
6923                 gen6_enable_rps(dev_priv);
6924                 gen6_update_ring_freq(dev_priv);
6925         }
6926
6927         if (IS_IVYBRIDGE(dev))
6928                 ivb_pch_pwm_override(dev);
6929 }
6930
6931 void intel_modeset_init(struct drm_device *dev)
6932 {
6933         struct drm_i915_private *dev_priv = dev->dev_private;
6934         int i, ret;
6935
6936         drm_mode_config_init(dev);
6937
6938         dev->mode_config.min_width = 0;
6939         dev->mode_config.min_height = 0;
6940
6941         dev->mode_config.preferred_depth = 24;
6942         dev->mode_config.prefer_shadow = 1;
6943
6944         dev->mode_config.funcs = &intel_mode_funcs;
6945
6946         intel_init_quirks(dev);
6947
6948         intel_init_pm(dev);
6949
6950         intel_prepare_ddi(dev);
6951
6952         intel_init_display(dev);
6953
6954         if (IS_GEN2(dev)) {
6955                 dev->mode_config.max_width = 2048;
6956                 dev->mode_config.max_height = 2048;
6957         } else if (IS_GEN3(dev)) {
6958                 dev->mode_config.max_width = 4096;
6959                 dev->mode_config.max_height = 4096;
6960         } else {
6961                 dev->mode_config.max_width = 8192;
6962                 dev->mode_config.max_height = 8192;
6963         }
6964         dev->mode_config.fb_base = dev->agp->base;
6965
6966         DRM_DEBUG_KMS("%d display pipe%s available.\n",
6967                       dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
6968
6969         for (i = 0; i < dev_priv->num_pipe; i++) {
6970                 intel_crtc_init(dev, i);
6971                 ret = intel_plane_init(dev, i);
6972                 if (ret)
6973                         DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
6974         }
6975
6976         intel_pch_pll_init(dev);
6977
6978         /* Just disable it once at startup */
6979         i915_disable_vga(dev);
6980         intel_setup_outputs(dev);
6981
6982         INIT_WORK(&dev_priv->idle_work, intel_idle_update);
6983         setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
6984                     (unsigned long)dev);
6985 }
6986
6987 void intel_modeset_gem_init(struct drm_device *dev)
6988 {
6989         intel_modeset_init_hw(dev);
6990
6991         intel_setup_overlay(dev);
6992 }
6993
6994 void intel_modeset_cleanup(struct drm_device *dev)
6995 {
6996         struct drm_i915_private *dev_priv = dev->dev_private;
6997         struct drm_crtc *crtc;
6998         struct intel_crtc *intel_crtc;
6999
7000         drm_kms_helper_poll_fini(dev);
7001         mutex_lock(&dev->struct_mutex);
7002
7003         intel_unregister_dsm_handler();
7004
7005
7006         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7007                 /* Skip inactive CRTCs */
7008                 if (!crtc->fb)
7009                         continue;
7010
7011                 intel_crtc = to_intel_crtc(crtc);
7012                 intel_increase_pllclock(crtc);
7013         }
7014
7015         intel_disable_fbc(dev);
7016
7017         if (IS_IRONLAKE_M(dev))
7018                 ironlake_disable_drps(dev);
7019         if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev))
7020                 gen6_disable_rps(dev);
7021
7022         if (IS_IRONLAKE_M(dev))
7023                 ironlake_disable_rc6(dev);
7024
7025         if (IS_VALLEYVIEW(dev))
7026                 vlv_init_dpio(dev);
7027
7028         mutex_unlock(&dev->struct_mutex);
7029
7030         /* Disable the irq before mode object teardown, for the irq might
7031          * enqueue unpin/hotplug work. */
7032         drm_irq_uninstall(dev);
7033         cancel_work_sync(&dev_priv->hotplug_work);
7034         cancel_work_sync(&dev_priv->rps_work);
7035
7036         /* flush any delayed tasks or pending work */
7037         flush_scheduled_work();
7038
7039         /* Shut off idle work before the crtcs get freed. */
7040         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7041                 intel_crtc = to_intel_crtc(crtc);
7042                 del_timer_sync(&intel_crtc->idle_timer);
7043         }
7044         del_timer_sync(&dev_priv->idle_timer);
7045         cancel_work_sync(&dev_priv->idle_work);
7046
7047         drm_mode_config_cleanup(dev);
7048 }
7049
7050 /*
7051  * Return which encoder is currently attached to the given connector.
7052  */
7053 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
7054 {
7055         return &intel_attached_encoder(connector)->base;
7056 }
7057
7058 void intel_connector_attach_encoder(struct intel_connector *connector,
7059                                     struct intel_encoder *encoder)
7060 {
7061         connector->encoder = encoder;
7062         drm_mode_connector_attach_encoder(&connector->base,
7063                                           &encoder->base);
7064 }
7065
7066 /*
7067  * set vga decode state - true == enable VGA decode
7068  */
7069 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
7070 {
7071         struct drm_i915_private *dev_priv = dev->dev_private;
7072         u16 gmch_ctrl;
7073
7074         pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
7075         if (state)
7076                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
7077         else
7078                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
7079         pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
7080         return 0;
7081 }
7082
7083 #ifdef CONFIG_DEBUG_FS
7084 #include <linux/seq_file.h>
7085
7086 struct intel_display_error_state {
7087         struct intel_cursor_error_state {
7088                 u32 control;
7089                 u32 position;
7090                 u32 base;
7091                 u32 size;
7092         } cursor[2];
7093
7094         struct intel_pipe_error_state {
7095                 u32 conf;
7096                 u32 source;
7097
7098                 u32 htotal;
7099                 u32 hblank;
7100                 u32 hsync;
7101                 u32 vtotal;
7102                 u32 vblank;
7103                 u32 vsync;
7104         } pipe[2];
7105
7106         struct intel_plane_error_state {
7107                 u32 control;
7108                 u32 stride;
7109                 u32 size;
7110                 u32 pos;
7111                 u32 addr;
7112                 u32 surface;
7113                 u32 tile_offset;
7114         } plane[2];
7115 };
7116
7117 struct intel_display_error_state *
7118 intel_display_capture_error_state(struct drm_device *dev)
7119 {
7120         drm_i915_private_t *dev_priv = dev->dev_private;
7121         struct intel_display_error_state *error;
7122         int i;
7123
7124         error = kmalloc(sizeof(*error), GFP_ATOMIC);
7125         if (error == NULL)
7126                 return NULL;
7127
7128         for (i = 0; i < 2; i++) {
7129                 error->cursor[i].control = I915_READ(CURCNTR(i));
7130                 error->cursor[i].position = I915_READ(CURPOS(i));
7131                 error->cursor[i].base = I915_READ(CURBASE(i));
7132
7133                 error->plane[i].control = I915_READ(DSPCNTR(i));
7134                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
7135                 error->plane[i].size = I915_READ(DSPSIZE(i));
7136                 error->plane[i].pos = I915_READ(DSPPOS(i));
7137                 error->plane[i].addr = I915_READ(DSPADDR(i));
7138                 if (INTEL_INFO(dev)->gen >= 4) {
7139                         error->plane[i].surface = I915_READ(DSPSURF(i));
7140                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
7141                 }
7142
7143                 error->pipe[i].conf = I915_READ(PIPECONF(i));
7144                 error->pipe[i].source = I915_READ(PIPESRC(i));
7145                 error->pipe[i].htotal = I915_READ(HTOTAL(i));
7146                 error->pipe[i].hblank = I915_READ(HBLANK(i));
7147                 error->pipe[i].hsync = I915_READ(HSYNC(i));
7148                 error->pipe[i].vtotal = I915_READ(VTOTAL(i));
7149                 error->pipe[i].vblank = I915_READ(VBLANK(i));
7150                 error->pipe[i].vsync = I915_READ(VSYNC(i));
7151         }
7152
7153         return error;
7154 }
7155
7156 void
7157 intel_display_print_error_state(struct seq_file *m,
7158                                 struct drm_device *dev,
7159                                 struct intel_display_error_state *error)
7160 {
7161         int i;
7162
7163         for (i = 0; i < 2; i++) {
7164                 seq_printf(m, "Pipe [%d]:\n", i);
7165                 seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
7166                 seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
7167                 seq_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
7168                 seq_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
7169                 seq_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
7170                 seq_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
7171                 seq_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
7172                 seq_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
7173
7174                 seq_printf(m, "Plane [%d]:\n", i);
7175                 seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
7176                 seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
7177                 seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
7178                 seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
7179                 seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
7180                 if (INTEL_INFO(dev)->gen >= 4) {
7181                         seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
7182                         seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
7183                 }
7184
7185                 seq_printf(m, "Cursor [%d]:\n", i);
7186                 seq_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
7187                 seq_printf(m, "  POS: %08x\n", error->cursor[i].position);
7188                 seq_printf(m, "  BASE: %08x\n", error->cursor[i].base);
7189         }
7190 }
7191 #endif