1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "drmP.h"
24 #include "amdgpu.h"
25 #include "amdgpu_pm.h"
26 #include "amdgpu_i2c.h"
27 #include "atom.h"
28 #include "amdgpu_atombios.h"
29 #include "atombios_crtc.h"
30 #include "atombios_encoders.h"
31 #include "amdgpu_pll.h"
32 #include "amdgpu_connectors.h"
33
34 #include "bif/bif_3_0_d.h"
35 #include "bif/bif_3_0_sh_mask.h"
36 #include "oss/oss_1_0_d.h"
37 #include "oss/oss_1_0_sh_mask.h"
38 #include "gca/gfx_6_0_d.h"
39 #include "gca/gfx_6_0_sh_mask.h"
40 #include "gmc/gmc_6_0_d.h"
41 #include "gmc/gmc_6_0_sh_mask.h"
42 #include "dce/dce_6_0_d.h"
43 #include "dce/dce_6_0_sh_mask.h"
44 #include "gca/gfx_7_2_enum.h"
45 #include "si_enums.h"
46
47 static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
48 static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
49
50 static const u32 crtc_offsets[6] =
51 {
52         SI_CRTC0_REGISTER_OFFSET,
53         SI_CRTC1_REGISTER_OFFSET,
54         SI_CRTC2_REGISTER_OFFSET,
55         SI_CRTC3_REGISTER_OFFSET,
56         SI_CRTC4_REGISTER_OFFSET,
57         SI_CRTC5_REGISTER_OFFSET
58 };
59
60 static const u32 hpd_offsets[] =
61 {
62         mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
63         mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
64         mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
65         mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
66         mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
67         mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
68 };
69
70 static const uint32_t dig_offsets[] = {
71         SI_CRTC0_REGISTER_OFFSET,
72         SI_CRTC1_REGISTER_OFFSET,
73         SI_CRTC2_REGISTER_OFFSET,
74         SI_CRTC3_REGISTER_OFFSET,
75         SI_CRTC4_REGISTER_OFFSET,
76         SI_CRTC5_REGISTER_OFFSET,
77         (0x13830 - 0x7030) >> 2,
78 };
79
80 static const struct {
81         uint32_t        reg;
82         uint32_t        vblank;
83         uint32_t        vline;
84         uint32_t        hpd;
85
86 } interrupt_status_offsets[6] = { {
87         .reg = mmDISP_INTERRUPT_STATUS,
88         .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
89         .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
90         .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
91 }, {
92         .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
93         .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
94         .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
95         .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
96 }, {
97         .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
98         .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
99         .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
100         .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
101 }, {
102         .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
103         .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
104         .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
105         .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
106 }, {
107         .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
108         .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
109         .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
110         .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
111 }, {
112         .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
113         .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
114         .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
115         .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
116 } };
117
118 static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
119                                      u32 block_offset, u32 reg)
120 {
121         DRM_INFO("xxxx: dce_v6_0_audio_endpt_rreg ----no impl!!!!\n");
122         return 0;
123 }
124
125 static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
126                                       u32 block_offset, u32 reg, u32 v)
127 {
128         DRM_INFO("xxxx: dce_v6_0_audio_endpt_wreg ----no impl!!!!\n");
129 }
130
131 static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
132 {
133         if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & CRTC_STATUS__CRTC_V_BLANK_MASK)
134                 return true;
135         else
136                 return false;
137 }
138
139 static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
140 {
141         u32 pos1, pos2;
142
143         pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
144         pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
145
146         if (pos1 != pos2)
147                 return true;
148         else
149                 return false;
150 }
151
152 /**
153  * dce_v6_0_vblank_wait - vblank wait asic callback.
154  *
155  * @adev: amdgpu_device pointer
     * @crtc: crtc to wait for vblank on
156  *
157  * Wait for vblank on the requested crtc (evergreen+).
158  */
159 static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
160 {
161         unsigned i = 100;
162
163         if (crtc >= adev->mode_info.num_crtc)
164                 return;
165
166         if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
167                 return;
168
169         /* depending on when we hit vblank, we may be close to active; if so,
170          * wait for another frame.
171          */
172         while (dce_v6_0_is_in_vblank(adev, crtc)) {
173                 if (i++ == 100) {
174                         i = 0;
175                         if (!dce_v6_0_is_counter_moving(adev, crtc))
176                                 break;
177                 }
178         }
179
180         while (!dce_v6_0_is_in_vblank(adev, crtc)) {
181                 if (i++ == 100) {
182                         i = 0;
183                         if (!dce_v6_0_is_counter_moving(adev, crtc))
184                                 break;
185                 }
186         }
187 }
188
189 static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
190 {
191         if (crtc >= adev->mode_info.num_crtc)
192                 return 0;
193         else
194                 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
195 }
196
197 static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
198 {
199         unsigned i;
200
201         /* Enable pflip interrupts */
202         for (i = 0; i < adev->mode_info.num_crtc; i++)
203                 amdgpu_irq_get(adev, &adev->pageflip_irq, i);
204 }
205
206 static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
207 {
208         unsigned i;
209
210         /* Disable pflip interrupts */
211         for (i = 0; i < adev->mode_info.num_crtc; i++)
212                 amdgpu_irq_put(adev, &adev->pageflip_irq, i);
213 }
214
215 /**
216  * dce_v6_0_page_flip - pageflip callback.
217  *
218  * @adev: amdgpu_device pointer
219  * @crtc_id: crtc to cleanup pageflip on
220  * @crtc_base: new address of the crtc (GPU MC address)
     * @async: asynchronous flip (flip at hsync rather than vsync)
221  *
222  * Does the actual pageflip (evergreen+).
223  * Programs the new primary surface base address; for async flips the
224  * surface update is allowed at hsync instead of waiting for vsync.
227  */
228 static void dce_v6_0_page_flip(struct amdgpu_device *adev,
229                                int crtc_id, u64 crtc_base, bool async)
230 {
231         struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
232
233         /* flip at hsync for async, default is vsync */
234         WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
235                GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
236         /* update the scanout addresses */
237         WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
238                upper_32_bits(crtc_base));
239         WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
240                (u32)crtc_base);
241
242         /* post the write */
243         RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
244 }
245
246 static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
247                                         u32 *vbl, u32 *position)
248 {
249         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
250                 return -EINVAL;
251         *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
252         *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
253
254         return 0;
255
256 }
257
258 /**
259  * dce_v6_0_hpd_sense - hpd sense callback.
260  *
261  * @adev: amdgpu_device pointer
262  * @hpd: hpd (hotplug detect) pin
263  *
264  * Checks if a digital monitor is connected (evergreen+).
265  * Returns true if connected, false if not connected.
266  */
267 static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
268                                enum amdgpu_hpd_id hpd)
269 {
270         bool connected = false;
271
272         if (hpd >= adev->mode_info.num_hpd)
273                 return connected;
274
275         if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
276                 connected = true;
277
278         return connected;
279 }
280
281 /**
282  * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
283  *
284  * @adev: amdgpu_device pointer
285  * @hpd: hpd (hotplug detect) pin
286  *
287  * Set the polarity of the hpd pin (evergreen+).
288  */
289 static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
290                                       enum amdgpu_hpd_id hpd)
291 {
292         u32 tmp;
293         bool connected = dce_v6_0_hpd_sense(adev, hpd);
294
295         if (hpd >= adev->mode_info.num_hpd)
296                 return;
297
298         tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
299         if (connected)
300                 tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
301         else
302                 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
303         WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
304 }
305
306 /**
307  * dce_v6_0_hpd_init - hpd setup callback.
308  *
309  * @adev: amdgpu_device pointer
310  *
311  * Setup the hpd pins used by the card (evergreen+).
312  * Enable the pin, set the polarity, and enable the hpd interrupts.
313  */
314 static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
315 {
316         struct drm_device *dev = adev->ddev;
317         struct drm_connector *connector;
318         u32 tmp;
319
320         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
321                 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
322
323                 if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
324                         continue;
325
326                 tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
327                 tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
328                 WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
329
330                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
331                     connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
332                         /* don't try to enable hpd on eDP or LVDS to avoid breaking the
333                          * aux dp channel on iMacs; this helps (but does not completely fix)
334                          * https://bugzilla.redhat.com/show_bug.cgi?id=726143
335                          * and also avoids interrupt storms during dpms.
336                          */
337                         tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
338                         tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
339                         WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
340                         continue;
341                 }
342
343                 dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
344                 amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
345         }
346
347 }
348
349 /**
350  * dce_v6_0_hpd_fini - hpd tear down callback.
351  *
352  * @adev: amdgpu_device pointer
353  *
354  * Tear down the hpd pins used by the card (evergreen+).
355  * Disable the hpd interrupts.
356  */
357 static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
358 {
359         struct drm_device *dev = adev->ddev;
360         struct drm_connector *connector;
361         u32 tmp;
362
363         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
364                 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
365
366                 if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
367                         continue;
368
369                 tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
370                 tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
371                 WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
372
373                 amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
374         }
375 }
376
377 static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
378 {
379         return mmDC_GPIO_HPD_A;
380 }
381
382 static u32 evergreen_get_vblank_counter(struct amdgpu_device *adev, int crtc)
383 {
384         if (crtc >= adev->mode_info.num_crtc)
385                 return 0;
386         else
387                 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
388 }
389
390 static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
391                                     struct amdgpu_mode_mc_save *save)
392 {
393         u32 crtc_enabled, tmp, frame_count;
394         int i, j;
395
396         save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
397         save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
398
399         /* disable VGA render */
400         WREG32(mmVGA_RENDER_CONTROL, 0);
401
402         /* blank the display controllers */
403         for (i = 0; i < adev->mode_info.num_crtc; i++) {
404                 crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK;
405                 if (crtc_enabled) {
406                         save->crtc_enabled[i] = true;
407                         tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
408
409                         if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) {
410                                 dce_v6_0_vblank_wait(adev, i);
411                                 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
412                                 tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK;
413                                 WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
414                                 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
415                         }
416                         /* wait for the next frame */
417                         frame_count = evergreen_get_vblank_counter(adev, i);
418                         for (j = 0; j < adev->usec_timeout; j++) {
419                                 if (evergreen_get_vblank_counter(adev, i) != frame_count)
420                                         break;
421                                 udelay(1);
422                         }
423
424                         /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
425                         WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
426                         tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
427                         tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
428                         WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
429                         WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
430                         save->crtc_enabled[i] = false;
431                         /* ***** */
432                 } else {
433                         save->crtc_enabled[i] = false;
434                 }
435         }
436 }
437
438 static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
439                                       struct amdgpu_mode_mc_save *save)
440 {
441         u32 tmp;
442         int i, j;
443
444         /* update crtc base addresses */
445         for (i = 0; i < adev->mode_info.num_crtc; i++) {
446                 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
447                        upper_32_bits(adev->mc.vram_start));
448                 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
449                        upper_32_bits(adev->mc.vram_start));
450                 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
451                        (u32)adev->mc.vram_start);
452                 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
453                        (u32)adev->mc.vram_start);
454         }
455
456         WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
457         WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
458
459         /* unlock regs and wait for update */
460         for (i = 0; i < adev->mode_info.num_crtc; i++) {
461                 if (save->crtc_enabled[i]) {
462                         tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
463                         if ((tmp & 0x7) != 0) {
464                                 tmp &= ~0x7;
465                                 WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
466                         }
467                         tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
468                         if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) {
469                                 tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
470                                 WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
471                         }
472                         tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
473                         if (tmp & 1) {
474                                 tmp &= ~1;
475                                 WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
476                         }
477                         for (j = 0; j < adev->usec_timeout; j++) {
478                                 tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
479                                 if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0)
480                                         break;
481                                 udelay(1);
482                         }
483                 }
484         }
485
486         /* Unlock vga access */
487         WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
488         mdelay(1);
489         WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
490
491 }
492
493 static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
494                                           bool render)
495 {
496         if (!render)
497                 WREG32(mmVGA_RENDER_CONTROL,
498                         RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
499
500 }
501
502 static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
503 {
504         int num_crtc = 0;
505
506         switch (adev->asic_type) {
507         case CHIP_TAHITI:
508         case CHIP_PITCAIRN:
509         case CHIP_VERDE:
510                 num_crtc = 6;
511                 break;
512         case CHIP_OLAND:
513                 num_crtc = 2;
514                 break;
515         default:
516                 num_crtc = 0;
517         }
518         return num_crtc;
519 }
520
521 void dce_v6_0_disable_dce(struct amdgpu_device *adev)
522 {
523         /* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
524         if (amdgpu_atombios_has_dce_engine_info(adev)) {
525                 u32 tmp;
526                 int crtc_enabled, i;
527
528                 dce_v6_0_set_vga_render_state(adev, false);
529
530                 /* Disable CRTCs */
531                 for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
532                         crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
533                                 CRTC_CONTROL__CRTC_MASTER_EN_MASK;
534                         if (crtc_enabled) {
535                                 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
536                                 tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
537                                 tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
538                                 WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
539                                 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
540                         }
541                 }
542         }
543 }
544
545 static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
546 {
547
548         struct drm_device *dev = encoder->dev;
549         struct amdgpu_device *adev = dev->dev_private;
550         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
551         struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
552         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
553         int bpc = 0;
554         u32 tmp = 0;
555         enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
556
557         if (connector) {
558                 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
559                 bpc = amdgpu_connector_get_monitor_bpc(connector);
560                 dither = amdgpu_connector->dither;
561         }
562
563         /* LVDS FMT is set up by atom */
564         if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
565                 return;
566
567         if (bpc == 0)
568                 return;
569
570
571         switch (bpc) {
572         case 6:
573                 if (dither == AMDGPU_FMT_DITHER_ENABLE)
574                         /* XXX sort out optimal dither settings */
575                         tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
576                                 FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
577                                 FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
578                 else
579                         tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
580                 break;
581         case 8:
582                 if (dither == AMDGPU_FMT_DITHER_ENABLE)
583                         /* XXX sort out optimal dither settings */
584                         tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
585                                 FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
586                                 FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
587                                 FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
588                                 FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
589                 else
590                         tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
591                                 FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
592                 break;
593         case 10:
594         default:
595                 /* not needed */
596                 break;
597         }
598
599         WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
600 }
601
602 /**
603  * si_get_number_of_dram_channels - get the number of dram channels
604  *
605  * @adev: amdgpu_device pointer
606  *
607  * Look up the number of video ram channels (SI).
608  * Used for display watermark bandwidth calculations
609  * Returns the number of dram channels
610  */
611 static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
612 {
613         u32 tmp = RREG32(mmMC_SHARED_CHMAP);
614
615         switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
616         case 0:
617         default:
618                 return 1;
619         case 1:
620                 return 2;
621         case 2:
622                 return 4;
623         case 3:
624                 return 8;
625         case 4:
626                 return 3;
627         case 5:
628                 return 6;
629         case 6:
630                 return 10;
631         case 7:
632                 return 12;
633         case 8:
634                 return 16;
635         }
636 }
637
638 struct dce6_wm_params {
639         u32 dram_channels; /* number of dram channels */
640         u32 yclk;          /* bandwidth per dram data pin in kHz */
641         u32 sclk;          /* engine clock in kHz */
642         u32 disp_clk;      /* display clock in kHz */
643         u32 src_width;     /* viewport width */
644         u32 active_time;   /* active display time in ns */
645         u32 blank_time;    /* blank time in ns */
646         bool interlaced;    /* mode is interlaced */
647         fixed20_12 vsc;    /* vertical scale ratio */
648         u32 num_heads;     /* number of active crtcs */
649         u32 bytes_per_pixel; /* bytes per pixel display + overlay */
650         u32 lb_size;       /* line buffer allocated to pipe */
651         u32 vtaps;         /* vertical scaler taps */
652 };
653
654 /**
655  * dce_v6_0_dram_bandwidth - get the dram bandwidth
656  *
657  * @wm: watermark calculation data
658  *
659  * Calculate the raw dram bandwidth (CIK).
660  * Used for display watermark bandwidth calculations
661  * Returns the dram bandwidth in MBytes/s
662  */
663 static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
664 {
665         /* Calculate raw DRAM Bandwidth */
666         fixed20_12 dram_efficiency; /* 0.7 */
667         fixed20_12 yclk, dram_channels, bandwidth;
668         fixed20_12 a;
669
670         a.full = dfixed_const(1000);
671         yclk.full = dfixed_const(wm->yclk);
672         yclk.full = dfixed_div(yclk, a);
673         dram_channels.full = dfixed_const(wm->dram_channels * 4);
674         a.full = dfixed_const(10);
675         dram_efficiency.full = dfixed_const(7);
676         dram_efficiency.full = dfixed_div(dram_efficiency, a);
677         bandwidth.full = dfixed_mul(dram_channels, yclk);
678         bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
679
680         return dfixed_trunc(bandwidth);
681 }
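
/*
 * Illustrative only (hypothetical numbers, not taken from any real board):
 * with wm->yclk = 1000000 kHz (1 GHz effective memory clock) and
 * wm->dram_channels = 2, the fixed-point math above works out to
 *   (1000000 / 1000) * (2 * 4) * 0.7 = 5600 MBytes/s
 * i.e. memory clock in MHz * bus width in bytes * 0.7 DRAM efficiency.
 * dce_v6_0_dram_bandwidth_for_display() below is the same formula with the
 * worst-case 0.3 display allocation instead of 0.7, giving 2400 MBytes/s here.
 */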
682
683 /**
684  * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
685  *
686  * @wm: watermark calculation data
687  *
688  * Calculate the dram bandwidth used for display (CIK).
689  * Used for display watermark bandwidth calculations
690  * Returns the dram bandwidth for display in MBytes/s
691  */
692 static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
693 {
694         /* Calculate DRAM Bandwidth and the part allocated to display. */
695         fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
696         fixed20_12 yclk, dram_channels, bandwidth;
697         fixed20_12 a;
698
699         a.full = dfixed_const(1000);
700         yclk.full = dfixed_const(wm->yclk);
701         yclk.full = dfixed_div(yclk, a);
702         dram_channels.full = dfixed_const(wm->dram_channels * 4);
703         a.full = dfixed_const(10);
704                 disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
705         disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
706         bandwidth.full = dfixed_mul(dram_channels, yclk);
707         bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
708
709         return dfixed_trunc(bandwidth);
710 }
711
712 /**
713  * dce_v6_0_data_return_bandwidth - get the data return bandwidth
714  *
715  * @wm: watermark calculation data
716  *
717  * Calculate the data return bandwidth used for display (CIK).
718  * Used for display watermark bandwidth calculations
719  * Returns the data return bandwidth in MBytes/s
720  */
721 static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
722 {
723         /* Calculate the display Data return Bandwidth */
724         fixed20_12 return_efficiency; /* 0.8 */
725         fixed20_12 sclk, bandwidth;
726         fixed20_12 a;
727
728         a.full = dfixed_const(1000);
729         sclk.full = dfixed_const(wm->sclk);
730         sclk.full = dfixed_div(sclk, a);
731         a.full = dfixed_const(10);
732         return_efficiency.full = dfixed_const(8);
733         return_efficiency.full = dfixed_div(return_efficiency, a);
734         a.full = dfixed_const(32);
735         bandwidth.full = dfixed_mul(a, sclk);
736         bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
737
738         return dfixed_trunc(bandwidth);
739 }
740
741 /**
742  * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
743  *
744  * @wm: watermark calculation data
745  *
746  * Calculate the dmif bandwidth used for display (CIK).
747  * Used for display watermark bandwidth calculations
748  * Returns the dmif bandwidth in MBytes/s
749  */
750 static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
751 {
752         /* Calculate the DMIF Request Bandwidth */
753         fixed20_12 disp_clk_request_efficiency; /* 0.8 */
754         fixed20_12 disp_clk, bandwidth;
755         fixed20_12 a, b;
756
757         a.full = dfixed_const(1000);
758         disp_clk.full = dfixed_const(wm->disp_clk);
759         disp_clk.full = dfixed_div(disp_clk, a);
760         a.full = dfixed_const(32);
761         b.full = dfixed_mul(a, disp_clk);
762
763         a.full = dfixed_const(10);
764         disp_clk_request_efficiency.full = dfixed_const(8);
765         disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
766
767         bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
768
769         return dfixed_trunc(bandwidth);
770 }
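
/*
 * Both dce_v6_0_data_return_bandwidth() and dce_v6_0_dmif_request_bandwidth()
 * have the same shape: clock in MHz * 32 bytes per cycle * 0.8 efficiency.
 * As a rough, hypothetical example, sclk = 800000 kHz gives
 *   (800000 / 1000) * 32 * 0.8 = 20480 MBytes/s.
 */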
771
772 /**
773  * dce_v6_0_available_bandwidth - get the min available bandwidth
774  *
775  * @wm: watermark calculation data
776  *
777  * Calculate the min available bandwidth used for display (CIK).
778  * Used for display watermark bandwidth calculations
779  * Returns the min available bandwidth in MBytes/s
780  */
781 static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
782 {
783         /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
784         u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
785         u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
786         u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);
787
788         return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
789 }
790
791 /**
792  * dce_v6_0_average_bandwidth - get the average available bandwidth
793  *
794  * @wm: watermark calculation data
795  *
796  * Calculate the average available bandwidth used for display (CIK).
797  * Used for display watermark bandwidth calculations
798  * Returns the average available bandwidth in MBytes/s
799  */
800 static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
801 {
802         /* Calculate the display mode Average Bandwidth
803          * DisplayMode should contain the source and destination dimensions,
804          * timing, etc.
805          */
806         fixed20_12 bpp;
807         fixed20_12 line_time;
808         fixed20_12 src_width;
809         fixed20_12 bandwidth;
810         fixed20_12 a;
811
812         a.full = dfixed_const(1000);
813         line_time.full = dfixed_const(wm->active_time + wm->blank_time);
814         line_time.full = dfixed_div(line_time, a);
815         bpp.full = dfixed_const(wm->bytes_per_pixel);
816         src_width.full = dfixed_const(wm->src_width);
817         bandwidth.full = dfixed_mul(src_width, bpp);
818         bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
819         bandwidth.full = dfixed_div(bandwidth, line_time);
820
821         return dfixed_trunc(bandwidth);
822 }
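
/*
 * Rough, hypothetical example: a 1920 pixel wide source at 4 bytes per pixel,
 * vsc = 1.0 and a total line time of 16000 ns comes out to
 *   1920 * 4 * 1.0 / (16000 / 1000) = 480 MBytes/s average fetch bandwidth.
 */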
823
824 /**
825  * dce_v6_0_latency_watermark - get the latency watermark
826  *
827  * @wm: watermark calculation data
828  *
829  * Calculate the latency watermark (CIK).
830  * Used for display watermark bandwidth calculations
831  * Returns the latency watermark in ns
832  */
833 static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
834 {
835         /* First calculate the latency in ns */
836         u32 mc_latency = 2000; /* 2000 ns. */
837         u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
838         u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
839         u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
840         u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
841         u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
842                 (wm->num_heads * cursor_line_pair_return_time);
843         u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
844         u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
845         u32 tmp, dmif_size = 12288;
846         fixed20_12 a, b, c;
847
848         if (wm->num_heads == 0)
849                 return 0;
850
851         a.full = dfixed_const(2);
852         b.full = dfixed_const(1);
853         if ((wm->vsc.full > a.full) ||
854             ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
855             (wm->vtaps >= 5) ||
856             ((wm->vsc.full >= a.full) && wm->interlaced))
857                 max_src_lines_per_dst_line = 4;
858         else
859                 max_src_lines_per_dst_line = 2;
860
861         a.full = dfixed_const(available_bandwidth);
862         b.full = dfixed_const(wm->num_heads);
863         a.full = dfixed_div(a, b);
864         tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
865         tmp = min(dfixed_trunc(a), tmp);
866
867         lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
868
869         a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
870         b.full = dfixed_const(1000);
871         c.full = dfixed_const(lb_fill_bw);
872         b.full = dfixed_div(c, b);
873         a.full = dfixed_div(a, b);
874         line_fill_time = dfixed_trunc(a);
875
876         if (line_fill_time < wm->active_time)
877                 return latency;
878         else
879                 return latency + (line_fill_time - wm->active_time);
880
881 }
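
/*
 * Sketch of the arithmetic above with hypothetical 1080p-like numbers
 * (available_bandwidth = 5600 MBytes/s, disp_clk = 148500 kHz, 2 heads):
 *   worst_chunk_return_time      = 512 * 8 * 1000 / 5600  ~= 731 ns
 *   cursor_line_pair_return_time = 128 * 4 * 1000 / 5600  ~= 91 ns
 *   dc_latency                   = 40000000 / 148500      ~= 269 ns
 *   latency ~= 2000 + (3 * 731 + 2 * 91) + 269            ~= 4644 ns
 * plus any extra time if the line buffer cannot be refilled within the
 * active portion of a scanline.
 */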
882
883 /**
884  * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
885  * average and available dram bandwidth
886  *
887  * @wm: watermark calculation data
888  *
889  * Check if the display average bandwidth fits in the display
890  * dram bandwidth (CIK).
891  * Used for display watermark bandwidth calculations
892  * Returns true if the display fits, false if not.
893  */
894 static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
895 {
896         if (dce_v6_0_average_bandwidth(wm) <=
897             (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
898                 return true;
899         else
900                 return false;
901 }
902
903 /**
904  * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
905  * average and available bandwidth
906  *
907  * @wm: watermark calculation data
908  *
909  * Check if the display average bandwidth fits in the display
910  * available bandwidth (CIK).
911  * Used for display watermark bandwidth calculations
912  * Returns true if the display fits, false if not.
913  */
914 static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
915 {
916         if (dce_v6_0_average_bandwidth(wm) <=
917             (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
918                 return true;
919         else
920                 return false;
921 }
922
923 /**
924  * dce_v6_0_check_latency_hiding - check latency hiding
925  *
926  * @wm: watermark calculation data
927  *
928  * Check latency hiding (CIK).
929  * Used for display watermark bandwidth calculations
930  * Returns true if the display fits, false if not.
931  */
932 static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
933 {
934         u32 lb_partitions = wm->lb_size / wm->src_width;
935         u32 line_time = wm->active_time + wm->blank_time;
936         u32 latency_tolerant_lines;
937         u32 latency_hiding;
938         fixed20_12 a;
939
940         a.full = dfixed_const(1);
941         if (wm->vsc.full > a.full)
942                 latency_tolerant_lines = 1;
943         else {
944                 if (lb_partitions <= (wm->vtaps + 1))
945                         latency_tolerant_lines = 1;
946                 else
947                         latency_tolerant_lines = 2;
948         }
949
950         latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
951
952         if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
953                 return true;
954         else
955                 return false;
956 }
957
958 /**
959  * dce_v6_0_program_watermarks - program display watermarks
960  *
961  * @adev: amdgpu_device pointer
962  * @amdgpu_crtc: the selected display controller
963  * @lb_size: line buffer size
964  * @num_heads: number of display controllers in use
965  *
966  * Calculate and program the display watermarks for the
967  * selected display controller (CIK).
968  */
969 static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
970                                         struct amdgpu_crtc *amdgpu_crtc,
971                                         u32 lb_size, u32 num_heads)
972 {
973         struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
974         struct dce6_wm_params wm_low, wm_high;
975         u32 dram_channels;
976         u32 active_time;
977         u32 line_time = 0;
978         u32 latency_watermark_a = 0, latency_watermark_b = 0;
979         u32 priority_a_mark = 0, priority_b_mark = 0;
980         u32 priority_a_cnt = PRIORITY_OFF;
981         u32 priority_b_cnt = PRIORITY_OFF;
982         u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
983         fixed20_12 a, b, c;
984
985         if (amdgpu_crtc->base.enabled && num_heads && mode) {
986                 active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
987                 line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
988                 priority_a_cnt = 0;
989                 priority_b_cnt = 0;
990
991                 dram_channels = si_get_number_of_dram_channels(adev);
992
993                 /* watermark for high clocks */
994                 if (adev->pm.dpm_enabled) {
995                         wm_high.yclk =
996                                 amdgpu_dpm_get_mclk(adev, false) * 10;
997                         wm_high.sclk =
998                                 amdgpu_dpm_get_sclk(adev, false) * 10;
999                 } else {
1000                         wm_high.yclk = adev->pm.current_mclk * 10;
1001                         wm_high.sclk = adev->pm.current_sclk * 10;
1002                 }
1003
1004                 wm_high.disp_clk = mode->clock;
1005                 wm_high.src_width = mode->crtc_hdisplay;
1006                 wm_high.active_time = active_time;
1007                 wm_high.blank_time = line_time - wm_high.active_time;
1008                 wm_high.interlaced = false;
1009                 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1010                         wm_high.interlaced = true;
1011                 wm_high.vsc = amdgpu_crtc->vsc;
1012                 wm_high.vtaps = 1;
1013                 if (amdgpu_crtc->rmx_type != RMX_OFF)
1014                         wm_high.vtaps = 2;
1015                 wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1016                 wm_high.lb_size = lb_size;
1017                 wm_high.dram_channels = dram_channels;
1018                 wm_high.num_heads = num_heads;
1019
1020                 if (adev->pm.dpm_enabled) {
1021                 /* watermark for low clocks */
1022                         wm_low.yclk =
1023                                 amdgpu_dpm_get_mclk(adev, true) * 10;
1024                         wm_low.sclk =
1025                                 amdgpu_dpm_get_sclk(adev, true) * 10;
1026                 } else {
1027                         wm_low.yclk = adev->pm.current_mclk * 10;
1028                         wm_low.sclk = adev->pm.current_sclk * 10;
1029                 }
1030
1031                 wm_low.disp_clk = mode->clock;
1032                 wm_low.src_width = mode->crtc_hdisplay;
1033                 wm_low.active_time = active_time;
1034                 wm_low.blank_time = line_time - wm_low.active_time;
1035                 wm_low.interlaced = false;
1036                 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1037                         wm_low.interlaced = true;
1038                 wm_low.vsc = amdgpu_crtc->vsc;
1039                 wm_low.vtaps = 1;
1040                 if (amdgpu_crtc->rmx_type != RMX_OFF)
1041                         wm_low.vtaps = 2;
1042                 wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1043                 wm_low.lb_size = lb_size;
1044                 wm_low.dram_channels = dram_channels;
1045                 wm_low.num_heads = num_heads;
1046
1047                 /* set for high clocks */
1048                 latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
1049                 /* set for low clocks */
1050                 latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);
1051
1052                 /* possibly force display priority to high */
1053                 /* should really do this at mode validation time... */
1054                 if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1055                     !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1056                     !dce_v6_0_check_latency_hiding(&wm_high) ||
1057                     (adev->mode_info.disp_priority == 2)) {
1058                         DRM_DEBUG_KMS("force priority to high\n");
1059                         priority_a_cnt |= PRIORITY_ALWAYS_ON;
1060                         priority_b_cnt |= PRIORITY_ALWAYS_ON;
1061                 }
1062                 if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1063                     !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1064                     !dce_v6_0_check_latency_hiding(&wm_low) ||
1065                     (adev->mode_info.disp_priority == 2)) {
1066                         DRM_DEBUG_KMS("force priority to high\n");
1067                         priority_a_cnt |= PRIORITY_ALWAYS_ON;
1068                         priority_b_cnt |= PRIORITY_ALWAYS_ON;
1069                 }
1070
1071                 a.full = dfixed_const(1000);
1072                 b.full = dfixed_const(mode->clock);
1073                 b.full = dfixed_div(b, a);
1074                 c.full = dfixed_const(latency_watermark_a);
1075                 c.full = dfixed_mul(c, b);
1076                 c.full = dfixed_mul(c, amdgpu_crtc->hsc);
1077                 c.full = dfixed_div(c, a);
1078                 a.full = dfixed_const(16);
1079                 c.full = dfixed_div(c, a);
1080                 priority_a_mark = dfixed_trunc(c);
1081                 priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
1082
1083                 a.full = dfixed_const(1000);
1084                 b.full = dfixed_const(mode->clock);
1085                 b.full = dfixed_div(b, a);
1086                 c.full = dfixed_const(latency_watermark_b);
1087                 c.full = dfixed_mul(c, b);
1088                 c.full = dfixed_mul(c, amdgpu_crtc->hsc);
1089                 c.full = dfixed_div(c, a);
1090                 a.full = dfixed_const(16);
1091                 c.full = dfixed_div(c, a);
1092                 priority_b_mark = dfixed_trunc(c);
1093                 priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
1094
1095                 lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1096         }
1097
1098         /* select wm A */
1099         arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
1100         tmp = arb_control3;
1101         tmp &= ~LATENCY_WATERMARK_MASK(3);
1102         tmp |= LATENCY_WATERMARK_MASK(1);
1103         WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
1104         WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1105                ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT)  |
1106                 (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1107         /* select wm B */
1108         tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
1109         tmp &= ~LATENCY_WATERMARK_MASK(3);
1110         tmp |= LATENCY_WATERMARK_MASK(2);
1111         WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
1112         WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1113                ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1114                 (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1115         /* restore original selection */
1116         WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
1117
1118         /* write the priority marks */
1119         WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
1120         WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
1121
1122         /* save values for DPM */
1123         amdgpu_crtc->line_time = line_time;
1124         amdgpu_crtc->wm_high = latency_watermark_a;
1125
1126         /* Save number of lines the linebuffer leads before the scanout */
1127         amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1128 }
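
/*
 * Hypothetical example of the priority mark math above: with a latency
 * watermark of 4644 ns, a 148500 kHz pixel clock and hsc = 1.0,
 *   4644 * (148500 / 1000) * 1.0 / 1000 / 16 ~= 43
 * i.e. roughly the number of 16-pixel groups scanned out while a request is
 * outstanding, which is the value programmed into PRIORITY_A/B_CNT.
 */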
1129
1130 /* watermark setup */
1131 static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
1132                                    struct amdgpu_crtc *amdgpu_crtc,
1133                                    struct drm_display_mode *mode,
1134                                    struct drm_display_mode *other_mode)
1135 {
1136         u32 tmp, buffer_alloc, i;
1137         u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
1138         /*
1139          * Line Buffer Setup
1140          * There are 3 line buffers, each one shared by 2 display controllers.
1141          * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
1142          * the display controllers.  The partitioning is done via one of four
1143          * preset allocations specified in bits 21:20:
1144          *  0 - half lb
1145          *  2 - whole lb, other crtc must be disabled
1146          */
1147         /* this can get tricky if we have two large displays on a paired group
1148          * of crtcs.  Ideally for multiple large displays we'd assign them to
1149          * non-linked crtcs for maximum line buffer allocation.
1150          */
1151         if (amdgpu_crtc->base.enabled && mode) {
1152                 if (other_mode) {
1153                         tmp = 0; /* 1/2 */
1154                         buffer_alloc = 1;
1155                 } else {
1156                         tmp = 2; /* whole */
1157                         buffer_alloc = 2;
1158                 }
1159         } else {
1160                 tmp = 0;
1161                 buffer_alloc = 0;
1162         }
1163
1164         WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
1165                DC_LB_MEMORY_CONFIG(tmp));
1166
1167         WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1168                (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
1169         for (i = 0; i < adev->usec_timeout; i++) {
1170                 if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1171                     PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
1172                         break;
1173                 udelay(1);
1174         }
1175
1176         if (amdgpu_crtc->base.enabled && mode) {
1177                 switch (tmp) {
1178                 case 0:
1179                 default:
1180                         return 4096 * 2;
1181                 case 2:
1182                         return 8192 * 2;
1183                 }
1184         }
1185
1186         /* controller not enabled, so no lb used */
1187         return 0;
1188 }
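
/*
 * Usage note (based on the callers later in this file): the value returned
 * here is passed straight to dce_v6_0_program_watermarks() as lb_size, so a
 * pair of enabled crtcs sharing a line buffer each see 4096 * 2 = 8192
 * entries, while a single enabled crtc on the pair gets 8192 * 2 = 16384.
 */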
1189
1190
1191 /**
1192  *
1193  * dce_v6_0_bandwidth_update - program display watermarks
1194  *
1195  * @adev: amdgpu_device pointer
1196  *
1197  * Calculate and program the display watermarks and line
1198  * buffer allocation (CIK).
1199  */
1200 static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
1201 {
1202         struct drm_display_mode *mode0 = NULL;
1203         struct drm_display_mode *mode1 = NULL;
1204         u32 num_heads = 0, lb_size;
1205         int i;
1206
1207         if (!adev->mode_info.mode_config_initialized)
1208                 return;
1209
1210         amdgpu_update_display_priority(adev);
1211
1212         for (i = 0; i < adev->mode_info.num_crtc; i++) {
1213                 if (adev->mode_info.crtcs[i]->base.enabled)
1214                         num_heads++;
1215         }
1216         for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
1217                 mode0 = &adev->mode_info.crtcs[i]->base.mode;
1218                 mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
1219                 lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
1220                 dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
1221                 lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
1222                 dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
1223         }
1224 }
1225 /*
1226 static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
1227 {
1228         int i;
1229         u32 offset, tmp;
1230
1231         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1232                 offset = adev->mode_info.audio.pin[i].offset;
1233                 tmp = RREG32_AUDIO_ENDPT(offset,
1234                                       AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1235                 if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
1236                         adev->mode_info.audio.pin[i].connected = false;
1237                 else
1238                         adev->mode_info.audio.pin[i].connected = true;
1239         }
1240
1241 }
1242
1243 static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
1244 {
1245         int i;
1246
1247         dce_v6_0_audio_get_connected_pins(adev);
1248
1249         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1250                 if (adev->mode_info.audio.pin[i].connected)
1251                         return &adev->mode_info.audio.pin[i];
1252         }
1253         DRM_ERROR("No connected audio pins found!\n");
1254         return NULL;
1255 }
1256
1257 static void dce_v6_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1258 {
1259         struct amdgpu_device *adev = encoder->dev->dev_private;
1260         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1261         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1262         u32 offset;
1263
1264         if (!dig || !dig->afmt || !dig->afmt->pin)
1265                 return;
1266
1267         offset = dig->afmt->offset;
1268
1269         WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
1270                AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
1271
1272 }
1273
1274 static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
1275                                                 struct drm_display_mode *mode)
1276 {
1277         DRM_INFO("xxxx: dce_v6_0_audio_write_latency_fields---no imp!!!!!\n");
1278 }
1279
1280 static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1281 {
1282         DRM_INFO("xxxx: dce_v6_0_audio_write_speaker_allocation---no imp!!!!!\n");
1283 }
1284
1285 static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
1286 {
1287         DRM_INFO("xxxx: dce_v6_0_audio_write_sad_regs---no imp!!!!!\n");
1288
1289 }
1290 */
1291 static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
1292                                   struct amdgpu_audio_pin *pin,
1293                                   bool enable)
1294 {
1295         DRM_INFO("xxxx: dce_v6_0_audio_enable---no imp!!!!!\n");
1296 }
1297
1298 static const u32 pin_offsets[7] =
1299 {
1300         (0x1780 - 0x1780),
1301         (0x1786 - 0x1780),
1302         (0x178c - 0x1780),
1303         (0x1792 - 0x1780),
1304         (0x1798 - 0x1780),
1305         (0x179d - 0x1780),
1306         (0x17a4 - 0x1780),
1307 };
1308
1309 static int dce_v6_0_audio_init(struct amdgpu_device *adev)
1310 {
1311         return 0;
1312 }
1313
1314 static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
1315 {
1316
1317 }
1318
1319 /*
1320 static void dce_v6_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1321 {
1322         DRM_INFO("xxxx: dce_v6_0_afmt_update_ACR---no imp!!!!!\n");
1323 }
1324 */
1325 /*
1326  * build a HDMI Video Info Frame
1327  */
1328 /*
1329 static void dce_v6_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1330                                                void *buffer, size_t size)
1331 {
1332         DRM_INFO("xxxx: dce_v6_0_afmt_update_avi_infoframe---no imp!!!!!\n");
1333 }
1334
1335 static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1336 {
1337         DRM_INFO("xxxx: dce_v6_0_audio_set_dto---no imp!!!!!\n");
1338 }
1339 */
1340 /*
1341  * update the info frames with the data from the current display mode
1342  */
1343 static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
1344                                   struct drm_display_mode *mode)
1345 {
1346         DRM_INFO("xxxx: dce_v6_0_afmt_setmode ----no impl !!!!!!!!\n");
1347 }
1348
1349 static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1350 {
1351         struct drm_device *dev = encoder->dev;
1352         struct amdgpu_device *adev = dev->dev_private;
1353         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1354         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1355
1356         if (!dig || !dig->afmt)
1357                 return;
1358
1359         /* Silent, r600_hdmi_enable will raise WARN for us */
1360         if (enable && dig->afmt->enabled)
1361                 return;
1362         if (!enable && !dig->afmt->enabled)
1363                 return;
1364
1365         if (!enable && dig->afmt->pin) {
1366                 dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1367                 dig->afmt->pin = NULL;
1368         }
1369
1370         dig->afmt->enabled = enable;
1371
1372         DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1373                   enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1374 }
1375
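/*
 * Allocate one amdgpu_afmt per DIG encoder and record its register offset;
 * already-allocated entries are freed again if any allocation fails.
 */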
1376 static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
1377 {
1378         int i, j;
1379
1380         for (i = 0; i < adev->mode_info.num_dig; i++)
1381                 adev->mode_info.afmt[i] = NULL;
1382
1383         /* DCE6 has audio blocks tied to DIG encoders */
1384         for (i = 0; i < adev->mode_info.num_dig; i++) {
1385                 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1386                 if (adev->mode_info.afmt[i]) {
1387                         adev->mode_info.afmt[i]->offset = dig_offsets[i];
1388                         adev->mode_info.afmt[i]->id = i;
1389                 } else {
1390                         for (j = 0; j < i; j++) {
1391                                 kfree(adev->mode_info.afmt[j]);
1392                                 adev->mode_info.afmt[j] = NULL;
1393                         }
1394                         DRM_ERROR("Out of memory allocating afmt table\n");
1395                         return -ENOMEM;
1396                 }
1397         }
1398         return 0;
1399 }
1400
1401 static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
1402 {
1403         int i;
1404
1405         for (i = 0; i < adev->mode_info.num_dig; i++) {
1406                 kfree(adev->mode_info.afmt[i]);
1407                 adev->mode_info.afmt[i] = NULL;
1408         }
1409 }
1410
1411 static const u32 vga_control_regs[6] =
1412 {
1413         mmD1VGA_CONTROL,
1414         mmD2VGA_CONTROL,
1415         mmD3VGA_CONTROL,
1416         mmD4VGA_CONTROL,
1417         mmD5VGA_CONTROL,
1418         mmD6VGA_CONTROL,
1419 };
1420
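/* Toggle the legacy VGA path for this crtc via bit 0 of DxVGA_CONTROL. */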
1421 static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
1422 {
1423         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1424         struct drm_device *dev = crtc->dev;
1425         struct amdgpu_device *adev = dev->dev_private;
1426         u32 vga_control;
1427
1428         vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1429         WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
1430 }
1431
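/* Enable or disable the graphics (GRPH) surface of this crtc. */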
1432 static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
1433 {
1434         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1435         struct drm_device *dev = crtc->dev;
1436         struct amdgpu_device *adev = dev->dev_private;
1437
1438         WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
1439 }
1440
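/*
 * Program the primary surface for scanout: pin the framebuffer BO in VRAM
 * (unless called from the atomic/pageflip path, where it is already pinned),
 * translate the DRM pixel format and tiling flags into GRPH_CONTROL bits and
 * update the surface address, pitch and viewport registers.
 */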
1441 static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1442                                      struct drm_framebuffer *fb,
1443                                      int x, int y, int atomic)
1444 {
1445         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1446         struct drm_device *dev = crtc->dev;
1447         struct amdgpu_device *adev = dev->dev_private;
1448         struct amdgpu_framebuffer *amdgpu_fb;
1449         struct drm_framebuffer *target_fb;
1450         struct drm_gem_object *obj;
1451         struct amdgpu_bo *abo;
1452         uint64_t fb_location, tiling_flags;
1453         uint32_t fb_format, fb_pitch_pixels, pipe_config;
1454         u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
1455         u32 viewport_w, viewport_h;
1456         int r;
1457         bool bypass_lut = false;
1458         struct drm_format_name_buf format_name;
1459
1460         /* no fb bound */
1461         if (!atomic && !crtc->primary->fb) {
1462                 DRM_DEBUG_KMS("No FB bound\n");
1463                 return 0;
1464         }
1465
1466         if (atomic) {
1467                 amdgpu_fb = to_amdgpu_framebuffer(fb);
1468                 target_fb = fb;
1469         } else {
1470                 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
1471                 target_fb = crtc->primary->fb;
1472         }
1473
1474         /* If atomic, assume fb object is pinned & idle & fenced and
1475          * just update base pointers
1476          */
1477         obj = amdgpu_fb->obj;
1478         abo = gem_to_amdgpu_bo(obj);
1479         r = amdgpu_bo_reserve(abo, false);
1480         if (unlikely(r != 0))
1481                 return r;
1482
1483         if (atomic) {
1484                 fb_location = amdgpu_bo_gpu_offset(abo);
1485         } else {
1486                 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
1487                 if (unlikely(r != 0)) {
1488                         amdgpu_bo_unreserve(abo);
1489                         return -EINVAL;
1490                 }
1491         }
1492
1493         amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1494         amdgpu_bo_unreserve(abo);
1495
1496         switch (target_fb->format->format) {
1497         case DRM_FORMAT_C8:
1498                 fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
1499                              GRPH_FORMAT(GRPH_FORMAT_INDEXED));
1500                 break;
1501         case DRM_FORMAT_XRGB4444:
1502         case DRM_FORMAT_ARGB4444:
1503                 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1504                              GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
1505 #ifdef __BIG_ENDIAN
1506                 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1507 #endif
1508                 break;
1509         case DRM_FORMAT_XRGB1555:
1510         case DRM_FORMAT_ARGB1555:
1511                 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1512                              GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
1513 #ifdef __BIG_ENDIAN
1514                 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1515 #endif
1516                 break;
1517         case DRM_FORMAT_BGRX5551:
1518         case DRM_FORMAT_BGRA5551:
1519                 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1520                              GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
1521 #ifdef __BIG_ENDIAN
1522                 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1523 #endif
1524                 break;
1525         case DRM_FORMAT_RGB565:
1526                 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1527                              GRPH_FORMAT(GRPH_FORMAT_ARGB565));
1528 #ifdef __BIG_ENDIAN
1529                 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1530 #endif
1531                 break;
1532         case DRM_FORMAT_XRGB8888:
1533         case DRM_FORMAT_ARGB8888:
1534                 fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1535                              GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1536 #ifdef __BIG_ENDIAN
1537                 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1538 #endif
1539                 break;
1540         case DRM_FORMAT_XRGB2101010:
1541         case DRM_FORMAT_ARGB2101010:
1542                 fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1543                              GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
1544 #ifdef __BIG_ENDIAN
1545                 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1546 #endif
1547                 /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
1548                 bypass_lut = true;
1549                 break;
1550         case DRM_FORMAT_BGRX1010102:
1551         case DRM_FORMAT_BGRA1010102:
1552                 fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1553                              GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
1554 #ifdef __BIG_ENDIAN
1555                 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1556 #endif
1557                 /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
1558                 bypass_lut = true;
1559                 break;
1560         default:
1561                 DRM_ERROR("Unsupported screen format %s\n",
1562                           drm_get_format_name(target_fb->format->format, &format_name));
1563                 return -EINVAL;
1564         }
1565
1566         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1567                 unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1568
1569                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1570                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1571                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1572                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1573                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1574
1575                 fb_format |= GRPH_NUM_BANKS(num_banks);
1576                 fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
1577                 fb_format |= GRPH_TILE_SPLIT(tile_split);
1578                 fb_format |= GRPH_BANK_WIDTH(bankw);
1579                 fb_format |= GRPH_BANK_HEIGHT(bankh);
1580                 fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
1581         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1582                 fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
1583         }
1584
1585         pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1586         fb_format |= GRPH_PIPE_CONFIG(pipe_config);
1587
1588         dce_v6_0_vga_enable(crtc, false);
1589
1590         /* Make sure surface address is updated at vertical blank rather than
1591          * horizontal blank
1592          */
1593         WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1594
1595         WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1596                upper_32_bits(fb_location));
1597         WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1598                upper_32_bits(fb_location));
1599         WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1600                (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1601         WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1602                (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1603         WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1604         WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1605
1606         /*
1607          * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
1608          * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
1609          * retain the full precision throughout the pipeline.
1610          */
1611         WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
1612                  (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
1613                  ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
1614
1615         if (bypass_lut)
1616                 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1617
1618         WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1619         WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1620         WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1621         WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1622         WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1623         WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1624
1625         fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
1626         WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1627
1628         dce_v6_0_grph_enable(crtc, true);
1629
1630         WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
1631                        target_fb->height);
1632         x &= ~3;
1633         y &= ~1;
1634         WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
1635                (x << 16) | y);
1636         viewport_w = crtc->mode.hdisplay;
1637         viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1638
1639         WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
1640                (viewport_w << 16) | viewport_h);
1641
1642         /* set pageflip to happen anywhere in vblank interval */
1643         WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
1644
1645         if (!atomic && fb && fb != crtc->primary->fb) {
1646                 amdgpu_fb = to_amdgpu_framebuffer(fb);
1647                 abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
1648                 r = amdgpu_bo_reserve(abo, true);
1649                 if (unlikely(r != 0))
1650                         return r;
1651                 amdgpu_bo_unpin(abo);
1652                 amdgpu_bo_unreserve(abo);
1653         }
1654
1655         /* Bytes per pixel may have changed */
1656         dce_v6_0_bandwidth_update(adev);
1657
1658         return 0;
1659
1660 }
1661
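/* Set or clear the DATA_FORMAT interleave bit for interlaced modes. */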
1662 static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
1663                                     struct drm_display_mode *mode)
1664 {
1665         struct drm_device *dev = crtc->dev;
1666         struct amdgpu_device *adev = dev->dev_private;
1667         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1668
1669         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1670                 WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
1671                        INTERLEAVE_EN);
1672         else
1673                 WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
1674 }
1675
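/*
 * Load the 256-entry hardware LUT from the crtc's 10-bit gamma tables and
 * leave the CSC, prescale, degamma, gamut remap and regamma blocks in their
 * bypass (0) modes.
 */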
1676 static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
1677 {
1678
1679         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1680         struct drm_device *dev = crtc->dev;
1681         struct amdgpu_device *adev = dev->dev_private;
1682         int i;
1683
1684         DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
1685
1686         WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
1687                ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
1688                 (0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
1689         WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
1690                PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
1691         WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
1692                PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
1693         WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1694                ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
1695                 (0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
1696
1697         WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
1698
1699         WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
1700         WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
1701         WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
1702
1703         WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
1704         WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
1705         WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
1706
1707         WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
1708         WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
1709
1710         WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
1711         for (i = 0; i < 256; i++) {
1712                 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
1713                        (amdgpu_crtc->lut_r[i] << 20) |
1714                        (amdgpu_crtc->lut_g[i] << 10) |
1715                        (amdgpu_crtc->lut_b[i] << 0));
1716         }
1717
1718         WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1719                ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
1720                 (0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
1721                 ICON_DEGAMMA_MODE(0) |
1722                 (0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
1723         WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
1724                ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
1725                 (0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
1726         WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1727                ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
1728                 (0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
1729         WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
1730                ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
1731                 (0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
1732         /* XXX match this to the depth of the crtc fmt block, move to modeset? */
1733         WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
1734
1735
1736 }
1737
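/* Map the UNIPHY encoder id and link (A/B) to a DIG encoder index (0-6). */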
1738 static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
1739 {
1740         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1741         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1742
1743         switch (amdgpu_encoder->encoder_id) {
1744         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1745                 return dig->linkb ? 1 : 0;
1746         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1747                 return dig->linkb ? 3 : 2;
1748         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1749                 return dig->linkb ? 5 : 4;
1750         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
1751                 return 6;
1752         default:
1753                 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
1754                 return 0;
1755         }
1756 }
1757
1758 /**
1759  * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
1760  *
1761  * @crtc: drm crtc
1762  *
1763  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
1764  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
1765  * monitors a dedicated PPLL must be used.  If a particular board has
1766  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
1767  * as there is no need to program the PLL itself.  If we are not able to
1768  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
1769  * avoid messing up an existing monitor.
1770  *
1771  *
1772  */
1773 static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
1774 {
1775         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1776         struct drm_device *dev = crtc->dev;
1777         struct amdgpu_device *adev = dev->dev_private;
1778         u32 pll_in_use;
1779         int pll;
1780
1781         if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
1782                 if (adev->clock.dp_extclk)
1783                         /* skip PPLL programming if using ext clock */
1784                         return ATOM_PPLL_INVALID;
1785                 else
1786                         return ATOM_PPLL0;
1787         } else {
1788                 /* use the same PPLL for all monitors with the same clock */
1789                 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
1790                 if (pll != ATOM_PPLL_INVALID)
1791                         return pll;
1792         }
1793
1794         /*  PPLL1, and PPLL2 */
1795         pll_in_use = amdgpu_pll_get_use_mask(crtc);
1796         if (!(pll_in_use & (1 << ATOM_PPLL2)))
1797                 return ATOM_PPLL2;
1798         if (!(pll_in_use & (1 << ATOM_PPLL1)))
1799                 return ATOM_PPLL1;
1800         DRM_ERROR("unable to allocate a PPLL\n");
1801         return ATOM_PPLL_INVALID;
1802 }
1803
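/*
 * Set or clear CURSOR_UPDATE_LOCK so that a group of cursor register writes
 * is applied together.
 */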
1804 static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
1805 {
1806         struct amdgpu_device *adev = crtc->dev->dev_private;
1807         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1808         uint32_t cur_lock;
1809
1810         cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
1811         if (lock)
1812                 cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
1813         else
1814                 cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
1815         WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
1816 }
1817
1818 static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
1819 {
1820         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1821         struct amdgpu_device *adev = crtc->dev->dev_private;
1822
1823         WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
1824                    (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
1825                    (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
1826
1827
1828 }
1829
1830 static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
1831 {
1832         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1833         struct amdgpu_device *adev = crtc->dev->dev_private;
1834
1835         WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1836                upper_32_bits(amdgpu_crtc->cursor_addr));
1837         WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1838                lower_32_bits(amdgpu_crtc->cursor_addr));
1839
1840         WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
1841                    CUR_CONTROL__CURSOR_EN_MASK |
1842                    (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
1843                    (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
1844
1845 }
1846
1847 static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
1848                                        int x, int y)
1849 {
1850         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1851         struct amdgpu_device *adev = crtc->dev->dev_private;
1852         int xorigin = 0, yorigin = 0;
1853
1854         int w = amdgpu_crtc->cursor_width;
1855
1856         amdgpu_crtc->cursor_x = x;
1857         amdgpu_crtc->cursor_y = y;
1858
1859         /* avivo cursors are offset into the total surface */
1860         x += crtc->x;
1861         y += crtc->y;
1862         DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
1863
1864         if (x < 0) {
1865                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
1866                 x = 0;
1867         }
1868         if (y < 0) {
1869                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
1870                 y = 0;
1871         }
1872
1873         WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
1874         WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
1875         WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
1876                ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
1877
1878         return 0;
1879 }
1880
1881 static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
1882                                      int x, int y)
1883 {
1884         int ret;
1885
1886         dce_v6_0_lock_cursor(crtc, true);
1887         ret = dce_v6_0_cursor_move_locked(crtc, x, y);
1888         dce_v6_0_lock_cursor(crtc, false);
1889
1890         return ret;
1891 }
1892
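/*
 * Legacy cursor update: pin the new cursor BO in VRAM, adjust the position
 * for a changed hotspot, show the cursor and unpin the previous cursor BO.
 */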
1893 static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
1894                                      struct drm_file *file_priv,
1895                                      uint32_t handle,
1896                                      uint32_t width,
1897                                      uint32_t height,
1898                                      int32_t hot_x,
1899                                      int32_t hot_y)
1900 {
1901         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1902         struct drm_gem_object *obj;
1903         struct amdgpu_bo *aobj;
1904         int ret;
1905
1906         if (!handle) {
1907                 /* turn off cursor */
1908                 dce_v6_0_hide_cursor(crtc);
1909                 obj = NULL;
1910                 goto unpin;
1911         }
1912
1913         if ((width > amdgpu_crtc->max_cursor_width) ||
1914             (height > amdgpu_crtc->max_cursor_height)) {
1915                 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
1916                 return -EINVAL;
1917         }
1918
1919         obj = drm_gem_object_lookup(file_priv, handle);
1920         if (!obj) {
1921                 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
1922                 return -ENOENT;
1923         }
1924
1925         aobj = gem_to_amdgpu_bo(obj);
1926         ret = amdgpu_bo_reserve(aobj, false);
1927         if (ret != 0) {
1928                 drm_gem_object_unreference_unlocked(obj);
1929                 return ret;
1930         }
1931
1932         ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
1933         amdgpu_bo_unreserve(aobj);
1934         if (ret) {
1935                 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
1936                 drm_gem_object_unreference_unlocked(obj);
1937                 return ret;
1938         }
1939
1940         dce_v6_0_lock_cursor(crtc, true);
1941
1942         if (width != amdgpu_crtc->cursor_width ||
1943             height != amdgpu_crtc->cursor_height ||
1944             hot_x != amdgpu_crtc->cursor_hot_x ||
1945             hot_y != amdgpu_crtc->cursor_hot_y) {
1946                 int x, y;
1947
1948                 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
1949                 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
1950
1951                 dce_v6_0_cursor_move_locked(crtc, x, y);
1952
1953                 amdgpu_crtc->cursor_width = width;
1954                 amdgpu_crtc->cursor_height = height;
1955                 amdgpu_crtc->cursor_hot_x = hot_x;
1956                 amdgpu_crtc->cursor_hot_y = hot_y;
1957         }
1958
1959         dce_v6_0_show_cursor(crtc);
1960         dce_v6_0_lock_cursor(crtc, false);
1961
1962 unpin:
1963         if (amdgpu_crtc->cursor_bo) {
1964                 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1965                 ret = amdgpu_bo_reserve(aobj, true);
1966                 if (likely(ret == 0)) {
1967                         amdgpu_bo_unpin(aobj);
1968                         amdgpu_bo_unreserve(aobj);
1969                 }
1970                 drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
1971         }
1972
1973         amdgpu_crtc->cursor_bo = obj;
1974         return 0;
1975 }
1976
1977 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
1978 {
1979         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1980
1981         if (amdgpu_crtc->cursor_bo) {
1982                 dce_v6_0_lock_cursor(crtc, true);
1983
1984                 dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
1985                                             amdgpu_crtc->cursor_y);
1986
1987                 dce_v6_0_show_cursor(crtc);
1988                 dce_v6_0_lock_cursor(crtc, false);
1989         }
1990 }
1991
1992 static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
1993                                    u16 *blue, uint32_t size,
1994                                    struct drm_modeset_acquire_ctx *ctx)
1995 {
1996         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1997         int i;
1998
1999         /* userspace palettes are always correct as is */
2000         for (i = 0; i < size; i++) {
2001                 amdgpu_crtc->lut_r[i] = red[i] >> 6;
2002                 amdgpu_crtc->lut_g[i] = green[i] >> 6;
2003                 amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2004         }
2005         dce_v6_0_crtc_load_lut(crtc);
2006
2007         return 0;
2008 }
2009
2010 static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
2011 {
2012         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2013
2014         drm_crtc_cleanup(crtc);
2015         kfree(amdgpu_crtc);
2016 }
2017
2018 static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
2019         .cursor_set2 = dce_v6_0_crtc_cursor_set2,
2020         .cursor_move = dce_v6_0_crtc_cursor_move,
2021         .gamma_set = dce_v6_0_crtc_gamma_set,
2022         .set_config = amdgpu_crtc_set_config,
2023         .destroy = dce_v6_0_crtc_destroy,
2024         .page_flip_target = amdgpu_crtc_page_flip_target,
2025 };
2026
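/*
 * DPMS: enable/blank the crtc through atombios, keep the vblank and pageflip
 * interrupt state in sync and recompute the power management clocks.
 */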
2027 static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2028 {
2029         struct drm_device *dev = crtc->dev;
2030         struct amdgpu_device *adev = dev->dev_private;
2031         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2032         unsigned type;
2033
2034         switch (mode) {
2035         case DRM_MODE_DPMS_ON:
2036                 amdgpu_crtc->enabled = true;
2037                 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2038                 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2039                 /* Make sure VBLANK and PFLIP interrupts are still enabled */
2040                 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2041                 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2042                 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2043                 drm_crtc_vblank_on(crtc);
2044                 dce_v6_0_crtc_load_lut(crtc);
2045                 break;
2046         case DRM_MODE_DPMS_STANDBY:
2047         case DRM_MODE_DPMS_SUSPEND:
2048         case DRM_MODE_DPMS_OFF:
2049                 drm_crtc_vblank_off(crtc);
2050                 if (amdgpu_crtc->enabled)
2051                         amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2052                 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2053                 amdgpu_crtc->enabled = false;
2054                 break;
2055         }
2056         /* adjust pm to dpms */
2057         amdgpu_pm_compute_clocks(adev);
2058 }
2059
2060 static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
2061 {
2062         /* disable crtc pair power gating before programming */
2063         amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2064         amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2065         dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2066 }
2067
2068 static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
2069 {
2070         dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2071         amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2072 }
2073
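/*
 * Tear down the crtc: unpin the scanout buffer, disable the GRPH surface,
 * power gate the crtc and release its PPLL unless another enabled crtc still
 * shares it.
 */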
2074 static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
2075 {
2076
2077         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2078         struct drm_device *dev = crtc->dev;
2079         struct amdgpu_device *adev = dev->dev_private;
2080         struct amdgpu_atom_ss ss;
2081         int i;
2082
2083         dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2084         if (crtc->primary->fb) {
2085                 int r;
2086                 struct amdgpu_framebuffer *amdgpu_fb;
2087                 struct amdgpu_bo *abo;
2088
2089                 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2090                 abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2091                 r = amdgpu_bo_reserve(abo, true);
2092                 if (unlikely(r))
2093                         DRM_ERROR("failed to reserve abo before unpin\n");
2094                 else {
2095                         amdgpu_bo_unpin(abo);
2096                         amdgpu_bo_unreserve(abo);
2097                 }
2098         }
2099         /* disable the GRPH */
2100         dce_v6_0_grph_enable(crtc, false);
2101
2102         amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2103
2104         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2105                 if (adev->mode_info.crtcs[i] &&
2106                     adev->mode_info.crtcs[i]->enabled &&
2107                     i != amdgpu_crtc->crtc_id &&
2108                     amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2109                         /* one other crtc is using this pll, don't turn
2110                          * off the pll
2111                          */
2112                         goto done;
2113                 }
2114         }
2115
2116         switch (amdgpu_crtc->pll_id) {
2117         case ATOM_PPLL1:
2118         case ATOM_PPLL2:
2119                 /* disable the ppll */
2120                 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2121                                                  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2122                 break;
2123         default:
2124                 break;
2125         }
2126 done:
2127         amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2128         amdgpu_crtc->adjusted_clock = 0;
2129         amdgpu_crtc->encoder = NULL;
2130         amdgpu_crtc->connector = NULL;
2131 }
2132
2133 static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
2134                                   struct drm_display_mode *mode,
2135                                   struct drm_display_mode *adjusted_mode,
2136                                   int x, int y, struct drm_framebuffer *old_fb)
2137 {
2138         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2139
2140         if (!amdgpu_crtc->adjusted_clock)
2141                 return -EINVAL;
2142
2143         amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2144         amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2145         dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2146         amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2147         amdgpu_atombios_crtc_scaler_setup(crtc);
2148         dce_v6_0_cursor_reset(crtc);
2149         /* update the hw mode for dpm */
2150         amdgpu_crtc->hw_mode = *adjusted_mode;
2151
2152         return 0;
2153 }
2154
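/*
 * Bind the encoder/connector to the crtc, apply scaling fixups, prepare the
 * PLL parameters and reserve a PPLL; fail if a non-DP path cannot get one.
 */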
2155 static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
2156                                      const struct drm_display_mode *mode,
2157                                      struct drm_display_mode *adjusted_mode)
2158 {
2159
2160         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2161         struct drm_device *dev = crtc->dev;
2162         struct drm_encoder *encoder;
2163
2164         /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2165         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2166                 if (encoder->crtc == crtc) {
2167                         amdgpu_crtc->encoder = encoder;
2168                         amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2169                         break;
2170                 }
2171         }
2172         if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2173                 amdgpu_crtc->encoder = NULL;
2174                 amdgpu_crtc->connector = NULL;
2175                 return false;
2176         }
2177         if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2178                 return false;
2179         if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2180                 return false;
2181         /* pick pll */
2182         amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
2183         /* if we can't get a PPLL for a non-DP encoder, fail */
2184         if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2185             !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2186                 return false;
2187
2188         return true;
2189 }
2190
2191 static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2192                                   struct drm_framebuffer *old_fb)
2193 {
2194         return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2195 }
2196
2197 static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2198                                          struct drm_framebuffer *fb,
2199                                          int x, int y, enum mode_set_atomic state)
2200 {
2201         return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
2202 }
2203
2204 static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
2205         .dpms = dce_v6_0_crtc_dpms,
2206         .mode_fixup = dce_v6_0_crtc_mode_fixup,
2207         .mode_set = dce_v6_0_crtc_mode_set,
2208         .mode_set_base = dce_v6_0_crtc_set_base,
2209         .mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
2210         .prepare = dce_v6_0_crtc_prepare,
2211         .commit = dce_v6_0_crtc_commit,
2212         .load_lut = dce_v6_0_crtc_load_lut,
2213         .disable = dce_v6_0_crtc_disable,
2214 };
2215
2216 static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
2217 {
2218         struct amdgpu_crtc *amdgpu_crtc;
2219         int i;
2220
2221         amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2222                               (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2223         if (amdgpu_crtc == NULL)
2224                 return -ENOMEM;
2225
2226         drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
2227
2228         drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2229         amdgpu_crtc->crtc_id = index;
2230         adev->mode_info.crtcs[index] = amdgpu_crtc;
2231
2232         amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
2233         amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
2234         adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2235         adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2236
2237         for (i = 0; i < 256; i++) {
2238                 amdgpu_crtc->lut_r[i] = i << 2;
2239                 amdgpu_crtc->lut_g[i] = i << 2;
2240                 amdgpu_crtc->lut_b[i] = i << 2;
2241         }
2242
2243         amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2244
2245         amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2246         amdgpu_crtc->adjusted_clock = 0;
2247         amdgpu_crtc->encoder = NULL;
2248         amdgpu_crtc->connector = NULL;
2249         drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
2250
2251         return 0;
2252 }
2253
2254 static int dce_v6_0_early_init(void *handle)
2255 {
2256         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2257
2258         adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
2259         adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
2260
2261         dce_v6_0_set_display_funcs(adev);
2262         dce_v6_0_set_irq_funcs(adev);
2263
2264         adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
2265
2266         switch (adev->asic_type) {
2267         case CHIP_TAHITI:
2268         case CHIP_PITCAIRN:
2269         case CHIP_VERDE:
2270                 adev->mode_info.num_hpd = 6;
2271                 adev->mode_info.num_dig = 6;
2272                 break;
2273         case CHIP_OLAND:
2274                 adev->mode_info.num_hpd = 2;
2275                 adev->mode_info.num_dig = 2;
2276                 break;
2277         default:
2278                 return -EINVAL;
2279         }
2280
2281         return 0;
2282 }
2283
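/*
 * sw_init: register the interrupt sources (src ids 1-6 for crtc, 8-18 even
 * for pageflip, 42 for HPD), set up the DRM mode_config limits and create
 * the crtcs, connectors, AFMT and audio state.
 */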
2284 static int dce_v6_0_sw_init(void *handle)
2285 {
2286         int r, i;
2287         bool ret;
2288         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2289
2290         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2291                 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2292                 if (r)
2293                         return r;
2294         }
2295
2296         for (i = 8; i < 20; i += 2) {
2297                 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2298                 if (r)
2299                         return r;
2300         }
2301
2302         /* HPD hotplug */
2303         r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2304         if (r)
2305                 return r;
2306
2307         adev->mode_info.mode_config_initialized = true;
2308
2309         adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2310         adev->ddev->mode_config.async_page_flip = true;
2311         adev->ddev->mode_config.max_width = 16384;
2312         adev->ddev->mode_config.max_height = 16384;
2313         adev->ddev->mode_config.preferred_depth = 24;
2314         adev->ddev->mode_config.prefer_shadow = 1;
2315         adev->ddev->mode_config.fb_base = adev->mc.aper_base;
2316
2317         r = amdgpu_modeset_create_props(adev);
2318         if (r)
2319                 return r;
2320
2321         adev->ddev->mode_config.max_width = 16384;
2322         adev->ddev->mode_config.max_height = 16384;
2323
2324         /* allocate crtcs */
2325         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2326                 r = dce_v6_0_crtc_init(adev, i);
2327                 if (r)
2328                         return r;
2329         }
2330
2331         ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
2332         if (ret)
2333                 amdgpu_print_display_setup(adev->ddev);
2334         else
2335                 return -EINVAL;
2336
2337         /* setup afmt */
2338         r = dce_v6_0_afmt_init(adev);
2339         if (r)
2340                 return r;
2341
2342         r = dce_v6_0_audio_init(adev);
2343         if (r)
2344                 return r;
2345
2346         drm_kms_helper_poll_init(adev->ddev);
2347
2348         return r;
2349 }
2350
2351 static int dce_v6_0_sw_fini(void *handle)
2352 {
2353         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2354
2355         kfree(adev->mode_info.bios_hardcoded_edid);
2356
2357         drm_kms_helper_poll_fini(adev->ddev);
2358
2359         dce_v6_0_audio_fini(adev);
2360         dce_v6_0_afmt_fini(adev);
2361
2362         drm_mode_config_cleanup(adev->ddev);
2363         adev->mode_info.mode_config_initialized = false;
2364
2365         return 0;
2366 }
2367
2368 static int dce_v6_0_hw_init(void *handle)
2369 {
2370         int i;
2371         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2372
2373         /* init dig PHYs, disp eng pll */
2374         amdgpu_atombios_encoder_init_dig(adev);
2375         amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2376
2377         /* initialize hpd */
2378         dce_v6_0_hpd_init(adev);
2379
2380         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2381                 dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2382         }
2383
2384         dce_v6_0_pageflip_interrupt_init(adev);
2385
2386         return 0;
2387 }
2388
2389 static int dce_v6_0_hw_fini(void *handle)
2390 {
2391         int i;
2392         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2393
2394         dce_v6_0_hpd_fini(adev);
2395
2396         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2397                 dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2398         }
2399
2400         dce_v6_0_pageflip_interrupt_fini(adev);
2401
2402         return 0;
2403 }
2404
2405 static int dce_v6_0_suspend(void *handle)
2406 {
2407         return dce_v6_0_hw_fini(handle);
2408 }
2409
2410 static int dce_v6_0_resume(void *handle)
2411 {
2412         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2413         int ret;
2414
2415         ret = dce_v6_0_hw_init(handle);
2416
2417         /* turn the backlight back on */
2418         if (adev->mode_info.bl_encoder) {
2419                 u8 bl_level = amdgpu_display_backlight_get_level(adev,
2420                                                                   adev->mode_info.bl_encoder);
2421                 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2422                                                     bl_level);
2423         }
2424
2425         return ret;
2426 }
2427
2428 static bool dce_v6_0_is_idle(void *handle)
2429 {
2430         return true;
2431 }
2432
2433 static int dce_v6_0_wait_for_idle(void *handle)
2434 {
2435         return 0;
2436 }
2437
2438 static int dce_v6_0_soft_reset(void *handle)
2439 {
2440         DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n");
2441         return 0;
2442 }
2443
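/* Mask or unmask the per-crtc VBLANK interrupt in INT_MASK. */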
2444 static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2445                                                      int crtc,
2446                                                      enum amdgpu_interrupt_state state)
2447 {
2448         u32 reg_block, interrupt_mask;
2449
2450         if (crtc >= adev->mode_info.num_crtc) {
2451                 DRM_DEBUG("invalid crtc %d\n", crtc);
2452                 return;
2453         }
2454
2455         switch (crtc) {
2456         case 0:
2457                 reg_block = SI_CRTC0_REGISTER_OFFSET;
2458                 break;
2459         case 1:
2460                 reg_block = SI_CRTC1_REGISTER_OFFSET;
2461                 break;
2462         case 2:
2463                 reg_block = SI_CRTC2_REGISTER_OFFSET;
2464                 break;
2465         case 3:
2466                 reg_block = SI_CRTC3_REGISTER_OFFSET;
2467                 break;
2468         case 4:
2469                 reg_block = SI_CRTC4_REGISTER_OFFSET;
2470                 break;
2471         case 5:
2472                 reg_block = SI_CRTC5_REGISTER_OFFSET;
2473                 break;
2474         default:
2475                 DRM_DEBUG("invalid crtc %d\n", crtc);
2476                 return;
2477         }
2478
2479         switch (state) {
2480         case AMDGPU_IRQ_STATE_DISABLE:
2481                 interrupt_mask = RREG32(mmINT_MASK + reg_block);
2482                 interrupt_mask &= ~VBLANK_INT_MASK;
2483                 WREG32(mmINT_MASK + reg_block, interrupt_mask);
2484                 break;
2485         case AMDGPU_IRQ_STATE_ENABLE:
2486                 interrupt_mask = RREG32(mmINT_MASK + reg_block);
2487                 interrupt_mask |= VBLANK_INT_MASK;
2488                 WREG32(mmINT_MASK + reg_block, interrupt_mask);
2489                 break;
2490         default:
2491                 break;
2492         }
2493 }
2494
2495 static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2496                                                     int crtc,
2497                                                     enum amdgpu_interrupt_state state)
2498 {
2499
2500 }
2501
2502 static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2503                                             struct amdgpu_irq_src *src,
2504                                             unsigned type,
2505                                             enum amdgpu_interrupt_state state)
2506 {
2507         u32 dc_hpd_int_cntl;
2508
2509         if (type >= adev->mode_info.num_hpd) {
2510                 DRM_DEBUG("invalid hpd %d\n", type);
2511                 return 0;
2512         }
2513
2514         switch (state) {
2515         case AMDGPU_IRQ_STATE_DISABLE:
2516                 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2517                 dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
2518                 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2519                 break;
2520         case AMDGPU_IRQ_STATE_ENABLE:
2521                 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2522                 dc_hpd_int_cntl |= DC_HPDx_INT_EN;
2523                 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2524                 break;
2525         default:
2526                 break;
2527         }
2528
2529         return 0;
2530 }
2531
2532 static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
2533                                              struct amdgpu_irq_src *src,
2534                                              unsigned type,
2535                                              enum amdgpu_interrupt_state state)
2536 {
2537         switch (type) {
2538         case AMDGPU_CRTC_IRQ_VBLANK1:
2539                 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
2540                 break;
2541         case AMDGPU_CRTC_IRQ_VBLANK2:
2542                 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
2543                 break;
2544         case AMDGPU_CRTC_IRQ_VBLANK3:
2545                 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
2546                 break;
2547         case AMDGPU_CRTC_IRQ_VBLANK4:
2548                 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
2549                 break;
2550         case AMDGPU_CRTC_IRQ_VBLANK5:
2551                 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
2552                 break;
2553         case AMDGPU_CRTC_IRQ_VBLANK6:
2554                 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
2555                 break;
2556         case AMDGPU_CRTC_IRQ_VLINE1:
2557                 dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
2558                 break;
2559         case AMDGPU_CRTC_IRQ_VLINE2:
2560                 dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
2561                 break;
2562         case AMDGPU_CRTC_IRQ_VLINE3:
2563                 dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
2564                 break;
2565         case AMDGPU_CRTC_IRQ_VLINE4:
2566                 dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
2567                 break;
2568         case AMDGPU_CRTC_IRQ_VLINE5:
2569                 dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
2570                 break;
2571         case AMDGPU_CRTC_IRQ_VLINE6:
2572                 dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
2573                 break;
2574         default:
2575                 break;
2576         }
2577         return 0;
2578 }
2579
2580 static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
2581                              struct amdgpu_irq_src *source,
2582                              struct amdgpu_iv_entry *entry)
2583 {
2584         unsigned crtc = entry->src_id - 1;
2585         uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
2586         unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
2587
2588         switch (entry->src_data[0]) {
2589         case 0: /* vblank */
2590                 if (disp_int & interrupt_status_offsets[crtc].vblank)
2591                         WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
2592                 else
2593                         DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2594
2595                 if (amdgpu_irq_enabled(adev, source, irq_type)) {
2596                         drm_handle_vblank(adev->ddev, crtc);
2597                 }
2598                 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
2599                 break;
2600         case 1: /* vline */
2601                 if (disp_int & interrupt_status_offsets[crtc].vline)
2602                         WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
2603                 else
2604                         DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2605
2606                 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
2607                 break;
2608         default:
2609                 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
2610                 break;
2611         }
2612
2613         return 0;
2614 }
2615
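/* Mask or unmask the per-crtc pageflip interrupt in GRPH_INTERRUPT_CONTROL. */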
2616 static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
2617                                                  struct amdgpu_irq_src *src,
2618                                                  unsigned type,
2619                                                  enum amdgpu_interrupt_state state)
2620 {
2621         u32 reg;
2622
2623         if (type >= adev->mode_info.num_crtc) {
2624                 DRM_ERROR("invalid pageflip crtc %d\n", type);
2625                 return -EINVAL;
2626         }
2627
2628         reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
2629         if (state == AMDGPU_IRQ_STATE_DISABLE)
2630                 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
2631                        reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
2632         else
2633                 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
2634                        reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
2635
2636         return 0;
2637 }
2638
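/*
 * Pageflip interrupt: ack the GRPH flip status, complete the pending flip
 * work, deliver the vblank event and schedule the unpin of the old buffer.
 */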
2639 static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
2640                                  struct amdgpu_irq_src *source,
2641                                  struct amdgpu_iv_entry *entry)
2642 {
2643         unsigned long flags;
2644         unsigned crtc_id;
2645         struct amdgpu_crtc *amdgpu_crtc;
2646         struct amdgpu_flip_work *works;
2647
2648         crtc_id = (entry->src_id - 8) >> 1;
2649         amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
2650
2651         if (crtc_id >= adev->mode_info.num_crtc) {
2652                 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
2653                 return -EINVAL;
2654         }
2655
2656         if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
2657             GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
2658                 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
2659                        GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
2660
2661         /* the IRQ can fire during early init, before the crtc is set up */
2662         if (amdgpu_crtc == NULL)
2663                 return 0;
2664
2665         spin_lock_irqsave(&adev->ddev->event_lock, flags);
2666         works = amdgpu_crtc->pflip_works;
2667         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
2668                 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
2669                                                 "AMDGPU_FLIP_SUBMITTED(%d)\n",
2670                                                 amdgpu_crtc->pflip_status,
2671                                                 AMDGPU_FLIP_SUBMITTED);
2672                 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
2673                 return 0;
2674         }
2675
2676         /* page flip completed. clean up */
2677         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
2678         amdgpu_crtc->pflip_works = NULL;
2679
2680         /* wake up userspace */
2681         if (works->event)
2682                 drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
2683
2684         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
2685
2686         drm_crtc_vblank_put(&amdgpu_crtc->base);
2687         schedule_work(&works->unpin_work);
2688
2689         return 0;
2690 }
2691
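/* Hotplug interrupt: ack the HPD status bit and schedule the hotplug work. */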
2692 static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
2693                             struct amdgpu_irq_src *source,
2694                             struct amdgpu_iv_entry *entry)
2695 {
2696         uint32_t disp_int, mask, tmp;
2697         unsigned hpd;
2698
2699         if (entry->src_data[0] >= adev->mode_info.num_hpd) {
2700                 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
2701                 return 0;
2702         }
2703
2704         hpd = entry->src_data[0];
2705         disp_int = RREG32(interrupt_status_offsets[hpd].reg);
2706         mask = interrupt_status_offsets[hpd].hpd;
2707
2708         if (disp_int & mask) {
2709                 tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
2710                 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
2711                 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
2712                 schedule_work(&adev->hotplug_work);
2713                 DRM_INFO("IH: HPD%d\n", hpd + 1);
2714         }
2715
2716         return 0;
2717
2718 }
2719
2720 static int dce_v6_0_set_clockgating_state(void *handle,
2721                                           enum amd_clockgating_state state)
2722 {
2723         return 0;
2724 }
2725
2726 static int dce_v6_0_set_powergating_state(void *handle,
2727                                           enum amd_powergating_state state)
2728 {
2729         return 0;
2730 }
2731
2732 static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
2733         .name = "dce_v6_0",
2734         .early_init = dce_v6_0_early_init,
2735         .late_init = NULL,
2736         .sw_init = dce_v6_0_sw_init,
2737         .sw_fini = dce_v6_0_sw_fini,
2738         .hw_init = dce_v6_0_hw_init,
2739         .hw_fini = dce_v6_0_hw_fini,
2740         .suspend = dce_v6_0_suspend,
2741         .resume = dce_v6_0_resume,
2742         .is_idle = dce_v6_0_is_idle,
2743         .wait_for_idle = dce_v6_0_wait_for_idle,
2744         .soft_reset = dce_v6_0_soft_reset,
2745         .set_clockgating_state = dce_v6_0_set_clockgating_state,
2746         .set_powergating_state = dce_v6_0_set_powergating_state,
2747 };
2748
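/*
 * Encoder mode_set: records the adjusted pixel clock, forces the encoder
 * off through the atombios DPMS call (this needs crtc info, so it cannot
 * happen in prepare()), restores the interleave setting that the scaler
 * setup may have cleared, and enables/programs the AFMT block for HDMI.
 */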
2749 static void
2750 dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
2751                           struct drm_display_mode *mode,
2752                           struct drm_display_mode *adjusted_mode)
2753 {
2754
2755         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2756
2757         amdgpu_encoder->pixel_clock = adjusted_mode->clock;
2758
2759         /* need to call this here rather than in prepare() since we need some crtc info */
2760         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
2761
2762         /* the scaler setup clears the interleave setting on some chips, so restore it here */
2763         dce_v6_0_set_interleave(encoder->crtc, mode);
2764
2765         if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
2766                 dce_v6_0_afmt_enable(encoder, true);
2767                 dce_v6_0_afmt_setmode(encoder, adjusted_mode);
2768         }
2769 }
2770
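/*
 * Encoder prepare: picks a DIG encoder for digital outputs (and an AFMT
 * block for DFP devices), locks the atombios scratch registers, routes the
 * i2c clock/data port when the connector sits behind a router, powers up
 * eDP panels for the mode set, then selects the crtc source and programs
 * the FMT block.
 */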
2771 static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
2772 {
2773
2774         struct amdgpu_device *adev = encoder->dev->dev_private;
2775         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2776         struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
2777
2778         if ((amdgpu_encoder->active_device &
2779              (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
2780             (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
2781              ENCODER_OBJECT_ID_NONE)) {
2782                 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2783                 if (dig) {
2784                         dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
2785                         if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
2786                                 dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
2787                 }
2788         }
2789
2790         amdgpu_atombios_scratch_regs_lock(adev, true);
2791
2792         if (connector) {
2793                 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
2794
2795                 /* select the clock/data port if it uses a router */
2796                 if (amdgpu_connector->router.cd_valid)
2797                         amdgpu_i2c_router_select_cd_port(amdgpu_connector);
2798
2799                 /* turn eDP panel on for mode set */
2800                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
2801                         amdgpu_atombios_encoder_set_edp_panel_power(connector,
2802                                                              ATOM_TRANSMITTER_ACTION_POWER_ON);
2803         }
2804
2805         /* this is needed for the pll/ss setup to work correctly in some cases */
2806         amdgpu_atombios_encoder_set_crtc_source(encoder);
2807         /* set up the FMT blocks */
2808         dce_v6_0_program_fmt(encoder);
2809 }
2810
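/*
 * Encoder commit: turns the encoder on now that the crtc is configured and
 * releases the scratch register lock taken in prepare().
 */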
2811 static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
2812 {
2813
2814         struct drm_device *dev = encoder->dev;
2815         struct amdgpu_device *adev = dev->dev_private;
2816
2817         /* need to call this here as we need the crtc set up */
2818         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
2819         amdgpu_atombios_scratch_regs_lock(adev, false);
2820 }
2821
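/*
 * Encoder disable: powers the encoder down; for digital encoders this also
 * disables AFMT in HDMI mode and releases the DIG encoder assignment
 * before clearing active_device.
 */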
2822 static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
2823 {
2824
2825         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2826         struct amdgpu_encoder_atom_dig *dig;
2827
2828         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
2829
2830         if (amdgpu_atombios_encoder_is_digital(encoder)) {
2831                 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
2832                         dce_v6_0_afmt_enable(encoder, false);
2833                 dig = amdgpu_encoder->enc_priv;
2834                 dig->dig_encoder = -1;
2835         }
2836         amdgpu_encoder->active_device = 0;
2837 }
2838
2839 /* these are handled by the primary encoders */
2840 static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
2841 {
2842
2843 }
2844
2845 static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
2846 {
2847
2848 }
2849
2850 static void
2851 dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
2852                       struct drm_display_mode *mode,
2853                       struct drm_display_mode *adjusted_mode)
2854 {
2855
2856 }
2857
2858 static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
2859 {
2860
2861 }
2862
2863 static void
2864 dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
2865 {
2866
2867 }
2868
2869 static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
2870                                     const struct drm_display_mode *mode,
2871                                     struct drm_display_mode *adjusted_mode)
2872 {
2873         return true;
2874 }
2875
2876 static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
2877         .dpms = dce_v6_0_ext_dpms,
2878         .mode_fixup = dce_v6_0_ext_mode_fixup,
2879         .prepare = dce_v6_0_ext_prepare,
2880         .mode_set = dce_v6_0_ext_mode_set,
2881         .commit = dce_v6_0_ext_commit,
2882         .disable = dce_v6_0_ext_disable,
2883         /* no detect for TMDS/LVDS yet */
2884 };
2885
2886 static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
2887         .dpms = amdgpu_atombios_encoder_dpms,
2888         .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
2889         .prepare = dce_v6_0_encoder_prepare,
2890         .mode_set = dce_v6_0_encoder_mode_set,
2891         .commit = dce_v6_0_encoder_commit,
2892         .disable = dce_v6_0_encoder_disable,
2893         .detect = amdgpu_atombios_encoder_dig_detect,
2894 };
2895
2896 static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
2897         .dpms = amdgpu_atombios_encoder_dpms,
2898         .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
2899         .prepare = dce_v6_0_encoder_prepare,
2900         .mode_set = dce_v6_0_encoder_mode_set,
2901         .commit = dce_v6_0_encoder_commit,
2902         .detect = amdgpu_atombios_encoder_dac_detect,
2903 };
2904
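/*
 * Encoder destroy: tears down the backlight for LCD devices, then frees the
 * encoder private data and the encoder itself.
 */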
2905 static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
2906 {
2907         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2908         if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
2909                 amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
2910         kfree(amdgpu_encoder->enc_priv);
2911         drm_encoder_cleanup(encoder);
2912         kfree(amdgpu_encoder);
2913 }
2914
2915 static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
2916         .destroy = dce_v6_0_encoder_destroy,
2917 };
2918
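/*
 * Registers an encoder reported by the BIOS object table.  If an encoder
 * with the same enum already exists, only its supported-device mask is
 * extended; otherwise a new amdgpu_encoder is allocated, possible_crtcs is
 * derived from the number of crtcs, and encoder funcs/helpers are attached
 * according to the encoder object ID (DAC, DIG/UNIPHY or external bridge).
 */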
2919 static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
2920                                  uint32_t encoder_enum,
2921                                  uint32_t supported_device,
2922                                  u16 caps)
2923 {
2924         struct drm_device *dev = adev->ddev;
2925         struct drm_encoder *encoder;
2926         struct amdgpu_encoder *amdgpu_encoder;
2927
2928         /* see if we already added it */
2929         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2930                 amdgpu_encoder = to_amdgpu_encoder(encoder);
2931                 if (amdgpu_encoder->encoder_enum == encoder_enum) {
2932                         amdgpu_encoder->devices |= supported_device;
2933                         return;
2934                 }
2935
2936         }
2937
2938         /* add a new one */
2939         amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
2940         if (!amdgpu_encoder)
2941                 return;
2942
2943         encoder = &amdgpu_encoder->base;
2944         switch (adev->mode_info.num_crtc) {
2945         case 1:
2946                 encoder->possible_crtcs = 0x1;
2947                 break;
2948         case 2:
2949         default:
2950                 encoder->possible_crtcs = 0x3;
2951                 break;
2952         case 4:
2953                 encoder->possible_crtcs = 0xf;
2954                 break;
2955         case 6:
2956                 encoder->possible_crtcs = 0x3f;
2957                 break;
2958         }
2959
2960         amdgpu_encoder->enc_priv = NULL;
2961         amdgpu_encoder->encoder_enum = encoder_enum;
2962         amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
2963         amdgpu_encoder->devices = supported_device;
2964         amdgpu_encoder->rmx_type = RMX_OFF;
2965         amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
2966         amdgpu_encoder->is_ext_encoder = false;
2967         amdgpu_encoder->caps = caps;
2968
2969         switch (amdgpu_encoder->encoder_id) {
2970         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
2971         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
2972                 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
2973                                  DRM_MODE_ENCODER_DAC, NULL);
2974                 drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
2975                 break;
2976         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
2977         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2978         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2979         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2980         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2981                 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
2982                         amdgpu_encoder->rmx_type = RMX_FULL;
2983                         drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
2984                                          DRM_MODE_ENCODER_LVDS, NULL);
2985                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
2986                 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
2987                         drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
2988                                          DRM_MODE_ENCODER_DAC, NULL);
2989                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
2990                 } else {
2991                         drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
2992                                          DRM_MODE_ENCODER_TMDS, NULL);
2993                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
2994                 }
2995                 drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
2996                 break;
2997         case ENCODER_OBJECT_ID_SI170B:
2998         case ENCODER_OBJECT_ID_CH7303:
2999         case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3000         case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3001         case ENCODER_OBJECT_ID_TITFP513:
3002         case ENCODER_OBJECT_ID_VT1623:
3003         case ENCODER_OBJECT_ID_HDMI_SI1930:
3004         case ENCODER_OBJECT_ID_TRAVIS:
3005         case ENCODER_OBJECT_ID_NUTMEG:
3006                 /* these are handled by the primary encoders */
3007                 amdgpu_encoder->is_ext_encoder = true;
3008                 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3009                         drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3010                                          DRM_MODE_ENCODER_LVDS, NULL);
3011                 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3012                         drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3013                                          DRM_MODE_ENCODER_DAC, NULL);
3014                 else
3015                         drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3016                                          DRM_MODE_ENCODER_TMDS, NULL);
3017                 drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
3018                 break;
3019         }
3020 }
3021
3022 static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
3023         .set_vga_render_state = &dce_v6_0_set_vga_render_state,
3024         .bandwidth_update = &dce_v6_0_bandwidth_update,
3025         .vblank_get_counter = &dce_v6_0_vblank_get_counter,
3026         .vblank_wait = &dce_v6_0_vblank_wait,
3027         .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3028         .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3029         .hpd_sense = &dce_v6_0_hpd_sense,
3030         .hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
3031         .hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
3032         .page_flip = &dce_v6_0_page_flip,
3033         .page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
3034         .add_encoder = &dce_v6_0_encoder_add,
3035         .add_connector = &amdgpu_connector_add,
3036         .stop_mc_access = &dce_v6_0_stop_mc_access,
3037         .resume_mc_access = &dce_v6_0_resume_mc_access,
3038 };
3039
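/* Install the DCE 6.0 display callback table unless one is already registered. */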
3040 static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
3041 {
3042         if (adev->mode_info.funcs == NULL)
3043                 adev->mode_info.funcs = &dce_v6_0_display_funcs;
3044 }
3045
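/*
 * Interrupt source descriptors: each pairs a state-set callback with a
 * process handler for the crtc (vblank/vline), pageflip and HPD interrupts.
 */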
3046 static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
3047         .set = dce_v6_0_set_crtc_interrupt_state,
3048         .process = dce_v6_0_crtc_irq,
3049 };
3050
3051 static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
3052         .set = dce_v6_0_set_pageflip_interrupt_state,
3053         .process = dce_v6_0_pageflip_irq,
3054 };
3055
3056 static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
3057         .set = dce_v6_0_set_hpd_interrupt_state,
3058         .process = dce_v6_0_hpd_irq,
3059 };
3060
3061 static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
3062 {
3063         adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
3064         adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
3065
3066         adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
3067         adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
3068
3069         adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
3070         adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
3071 }
3072
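/*
 * Exported IP block descriptors.  DCE 6.0 and 6.4 share the same
 * implementation; the two entries differ only in the reported minor version.
 */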
3073 const struct amdgpu_ip_block_version dce_v6_0_ip_block =
3074 {
3075         .type = AMD_IP_BLOCK_TYPE_DCE,
3076         .major = 6,
3077         .minor = 0,
3078         .rev = 0,
3079         .funcs = &dce_v6_0_ip_funcs,
3080 };
3081
3082 const struct amdgpu_ip_block_version dce_v6_4_ip_block =
3083 {
3084         .type = AMD_IP_BLOCK_TYPE_DCE,
3085         .major = 6,
3086         .minor = 4,
3087         .rev = 0,
3088         .funcs = &dce_v6_0_ip_funcs,
3089 };