/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET
};
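
/* offsets of each HPD pin's register block, relative to the HPD1 instance */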
static const u32 hpd_offsets[] =
{
	mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
};

static const uint32_t dig_offsets[] = {
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};
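
/* per display controller: interrupt status register and its vblank/vline/hpd bits */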
static const struct {
	uint32_t reg;
	uint32_t vblank;
	uint32_t vline;
	uint32_t hpd;
} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	DRM_INFO("dce_v6_0_audio_endpt_rreg: not implemented\n");
	return 0;
}

static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	DRM_INFO("dce_v6_0_audio_endpt_wreg: not implemented\n");
}

static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
{
	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & CRTC_STATUS__CRTC_V_BLANK_MASK)
		return true;
	else
		return false;
}

static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
{
	u32 pos1, pos2;

	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return pos1 != pos2;
}

/**
 * dce_v6_0_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return;

	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce_v6_0_is_in_vblank(adev, crtc)) {
		if (!dce_v6_0_is_counter_moving(adev, crtc))
			break;
	}

	while (!dce_v6_0_is_in_vblank(adev, crtc)) {
		if (!dce_v6_0_is_counter_moving(adev, crtc))
			break;
	}
}
189 static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
191 if (crtc >= adev->mode_info.num_crtc)
194 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
197 static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
201 /* Enable pflip interrupts */
202 for (i = 0; i < adev->mode_info.num_crtc; i++)
203 amdgpu_irq_get(adev, &adev->pageflip_irq, i);
206 static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
210 /* Disable pflip interrupts */
211 for (i = 0; i < adev->mode_info.num_crtc; i++)
212 amdgpu_irq_put(adev, &adev->pageflip_irq, i);

/**
 * dce_v6_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: flip at hsync instead of waiting for vblank
 *
 * Does the actual pageflip (evergreen+) by updating the primary
 * surface base address; the double buffered update then takes
 * effect at the next vblank (or hsync for async flips).
 */
228 static void dce_v6_0_page_flip(struct amdgpu_device *adev,
229 int crtc_id, u64 crtc_base, bool async)
231 struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
233 /* flip at hsync for async, default is vsync */
234 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
235 GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
236 /* update the scanout addresses */
237 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
238 upper_32_bits(crtc_base));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);

	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
246 static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
247 u32 *vbl, u32 *position)
249 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
251 *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
252 *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
259 * dce_v6_0_hpd_sense - hpd sense callback.
261 * @adev: amdgpu_device pointer
262 * @hpd: hpd (hotplug detect) pin
264 * Checks if a digital monitor is connected (evergreen+).
265 * Returns true if connected, false if not connected.
267 static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
268 enum amdgpu_hpd_id hpd)
270 bool connected = false;
272 if (hpd >= adev->mode_info.num_hpd)
275 if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
282 * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
284 * @adev: amdgpu_device pointer
285 * @hpd: hpd (hotplug detect) pin
287 * Set the polarity of the hpd pin (evergreen+).
289 static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
290 enum amdgpu_hpd_id hpd)
293 bool connected = dce_v6_0_hpd_sense(adev, hpd);
295 if (hpd >= adev->mode_info.num_hpd)
298 tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
300 tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
302 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
303 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
307 * dce_v6_0_hpd_init - hpd setup callback.
309 * @adev: amdgpu_device pointer
311 * Setup the hpd pins used by the card (evergreen+).
312 * Enable the pin, set the polarity, and enable the hpd interrupts.
314 static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
316 struct drm_device *dev = adev->ddev;
317 struct drm_connector *connector;
320 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
321 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
323 if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
326 tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
327 tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
328 WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
330 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
331 connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS, to avoid breaking the
			 * aux dp channel on imac and to help (but not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * also avoid interrupt storms during dpms.
			 */
337 tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
338 tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
339 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
343 dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
344 amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
350 * dce_v6_0_hpd_fini - hpd tear down callback.
352 * @adev: amdgpu_device pointer
354 * Tear down the hpd pins used by the card (evergreen+).
355 * Disable the hpd interrupts.
357 static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
359 struct drm_device *dev = adev->ddev;
360 struct drm_connector *connector;
363 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
364 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
366 if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
373 amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
377 static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
379 return mmDC_GPIO_HPD_A;
static u32 evergreen_get_vblank_counter(struct amdgpu_device *adev, int crtc)
384 if (crtc >= adev->mode_info.num_crtc)
387 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
390 static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
391 struct amdgpu_mode_mc_save *save)
393 u32 crtc_enabled, tmp, frame_count;
396 save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
397 save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
399 /* disable VGA render */
400 WREG32(mmVGA_RENDER_CONTROL, 0);
402 /* blank the display controllers */
403 for (i = 0; i < adev->mode_info.num_crtc; i++) {
404 crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK;
406 save->crtc_enabled[i] = true;
407 tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
409 if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) {
410 dce_v6_0_vblank_wait(adev, i);
411 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
412 tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK;
413 WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
414 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
416 /* wait for the next frame */
417 frame_count = evergreen_get_vblank_counter(adev, i);
418 for (j = 0; j < adev->usec_timeout; j++) {
419 if (evergreen_get_vblank_counter(adev, i) != frame_count)
424 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
425 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
426 tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
427 tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
428 WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
429 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
430 save->crtc_enabled[i] = false;
433 save->crtc_enabled[i] = false;
438 static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
439 struct amdgpu_mode_mc_save *save)
444 /* update crtc base addresses */
445 for (i = 0; i < adev->mode_info.num_crtc; i++) {
446 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
447 upper_32_bits(adev->mc.vram_start));
448 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
449 upper_32_bits(adev->mc.vram_start));
450 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
451 (u32)adev->mc.vram_start);
452 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
453 (u32)adev->mc.vram_start);
456 WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
457 WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
459 /* unlock regs and wait for update */
460 for (i = 0; i < adev->mode_info.num_crtc; i++) {
461 if (save->crtc_enabled[i]) {
462 tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
463 if ((tmp & 0x7) != 0) {
465 WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
467 tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
468 if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) {
469 tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
470 WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
472 tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
475 WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
477 for (j = 0; j < adev->usec_timeout; j++) {
478 tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
479 if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0)
486 /* Unlock vga access */
487 WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
489 WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
493 static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
497 WREG32(mmVGA_RENDER_CONTROL,
498 RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
502 static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
506 switch (adev->asic_type) {
521 void dce_v6_0_disable_dce(struct amdgpu_device *adev)
	/* Disable VGA render and any enabled CRTCs if the board has a DCE engine */
524 if (amdgpu_atombios_has_dce_engine_info(adev)) {
528 dce_v6_0_set_vga_render_state(adev, false);
531 for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
532 crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
533 CRTC_CONTROL__CRTC_MASTER_EN_MASK;
535 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
536 tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
537 tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
538 WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
539 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
545 static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
548 struct drm_device *dev = encoder->dev;
549 struct amdgpu_device *adev = dev->dev_private;
550 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
551 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
552 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
555 enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
558 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
559 bpc = amdgpu_connector_get_monitor_bpc(connector);
560 dither = amdgpu_connector->dither;
563 /* LVDS FMT is set up by atom */
564 if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
573 if (dither == AMDGPU_FMT_DITHER_ENABLE)
574 /* XXX sort out optimal dither settings */
575 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
576 FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
577 FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
579 tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
582 if (dither == AMDGPU_FMT_DITHER_ENABLE)
583 /* XXX sort out optimal dither settings */
584 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
585 FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
586 FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
587 FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
588 FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
590 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
591 FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
599 WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);

/**
 * si_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (SI).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
611 static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
613 u32 tmp = RREG32(mmMC_SHARED_CHMAP);
615 switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
638 struct dce6_wm_params {
639 u32 dram_channels; /* number of dram channels */
640 u32 yclk; /* bandwidth per dram data pin in kHz */
641 u32 sclk; /* engine clock in kHz */
642 u32 disp_clk; /* display clock in kHz */
643 u32 src_width; /* viewport width */
644 u32 active_time; /* active display time in ns */
645 u32 blank_time; /* blank time in ns */
646 bool interlaced; /* mode is interlaced */
647 fixed20_12 vsc; /* vertical scale ratio */
648 u32 num_heads; /* number of active crtcs */
649 u32 bytes_per_pixel; /* bytes per pixel display + overlay */
650 u32 lb_size; /* line buffer allocated to pipe */
651 u32 vtaps; /* vertical scaler taps */
655 * dce_v6_0_dram_bandwidth - get the dram bandwidth
657 * @wm: watermark calculation data
 * Calculate the raw dram bandwidth (SI).
660 * Used for display watermark bandwidth calculations
661 * Returns the dram bandwidth in MBytes/s
663 static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
665 /* Calculate raw DRAM Bandwidth */
666 fixed20_12 dram_efficiency; /* 0.7 */
667 fixed20_12 yclk, dram_channels, bandwidth;
670 a.full = dfixed_const(1000);
671 yclk.full = dfixed_const(wm->yclk);
672 yclk.full = dfixed_div(yclk, a);
673 dram_channels.full = dfixed_const(wm->dram_channels * 4);
674 a.full = dfixed_const(10);
675 dram_efficiency.full = dfixed_const(7);
676 dram_efficiency.full = dfixed_div(dram_efficiency, a);
677 bandwidth.full = dfixed_mul(dram_channels, yclk);
678 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
680 return dfixed_trunc(bandwidth);
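
/*
 * Example with illustrative numbers: yclk = 800000 kHz and 2 DRAM channels
 * give a raw bandwidth of (800000 / 1000) * (2 * 4) * 0.7 = 4480 MB/s.
 */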
684 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
686 * @wm: watermark calculation data
 * Calculate the dram bandwidth used for display (SI).
689 * Used for display watermark bandwidth calculations
690 * Returns the dram bandwidth for display in MBytes/s
692 static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
694 /* Calculate DRAM Bandwidth and the part allocated to display. */
695 fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
696 fixed20_12 yclk, dram_channels, bandwidth;
699 a.full = dfixed_const(1000);
700 yclk.full = dfixed_const(wm->yclk);
701 yclk.full = dfixed_div(yclk, a);
702 dram_channels.full = dfixed_const(wm->dram_channels * 4);
703 a.full = dfixed_const(10);
704 disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
705 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
706 bandwidth.full = dfixed_mul(dram_channels, yclk);
707 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
709 return dfixed_trunc(bandwidth);
713 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
715 * @wm: watermark calculation data
 * Calculate the data return bandwidth used for display (SI).
718 * Used for display watermark bandwidth calculations
719 * Returns the data return bandwidth in MBytes/s
721 static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
723 /* Calculate the display Data return Bandwidth */
724 fixed20_12 return_efficiency; /* 0.8 */
725 fixed20_12 sclk, bandwidth;
728 a.full = dfixed_const(1000);
729 sclk.full = dfixed_const(wm->sclk);
730 sclk.full = dfixed_div(sclk, a);
731 a.full = dfixed_const(10);
732 return_efficiency.full = dfixed_const(8);
733 return_efficiency.full = dfixed_div(return_efficiency, a);
734 a.full = dfixed_const(32);
735 bandwidth.full = dfixed_mul(a, sclk);
736 bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
738 return dfixed_trunc(bandwidth);
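
/*
 * Example with illustrative numbers: sclk = 600000 kHz gives a display data
 * return bandwidth of (600000 / 1000) * 32 * 0.8 = 15360 MB/s.
 */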
742 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
744 * @wm: watermark calculation data
 * Calculate the dmif bandwidth used for display (SI).
747 * Used for display watermark bandwidth calculations
748 * Returns the dmif bandwidth in MBytes/s
750 static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
752 /* Calculate the DMIF Request Bandwidth */
753 fixed20_12 disp_clk_request_efficiency; /* 0.8 */
754 fixed20_12 disp_clk, bandwidth;
757 a.full = dfixed_const(1000);
758 disp_clk.full = dfixed_const(wm->disp_clk);
759 disp_clk.full = dfixed_div(disp_clk, a);
760 a.full = dfixed_const(32);
761 b.full = dfixed_mul(a, disp_clk);
763 a.full = dfixed_const(10);
764 disp_clk_request_efficiency.full = dfixed_const(8);
765 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
767 bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
769 return dfixed_trunc(bandwidth);
773 * dce_v6_0_available_bandwidth - get the min available bandwidth
775 * @wm: watermark calculation data
 * Calculate the min available bandwidth used for display (SI).
778 * Used for display watermark bandwidth calculations
779 * Returns the min available bandwidth in MBytes/s
781 static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
783 /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
784 u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
785 u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
786 u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);
788 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
792 * dce_v6_0_average_bandwidth - get the average available bandwidth
794 * @wm: watermark calculation data
 * Calculate the average available bandwidth used for display (SI).
797 * Used for display watermark bandwidth calculations
798 * Returns the average available bandwidth in MBytes/s
800 static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
802 /* Calculate the display mode Average Bandwidth
803 * DisplayMode should contain the source and destination dimensions,
807 fixed20_12 line_time;
808 fixed20_12 src_width;
809 fixed20_12 bandwidth;
812 a.full = dfixed_const(1000);
813 line_time.full = dfixed_const(wm->active_time + wm->blank_time);
814 line_time.full = dfixed_div(line_time, a);
815 bpp.full = dfixed_const(wm->bytes_per_pixel);
816 src_width.full = dfixed_const(wm->src_width);
817 bandwidth.full = dfixed_mul(src_width, bpp);
818 bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
819 bandwidth.full = dfixed_div(bandwidth, line_time);
821 return dfixed_trunc(bandwidth);
825 * dce_v6_0_latency_watermark - get the latency watermark
827 * @wm: watermark calculation data
 * Calculate the latency watermark (SI).
830 * Used for display watermark bandwidth calculations
831 * Returns the latency watermark in ns
833 static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
835 /* First calculate the latency in ns */
836 u32 mc_latency = 2000; /* 2000 ns. */
837 u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
838 u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
839 u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
840 u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
841 u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
842 (wm->num_heads * cursor_line_pair_return_time);
843 u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
844 u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
845 u32 tmp, dmif_size = 12288;
848 if (wm->num_heads == 0)
851 a.full = dfixed_const(2);
852 b.full = dfixed_const(1);
853 if ((wm->vsc.full > a.full) ||
854 ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
856 ((wm->vsc.full >= a.full) && wm->interlaced))
857 max_src_lines_per_dst_line = 4;
859 max_src_lines_per_dst_line = 2;
861 a.full = dfixed_const(available_bandwidth);
862 b.full = dfixed_const(wm->num_heads);
863 a.full = dfixed_div(a, b);
864 tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
865 tmp = min(dfixed_trunc(a), tmp);
867 lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
869 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
870 b.full = dfixed_const(1000);
871 c.full = dfixed_const(lb_fill_bw);
872 b.full = dfixed_div(c, b);
873 a.full = dfixed_div(a, b);
874 line_fill_time = dfixed_trunc(a);
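
	/* if the line buffer can be refilled within the active display time,
	 * the raw latency is the watermark; otherwise the extra fill time is
	 * added on top of it.
	 */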
876 if (line_fill_time < wm->active_time)
879 return latency + (line_fill_time - wm->active_time);
884 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
885 * average and available dram bandwidth
887 * @wm: watermark calculation data
889 * Check if the display average bandwidth fits in the display
 * dram bandwidth (SI).
891 * Used for display watermark bandwidth calculations
892 * Returns true if the display fits, false if not.
894 static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
896 if (dce_v6_0_average_bandwidth(wm) <=
897 (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
904 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
905 * average and available bandwidth
907 * @wm: watermark calculation data
909 * Check if the display average bandwidth fits in the display
 * available bandwidth (SI).
911 * Used for display watermark bandwidth calculations
912 * Returns true if the display fits, false if not.
914 static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
916 if (dce_v6_0_average_bandwidth(wm) <=
917 (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
924 * dce_v6_0_check_latency_hiding - check latency hiding
926 * @wm: watermark calculation data
 * Check latency hiding (SI).
929 * Used for display watermark bandwidth calculations
930 * Returns true if the display fits, false if not.
932 static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
934 u32 lb_partitions = wm->lb_size / wm->src_width;
935 u32 line_time = wm->active_time + wm->blank_time;
936 u32 latency_tolerant_lines;
940 a.full = dfixed_const(1);
941 if (wm->vsc.full > a.full)
942 latency_tolerant_lines = 1;
944 if (lb_partitions <= (wm->vtaps + 1))
945 latency_tolerant_lines = 1;
947 latency_tolerant_lines = 2;
950 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
952 if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
959 * dce_v6_0_program_watermarks - program display watermarks
961 * @adev: amdgpu_device pointer
962 * @amdgpu_crtc: the selected display controller
963 * @lb_size: line buffer size
964 * @num_heads: number of display controllers in use
966 * Calculate and program the display watermarks for the
 * selected display controller (SI).
969 static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
970 struct amdgpu_crtc *amdgpu_crtc,
971 u32 lb_size, u32 num_heads)
973 struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
974 struct dce6_wm_params wm_low, wm_high;
978 u32 latency_watermark_a = 0, latency_watermark_b = 0;
979 u32 priority_a_mark = 0, priority_b_mark = 0;
980 u32 priority_a_cnt = PRIORITY_OFF;
981 u32 priority_b_cnt = PRIORITY_OFF;
982 u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
985 if (amdgpu_crtc->base.enabled && num_heads && mode) {
986 active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
987 line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
991 dram_channels = si_get_number_of_dram_channels(adev);
993 /* watermark for high clocks */
994 if (adev->pm.dpm_enabled) {
996 amdgpu_dpm_get_mclk(adev, false) * 10;
998 amdgpu_dpm_get_sclk(adev, false) * 10;
1000 wm_high.yclk = adev->pm.current_mclk * 10;
1001 wm_high.sclk = adev->pm.current_sclk * 10;
1004 wm_high.disp_clk = mode->clock;
1005 wm_high.src_width = mode->crtc_hdisplay;
1006 wm_high.active_time = active_time;
1007 wm_high.blank_time = line_time - wm_high.active_time;
1008 wm_high.interlaced = false;
1009 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1010 wm_high.interlaced = true;
1011 wm_high.vsc = amdgpu_crtc->vsc;
1013 if (amdgpu_crtc->rmx_type != RMX_OFF)
1015 wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1016 wm_high.lb_size = lb_size;
1017 wm_high.dram_channels = dram_channels;
1018 wm_high.num_heads = num_heads;
1020 if (adev->pm.dpm_enabled) {
1021 /* watermark for low clocks */
1023 amdgpu_dpm_get_mclk(adev, true) * 10;
1025 amdgpu_dpm_get_sclk(adev, true) * 10;
1027 wm_low.yclk = adev->pm.current_mclk * 10;
1028 wm_low.sclk = adev->pm.current_sclk * 10;
1031 wm_low.disp_clk = mode->clock;
1032 wm_low.src_width = mode->crtc_hdisplay;
1033 wm_low.active_time = active_time;
1034 wm_low.blank_time = line_time - wm_low.active_time;
1035 wm_low.interlaced = false;
1036 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1037 wm_low.interlaced = true;
1038 wm_low.vsc = amdgpu_crtc->vsc;
1040 if (amdgpu_crtc->rmx_type != RMX_OFF)
1042 wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1043 wm_low.lb_size = lb_size;
1044 wm_low.dram_channels = dram_channels;
1045 wm_low.num_heads = num_heads;
1047 /* set for high clocks */
1048 latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
1049 /* set for low clocks */
1050 latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);
1052 /* possibly force display priority to high */
1053 /* should really do this at mode validation time... */
1054 if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1055 !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1056 !dce_v6_0_check_latency_hiding(&wm_high) ||
1057 (adev->mode_info.disp_priority == 2)) {
1058 DRM_DEBUG_KMS("force priority to high\n");
1059 priority_a_cnt |= PRIORITY_ALWAYS_ON;
1060 priority_b_cnt |= PRIORITY_ALWAYS_ON;
1062 if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1063 !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1064 !dce_v6_0_check_latency_hiding(&wm_low) ||
1065 (adev->mode_info.disp_priority == 2)) {
1066 DRM_DEBUG_KMS("force priority to high\n");
1067 priority_a_cnt |= PRIORITY_ALWAYS_ON;
1068 priority_b_cnt |= PRIORITY_ALWAYS_ON;
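
		/* convert the latency watermarks into priority marks: the number
		 * of 16-pixel groups scanned out during the latency period at the
		 * current pixel clock, scaled by the horizontal scale ratio
		 */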
1071 a.full = dfixed_const(1000);
1072 b.full = dfixed_const(mode->clock);
1073 b.full = dfixed_div(b, a);
1074 c.full = dfixed_const(latency_watermark_a);
1075 c.full = dfixed_mul(c, b);
1076 c.full = dfixed_mul(c, amdgpu_crtc->hsc);
1077 c.full = dfixed_div(c, a);
1078 a.full = dfixed_const(16);
1079 c.full = dfixed_div(c, a);
1080 priority_a_mark = dfixed_trunc(c);
1081 priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
1083 a.full = dfixed_const(1000);
1084 b.full = dfixed_const(mode->clock);
1085 b.full = dfixed_div(b, a);
1086 c.full = dfixed_const(latency_watermark_b);
1087 c.full = dfixed_mul(c, b);
1088 c.full = dfixed_mul(c, amdgpu_crtc->hsc);
1089 c.full = dfixed_div(c, a);
1090 a.full = dfixed_const(16);
1091 c.full = dfixed_div(c, a);
1092 priority_b_mark = dfixed_trunc(c);
1093 priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
1095 lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
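
	/* select watermark set A, program it, then select set B, program it,
	 * and finally restore the original arbitration control selection
	 */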
1099 arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
1101 tmp &= ~LATENCY_WATERMARK_MASK(3);
1102 tmp |= LATENCY_WATERMARK_MASK(1);
1103 WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
1104 WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1105 ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1106 (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1108 tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
1109 tmp &= ~LATENCY_WATERMARK_MASK(3);
1110 tmp |= LATENCY_WATERMARK_MASK(2);
1111 WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
1112 WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1113 ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1114 (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1115 /* restore original selection */
1116 WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
1118 /* write the priority marks */
1119 WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
1120 WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
1122 /* save values for DPM */
1123 amdgpu_crtc->line_time = line_time;
1124 amdgpu_crtc->wm_high = latency_watermark_a;
1126 /* Save number of lines the linebuffer leads before the scanout */
1127 amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1130 /* watermark setup */
1131 static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
1132 struct amdgpu_crtc *amdgpu_crtc,
1133 struct drm_display_mode *mode,
1134 struct drm_display_mode *other_mode)
1136 u32 tmp, buffer_alloc, i;
1137 u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning is done via one of four
	 * preset allocations specified in bits 21:20:
	 *  0 - half lb
	 *  2 - whole lb, other crtc must be disabled
	 */
1147 /* this can get tricky if we have two large displays on a paired group
1148 * of crtcs. Ideally for multiple large displays we'd assign them to
1149 * non-linked crtcs for maximum line buffer allocation.
1151 if (amdgpu_crtc->base.enabled && mode) {
1156 tmp = 2; /* whole */
1164 WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
1165 DC_LB_MEMORY_CONFIG(tmp));
1167 WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1168 (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
1169 for (i = 0; i < adev->usec_timeout; i++) {
1170 if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1171 PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
1176 if (amdgpu_crtc->base.enabled && mode) {
1186 /* controller not enabled, so no lb used */
1193 * dce_v6_0_bandwidth_update - program display watermarks
1195 * @adev: amdgpu_device pointer
1197 * Calculate and program the display watermarks and line
 * buffer allocation (SI).
1200 static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
1202 struct drm_display_mode *mode0 = NULL;
1203 struct drm_display_mode *mode1 = NULL;
1204 u32 num_heads = 0, lb_size;
1207 if (!adev->mode_info.mode_config_initialized)
1210 amdgpu_update_display_priority(adev);
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}

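	/* watermarks and the line buffer are handled per pair of controllers,
	 * since each pair shares one line buffer
	 */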
1216 for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
1217 mode0 = &adev->mode_info.crtcs[i]->base.mode;
1218 mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
1219 lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
1220 dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
1221 lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
1222 dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
1226 static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
1231 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1232 offset = adev->mode_info.audio.pin[i].offset;
1233 tmp = RREG32_AUDIO_ENDPT(offset,
1234 AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1235 if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
1236 adev->mode_info.audio.pin[i].connected = false;
1238 adev->mode_info.audio.pin[i].connected = true;
1243 static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
1247 dce_v6_0_audio_get_connected_pins(adev);
1249 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1250 if (adev->mode_info.audio.pin[i].connected)
1251 return &adev->mode_info.audio.pin[i];
1253 DRM_ERROR("No connected audio pins found!\n");
1257 static void dce_v6_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1259 struct amdgpu_device *adev = encoder->dev->dev_private;
1260 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1261 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1264 if (!dig || !dig->afmt || !dig->afmt->pin)
1267 offset = dig->afmt->offset;
1269 WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
1270 AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
1274 static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
1275 struct drm_display_mode *mode)
	DRM_INFO("dce_v6_0_audio_write_latency_fields: not implemented\n");
1280 static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
	DRM_INFO("dce_v6_0_audio_write_speaker_allocation: not implemented\n");
1285 static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
	DRM_INFO("dce_v6_0_audio_write_sad_regs: not implemented\n");
1291 static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
1292 struct amdgpu_audio_pin *pin,
	DRM_INFO("dce_v6_0_audio_enable: not implemented\n");
1298 static const u32 pin_offsets[7] =
1309 static int dce_v6_0_audio_init(struct amdgpu_device *adev)
1314 static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
1320 static void dce_v6_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
	DRM_INFO("dce_v6_0_afmt_update_ACR: not implemented\n");
1326 * build a HDMI Video Info Frame
1329 static void dce_v6_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1330 void *buffer, size_t size)
	DRM_INFO("dce_v6_0_afmt_update_avi_infoframe: not implemented\n");
1335 static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
	DRM_INFO("dce_v6_0_audio_set_dto: not implemented\n");
1341 * update the info frames with the data from the current display mode
1343 static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
1344 struct drm_display_mode *mode)
	DRM_INFO("dce_v6_0_afmt_setmode: not implemented\n");
1349 static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1351 struct drm_device *dev = encoder->dev;
1352 struct amdgpu_device *adev = dev->dev_private;
1353 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1354 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1356 if (!dig || !dig->afmt)
1359 /* Silent, r600_hdmi_enable will raise WARN for us */
1360 if (enable && dig->afmt->enabled)
1362 if (!enable && !dig->afmt->enabled)
1365 if (!enable && dig->afmt->pin) {
1366 dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1367 dig->afmt->pin = NULL;
1370 dig->afmt->enabled = enable;
1372 DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1373 enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1376 static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
1380 for (i = 0; i < adev->mode_info.num_dig; i++)
1381 adev->mode_info.afmt[i] = NULL;
1383 /* DCE6 has audio blocks tied to DIG encoders */
1384 for (i = 0; i < adev->mode_info.num_dig; i++) {
1385 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1386 if (adev->mode_info.afmt[i]) {
1387 adev->mode_info.afmt[i]->offset = dig_offsets[i];
1388 adev->mode_info.afmt[i]->id = i;
1390 for (j = 0; j < i; j++) {
1391 kfree(adev->mode_info.afmt[j]);
1392 adev->mode_info.afmt[j] = NULL;
1394 DRM_ERROR("Out of memory allocating afmt table\n");
1401 static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
1405 for (i = 0; i < adev->mode_info.num_dig; i++) {
1406 kfree(adev->mode_info.afmt[i]);
1407 adev->mode_info.afmt[i] = NULL;
1411 static const u32 vga_control_regs[6] =
1421 static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
1423 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1424 struct drm_device *dev = crtc->dev;
1425 struct amdgpu_device *adev = dev->dev_private;
1428 vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1429 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
1432 static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
1434 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1435 struct drm_device *dev = crtc->dev;
1436 struct amdgpu_device *adev = dev->dev_private;
1438 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
1441 static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1442 struct drm_framebuffer *fb,
1443 int x, int y, int atomic)
1445 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1446 struct drm_device *dev = crtc->dev;
1447 struct amdgpu_device *adev = dev->dev_private;
1448 struct amdgpu_framebuffer *amdgpu_fb;
1449 struct drm_framebuffer *target_fb;
1450 struct drm_gem_object *obj;
1451 struct amdgpu_bo *abo;
1452 uint64_t fb_location, tiling_flags;
1453 uint32_t fb_format, fb_pitch_pixels, pipe_config;
1454 u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
1455 u32 viewport_w, viewport_h;
1457 bool bypass_lut = false;
1458 struct drm_format_name_buf format_name;
1461 if (!atomic && !crtc->primary->fb) {
1462 DRM_DEBUG_KMS("No FB bound\n");
1467 amdgpu_fb = to_amdgpu_framebuffer(fb);
1470 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
1471 target_fb = crtc->primary->fb;
1474 /* If atomic, assume fb object is pinned & idle & fenced and
1475 * just update base pointers
1477 obj = amdgpu_fb->obj;
1478 abo = gem_to_amdgpu_bo(obj);
1479 r = amdgpu_bo_reserve(abo, false);
1480 if (unlikely(r != 0))
1484 fb_location = amdgpu_bo_gpu_offset(abo);
1486 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
1487 if (unlikely(r != 0)) {
1488 amdgpu_bo_unreserve(abo);
1493 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1494 amdgpu_bo_unreserve(abo);
1496 switch (target_fb->format->format) {
1498 fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
1499 GRPH_FORMAT(GRPH_FORMAT_INDEXED));
1501 case DRM_FORMAT_XRGB4444:
1502 case DRM_FORMAT_ARGB4444:
1503 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1504 GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
1506 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1509 case DRM_FORMAT_XRGB1555:
1510 case DRM_FORMAT_ARGB1555:
1511 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1512 GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
1514 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1517 case DRM_FORMAT_BGRX5551:
1518 case DRM_FORMAT_BGRA5551:
1519 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1520 GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
1522 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1525 case DRM_FORMAT_RGB565:
1526 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1527 GRPH_FORMAT(GRPH_FORMAT_ARGB565));
1529 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1532 case DRM_FORMAT_XRGB8888:
1533 case DRM_FORMAT_ARGB8888:
1534 fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1535 GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1537 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1540 case DRM_FORMAT_XRGB2101010:
1541 case DRM_FORMAT_ARGB2101010:
1542 fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1543 GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
1545 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1547 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1550 case DRM_FORMAT_BGRX1010102:
1551 case DRM_FORMAT_BGRA1010102:
1552 fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1553 GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
1555 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1557 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1561 DRM_ERROR("Unsupported screen format %s\n",
1562 drm_get_format_name(target_fb->format->format, &format_name));
1566 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1567 unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1569 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1570 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1571 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1572 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1573 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1575 fb_format |= GRPH_NUM_BANKS(num_banks);
1576 fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
1577 fb_format |= GRPH_TILE_SPLIT(tile_split);
1578 fb_format |= GRPH_BANK_WIDTH(bankw);
1579 fb_format |= GRPH_BANK_HEIGHT(bankh);
1580 fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
1581 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1582 fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
1585 pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1586 fb_format |= GRPH_PIPE_CONFIG(pipe_config);
1588 dce_v6_0_vga_enable(crtc, false);
1590 /* Make sure surface address is updated at vertical blank rather than
1593 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1595 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1596 upper_32_bits(fb_location));
1597 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1598 upper_32_bits(fb_location));
1599 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1600 (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1601 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1602 (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1603 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1604 WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
1608 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
1609 * retain the full precision throughout the pipeline.
1611 WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
1612 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
1613 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
1616 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1618 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1619 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1620 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1621 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1622 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1623 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1625 fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
1626 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1628 dce_v6_0_grph_enable(crtc, true);
1630 WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
1634 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
1636 viewport_w = crtc->mode.hdisplay;
1637 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1639 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
1640 (viewport_w << 16) | viewport_h);
1642 /* set pageflip to happen anywhere in vblank interval */
1643 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
1645 if (!atomic && fb && fb != crtc->primary->fb) {
1646 amdgpu_fb = to_amdgpu_framebuffer(fb);
1647 abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
1648 r = amdgpu_bo_reserve(abo, true);
1649 if (unlikely(r != 0))
1651 amdgpu_bo_unpin(abo);
1652 amdgpu_bo_unreserve(abo);
1655 /* Bytes per pixel may have changed */
1656 dce_v6_0_bandwidth_update(adev);
1662 static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
1663 struct drm_display_mode *mode)
1665 struct drm_device *dev = crtc->dev;
1666 struct amdgpu_device *adev = dev->dev_private;
1667 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1669 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1670 WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
1673 WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
1676 static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
1679 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1680 struct drm_device *dev = crtc->dev;
1681 struct amdgpu_device *adev = dev->dev_private;
1684 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
1686 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
1687 ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
1688 (0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
1689 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
1690 PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
1691 WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
1692 PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
1693 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1694 ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
1695 (0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
1697 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
1699 WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
1700 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
1701 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
1703 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
1704 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
1705 WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
1707 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
1708 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
1710 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
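	/* each LUT entry packs 10-bit components as R[29:20] G[19:10] B[9:0] */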
1711 for (i = 0; i < 256; i++) {
1712 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
1713 (amdgpu_crtc->lut_r[i] << 20) |
1714 (amdgpu_crtc->lut_g[i] << 10) |
1715 (amdgpu_crtc->lut_b[i] << 0));
1718 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1719 ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
1720 (0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
1721 ICON_DEGAMMA_MODE(0) |
1722 (0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
1723 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
1724 ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
1725 (0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
1726 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1727 ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
1728 (0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
1729 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
1730 ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
1731 (0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
1732 /* XXX match this to the depth of the crtc fmt block, move to modeset? */
1733 WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
1738 static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
1740 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1741 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1743 switch (amdgpu_encoder->encoder_id) {
1744 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1745 return dig->linkb ? 1 : 0;
1746 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1747 return dig->linkb ? 3 : 2;
1748 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1749 return dig->linkb ? 5 : 4;
1750 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
1753 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
1759 * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
1763 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
1764 * a single PPLL can be used for all DP crtcs/encoders. For non-DP
1765 * monitors a dedicated PPLL must be used. If a particular board has
1766 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
1767 * as there is no need to program the PLL itself. If we are not able to
1768 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
1769 * avoid messing up an existing monitor.
1773 static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
1775 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1776 struct drm_device *dev = crtc->dev;
1777 struct amdgpu_device *adev = dev->dev_private;
1781 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
1782 if (adev->clock.dp_extclk)
1783 /* skip PPLL programming if using ext clock */
1784 return ATOM_PPLL_INVALID;
1788 /* use the same PPLL for all monitors with the same clock */
1789 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
1790 if (pll != ATOM_PPLL_INVALID)
1794 /* PPLL1, and PPLL2 */
1795 pll_in_use = amdgpu_pll_get_use_mask(crtc);
1796 if (!(pll_in_use & (1 << ATOM_PPLL2)))
1798 if (!(pll_in_use & (1 << ATOM_PPLL1)))
1800 DRM_ERROR("unable to allocate a PPLL\n");
1801 return ATOM_PPLL_INVALID;
1804 static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
1806 struct amdgpu_device *adev = crtc->dev->dev_private;
1807 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1810 cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
1812 cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
1814 cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
1815 WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
1818 static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
1820 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1821 struct amdgpu_device *adev = crtc->dev->dev_private;
1823 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
1824 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
1825 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
1830 static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
1832 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1833 struct amdgpu_device *adev = crtc->dev->dev_private;
1835 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1836 upper_32_bits(amdgpu_crtc->cursor_addr));
1837 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1838 lower_32_bits(amdgpu_crtc->cursor_addr));
1840 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
1841 CUR_CONTROL__CURSOR_EN_MASK |
1842 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
1843 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
1847 static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
1850 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1851 struct amdgpu_device *adev = crtc->dev->dev_private;
1852 int xorigin = 0, yorigin = 0;
1854 int w = amdgpu_crtc->cursor_width;
1856 amdgpu_crtc->cursor_x = x;
1857 amdgpu_crtc->cursor_y = y;
	/* avivo cursors are offset into the total surface */
1862 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
1865 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
1869 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
1873 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
1874 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
1875 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
1876 ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
1881 static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
1886 dce_v6_0_lock_cursor(crtc, true);
1887 ret = dce_v6_0_cursor_move_locked(crtc, x, y);
1888 dce_v6_0_lock_cursor(crtc, false);
static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
				     struct drm_file *file_priv,
				     uint32_t handle,
				     uint32_t width,
				     uint32_t height,
				     int32_t hot_x,
				     int32_t hot_y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_bo *aobj;
	int ret;

	if (!handle) {
		/* turn off cursor */
		dce_v6_0_hide_cursor(crtc);
		obj = NULL;
		goto unpin;
	}

	if ((width > amdgpu_crtc->max_cursor_width) ||
	    (height > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
		return -ENOENT;
	}

	aobj = gem_to_amdgpu_bo(obj);
	ret = amdgpu_bo_reserve(aobj, false);
	if (ret != 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
	amdgpu_bo_unreserve(aobj);
	if (ret) {
		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	dce_v6_0_lock_cursor(crtc, true);

	if (width != amdgpu_crtc->cursor_width ||
	    height != amdgpu_crtc->cursor_height ||
	    hot_x != amdgpu_crtc->cursor_hot_x ||
	    hot_y != amdgpu_crtc->cursor_hot_y) {
		int x, y;

		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
		dce_v6_0_cursor_move_locked(crtc, x, y);

		amdgpu_crtc->cursor_width = width;
		amdgpu_crtc->cursor_height = height;
		amdgpu_crtc->cursor_hot_x = hot_x;
		amdgpu_crtc->cursor_hot_y = hot_y;
	}

	dce_v6_0_show_cursor(crtc);
	dce_v6_0_lock_cursor(crtc, false);

unpin:
	if (amdgpu_crtc->cursor_bo) {
		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
		ret = amdgpu_bo_reserve(aobj, true);
		if (likely(ret == 0)) {
			amdgpu_bo_unpin(aobj);
			amdgpu_bo_unreserve(aobj);
		}
		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
	}

	amdgpu_crtc->cursor_bo = obj;
	return 0;
}

static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (amdgpu_crtc->cursor_bo) {
		dce_v6_0_lock_cursor(crtc, true);
		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
					    amdgpu_crtc->cursor_y);
		dce_v6_0_show_cursor(crtc);
		dce_v6_0_lock_cursor(crtc, false);
	}
}

static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				   u16 *blue, uint32_t size,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int i;

	/* userspace palettes are always correct as is */
	for (i = 0; i < size; i++) {
		amdgpu_crtc->lut_r[i] = red[i] >> 6;
		amdgpu_crtc->lut_g[i] = green[i] >> 6;
		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
	}
	dce_v6_0_crtc_load_lut(crtc);

	return 0;
}

static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(amdgpu_crtc);
}

static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
	.cursor_move = dce_v6_0_crtc_cursor_move,
	.gamma_set = dce_v6_0_crtc_gamma_set,
	.set_config = amdgpu_crtc_set_config,
	.destroy = dce_v6_0_crtc_destroy,
	.page_flip_target = amdgpu_crtc_page_flip_target,
};

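/*
 * Power the CRTC up or down via ATOM and keep the vblank/pageflip interrupt
 * state and power management clocks in sync with the DPMS mode.
 */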
static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	unsigned type;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		amdgpu_crtc->enabled = true;
		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
		/* Make sure VBLANK and PFLIP interrupts are still enabled */
		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
		amdgpu_irq_update(adev, &adev->crtc_irq, type);
		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
		drm_crtc_vblank_on(crtc);
		dce_v6_0_crtc_load_lut(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		drm_crtc_vblank_off(crtc);
		if (amdgpu_crtc->enabled)
			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
		amdgpu_crtc->enabled = false;
		break;
	}
	/* adjust pm to dpms */
	amdgpu_pm_compute_clocks(adev);
}

static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
{
	/* disable crtc pair power gating before programming */
	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
{
	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
}

static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_atom_ss ss;
	int i;

	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	if (crtc->primary->fb) {
		int r;
		struct amdgpu_framebuffer *amdgpu_fb;
		struct amdgpu_bo *abo;

		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(abo, true);
		if (unlikely(r))
			DRM_ERROR("failed to reserve abo before unpin\n");
		else {
			amdgpu_bo_unpin(abo);
			amdgpu_bo_unreserve(abo);
		}
	}
	/* disable the GRPH */
	dce_v6_0_grph_enable(crtc, false);

	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i] &&
		    adev->mode_info.crtcs[i]->enabled &&
		    i != amdgpu_crtc->crtc_id &&
		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* one other crtc is using this pll don't turn
			 * off the pll
			 */
			goto done;
		}
	}

	switch (amdgpu_crtc->pll_id) {
	case ATOM_PPLL1:
	case ATOM_PPLL2:
		/* disable the ppll */
		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	default:
		break;
	}
done:
	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
}

static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y, struct drm_framebuffer *old_fb)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (!amdgpu_crtc->adjusted_clock)
		return -EINVAL;

	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
	amdgpu_atombios_crtc_scaler_setup(crtc);
	dce_v6_0_cursor_reset(crtc);
	/* update the hw version for dpm */
	amdgpu_crtc->hw_mode = *adjusted_mode;

	return 0;
}

static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			amdgpu_crtc->encoder = encoder;
			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
			break;
		}
	}
	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
		amdgpu_crtc->encoder = NULL;
		amdgpu_crtc->connector = NULL;
		return false;
	}
	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
		return false;
	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
		return false;

	/* pick pll */
	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
	/* if we can't get a PPLL for a non-DP encoder, fail */
	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
		return false;

	return true;
}

static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
				  struct drm_framebuffer *old_fb)
{
	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
}

static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y, enum mode_set_atomic state)
{
	return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
}

static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
	.dpms = dce_v6_0_crtc_dpms,
	.mode_fixup = dce_v6_0_crtc_mode_fixup,
	.mode_set = dce_v6_0_crtc_mode_set,
	.mode_set_base = dce_v6_0_crtc_set_base,
	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
	.prepare = dce_v6_0_crtc_prepare,
	.commit = dce_v6_0_crtc_commit,
	.load_lut = dce_v6_0_crtc_load_lut,
	.disable = dce_v6_0_crtc_disable,
};

static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
{
	struct amdgpu_crtc *amdgpu_crtc;
	int i;

	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (amdgpu_crtc == NULL)
		return -ENOMEM;

	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
	amdgpu_crtc->crtc_id = index;
	adev->mode_info.crtcs[index] = amdgpu_crtc;

	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;

	for (i = 0; i < 256; i++) {
		amdgpu_crtc->lut_r[i] = i << 2;
		amdgpu_crtc->lut_g[i] = i << 2;
		amdgpu_crtc->lut_b[i] = i << 2;
	}

	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];

	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);

	return 0;
}

static int dce_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
	dce_v6_0_set_display_funcs(adev);
	dce_v6_0_set_irq_funcs(adev);
	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);

	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

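/*
 * Register the display interrupt sources (per-CRTC vblank/vline, pageflip,
 * HPD), then set up the DRM mode_config, CRTCs, connectors, AFMT and audio.
 */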
static int dce_v6_0_sw_init(void *handle)
{
	int r, i;
	bool ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
		if (r)
			return r;
	}

	for (i = 8; i < 20; i += 2) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
	if (r)
		return r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
	adev->ddev->mode_config.async_page_flip = true;
	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;
	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	adev->ddev->mode_config.fb_base = adev->mc.aper_base;

	r = amdgpu_modeset_create_props(adev);
	if (r)
		return r;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	/* allocate crtcs */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = dce_v6_0_crtc_init(adev, i);
		if (r)
			return r;
	}

	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
	if (ret)
		amdgpu_print_display_setup(adev->ddev);
	else
		return -EINVAL;

	r = dce_v6_0_afmt_init(adev);
	if (r)
		return r;

	r = dce_v6_0_audio_init(adev);
	if (r)
		return r;

	drm_kms_helper_poll_init(adev->ddev);

	return r;
}

static int dce_v6_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->mode_info.bios_hardcoded_edid);

	drm_kms_helper_poll_fini(adev->ddev);

	dce_v6_0_audio_fini(adev);
	dce_v6_0_afmt_fini(adev);

	drm_mode_config_cleanup(adev->ddev);
	adev->mode_info.mode_config_initialized = false;

	return 0;
}

static int dce_v6_0_hw_init(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* init dig PHYs, disp eng pll */
	amdgpu_atombios_encoder_init_dig(adev);
	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);

	/* initialize hpd */
	dce_v6_0_hpd_init(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	dce_v6_0_pageflip_interrupt_init(adev);

	return 0;
}

static int dce_v6_0_hw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dce_v6_0_hpd_fini(adev);
	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	dce_v6_0_pageflip_interrupt_fini(adev);

	return 0;
}

static int dce_v6_0_suspend(void *handle)
{
	return dce_v6_0_hw_fini(handle);
}

static int dce_v6_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = dce_v6_0_hw_init(handle);

	/* turn on the BL */
	if (adev->mode_info.bl_encoder) {
		u8 bl_level = amdgpu_display_backlight_get_level(adev,
								 adev->mode_info.bl_encoder);
		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
						   bl_level);
	}

	return ret;
}

static bool dce_v6_0_is_idle(void *handle)
{
	return true;
}

static int dce_v6_0_wait_for_idle(void *handle)
{
	return 0;
}

static int dce_v6_0_soft_reset(void *handle)
{
	DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n");
	return 0;
}

static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
						     int crtc,
						     enum amdgpu_interrupt_state state)
{
	u32 reg_block, interrupt_mask;

	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (crtc) {
	case 0:
		reg_block = SI_CRTC0_REGISTER_OFFSET;
		break;
	case 1:
		reg_block = SI_CRTC1_REGISTER_OFFSET;
		break;
	case 2:
		reg_block = SI_CRTC2_REGISTER_OFFSET;
		break;
	case 3:
		reg_block = SI_CRTC3_REGISTER_OFFSET;
		break;
	case 4:
		reg_block = SI_CRTC4_REGISTER_OFFSET;
		break;
	case 5:
		reg_block = SI_CRTC5_REGISTER_OFFSET;
		break;
	default:
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		interrupt_mask = RREG32(mmINT_MASK + reg_block);
		interrupt_mask &= ~VBLANK_INT_MASK;
		WREG32(mmINT_MASK + reg_block, interrupt_mask);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		interrupt_mask = RREG32(mmINT_MASK + reg_block);
		interrupt_mask |= VBLANK_INT_MASK;
		WREG32(mmINT_MASK + reg_block, interrupt_mask);
		break;
	default:
		break;
	}
}

static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
						    int crtc,
						    enum amdgpu_interrupt_state state)
{
}

static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	u32 dc_hpd_int_cntl;

	if (type >= adev->mode_info.num_hpd) {
		DRM_DEBUG("invalid hpd %d\n", type);
		return 0;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
		break;
	default:
		break;
	}

	return 0;
}

static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CRTC_IRQ_VBLANK1:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK2:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK3:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK4:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK5:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK6:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE1:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE2:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE3:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE4:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE5:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE6:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
		break;
	default:
		break;
	}

	return 0;
}

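/* Handle vblank/vline interrupts: ack the status bit and forward vblanks to DRM */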
static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	unsigned crtc = entry->src_id - 1;
	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);

	switch (entry->src_data[0]) {
	case 0: /* vblank */
		if (disp_int & interrupt_status_offsets[crtc].vblank)
			WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		if (amdgpu_irq_enabled(adev, source, irq_type)) {
			drm_handle_vblank(adev->ddev, crtc);
		}
		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
		break;
	case 1: /* vline */
		if (disp_int & interrupt_status_offsets[crtc].vline)
			WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
		break;
	default:
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
						 struct amdgpu_irq_src *src,
						 unsigned type,
						 enum amdgpu_interrupt_state state)
{
	u32 reg;

	if (type >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", type);
		return -EINVAL;
	}

	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
	if (state == AMDGPU_IRQ_STATE_DISABLE)
		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
	else
		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);

	return 0;
}

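/*
 * Page flip completion handler: ack the flip interrupt, send the vblank
 * event to userspace and schedule the unpin work for the old framebuffer.
 */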
static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	unsigned long flags;
	unsigned crtc_id;
	struct amdgpu_crtc *amdgpu_crtc;
	struct amdgpu_flip_work *works;

	crtc_id = (entry->src_id - 8) >> 1;
	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	if (crtc_id >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
		return -EINVAL;
	}

	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);

	/* IRQ could occur when in initial stage */
	if (amdgpu_crtc == NULL)
		return 0;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return 0;
	}

	/* page flip completed. clean up */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	amdgpu_crtc->pflip_works = NULL;

	/* wake up userspace */
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);

	return 0;
}

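/* Hotplug detect handler: ack the HPD interrupt and kick off the hotplug work */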
static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	uint32_t disp_int, mask, tmp;
	unsigned hpd;

	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
		return 0;
	}

	hpd = entry->src_data[0];
	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
	mask = interrupt_status_offsets[hpd].hpd;

	if (disp_int & mask) {
		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
		schedule_work(&adev->hotplug_work);
		DRM_INFO("IH: HPD%d\n", hpd + 1);
	}

	return 0;
}

static int dce_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int dce_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
	.name = "dce_v6_0",
	.early_init = dce_v6_0_early_init,
	.late_init = NULL,
	.sw_init = dce_v6_0_sw_init,
	.sw_fini = dce_v6_0_sw_fini,
	.hw_init = dce_v6_0_hw_init,
	.hw_fini = dce_v6_0_hw_fini,
	.suspend = dce_v6_0_suspend,
	.resume = dce_v6_0_resume,
	.is_idle = dce_v6_0_is_idle,
	.wait_for_idle = dce_v6_0_wait_for_idle,
	.soft_reset = dce_v6_0_soft_reset,
	.set_clockgating_state = dce_v6_0_set_clockgating_state,
	.set_powergating_state = dce_v6_0_set_powergating_state,
};

static void
dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
			  struct drm_display_mode *mode,
			  struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->pixel_clock = adjusted_mode->clock;

	/* need to call this here rather than in prepare() since we need some crtc info */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	/* set scaler clears this on some chips */
	dce_v6_0_set_interleave(encoder->crtc, mode);

	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
		dce_v6_0_afmt_enable(encoder, true);
		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
	}
}

static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

	if ((amdgpu_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
		if (dig) {
			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
		}
	}

	amdgpu_atombios_scratch_regs_lock(adev, true);

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* select the clock/data port if it uses a router */
		if (amdgpu_connector->router.cd_valid)
			amdgpu_i2c_router_select_cd_port(amdgpu_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			amdgpu_atombios_encoder_set_edp_panel_power(connector,
								    ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
	dce_v6_0_program_fmt(encoder);
}

static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;

	/* need to call this here as we need the crtc set up */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	amdgpu_atombios_scratch_regs_lock(adev, false);
}

static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig;

	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	if (amdgpu_atombios_encoder_is_digital(encoder)) {
		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
			dce_v6_0_afmt_enable(encoder, false);
		dig = amdgpu_encoder->enc_priv;
		dig->dig_encoder = -1;
	}
	amdgpu_encoder->active_device = 0;
}

/* these are handled by the primary encoders */
static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
{
}

static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
{
}

static void
dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
		      struct drm_display_mode *mode,
		      struct drm_display_mode *adjusted_mode)
{
}

static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
{
}

static void
dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
{
}

static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
	.dpms = dce_v6_0_ext_dpms,
	.mode_fixup = dce_v6_0_ext_mode_fixup,
	.prepare = dce_v6_0_ext_prepare,
	.mode_set = dce_v6_0_ext_mode_set,
	.commit = dce_v6_0_ext_commit,
	.disable = dce_v6_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};

static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v6_0_encoder_prepare,
	.mode_set = dce_v6_0_encoder_mode_set,
	.commit = dce_v6_0_encoder_commit,
	.disable = dce_v6_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};

static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v6_0_encoder_prepare,
	.mode_set = dce_v6_0_encoder_mode_set,
	.commit = dce_v6_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};

static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}

static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
	.destroy = dce_v6_0_encoder_destroy,
};

static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
				 uint32_t encoder_enum,
				 uint32_t supported_device,
				 u16 caps)
{
	struct drm_device *dev = adev->ddev;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		if (amdgpu_encoder->encoder_enum == encoder_enum) {
			amdgpu_encoder->devices |= supported_device;
			return;
		}
	}

	/* add a new one */
	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return;

	encoder = &amdgpu_encoder->base;
	switch (adev->mode_info.num_crtc) {
	case 1:
		encoder->possible_crtcs = 0x1;
		break;
	case 2:
	default:
		encoder->possible_crtcs = 0x3;
		break;
	case 4:
		encoder->possible_crtcs = 0xf;
		break;
	case 6:
		encoder->possible_crtcs = 0x3f;
		break;
	}

	amdgpu_encoder->enc_priv = NULL;
	amdgpu_encoder->encoder_enum = encoder_enum;
	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	amdgpu_encoder->devices = supported_device;
	amdgpu_encoder->rmx_type = RMX_OFF;
	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
	amdgpu_encoder->is_ext_encoder = false;
	amdgpu_encoder->caps = caps;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
				 DRM_MODE_ENCODER_DAC, NULL);
		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			amdgpu_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		} else {
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		}
		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_SI170B:
	case ENCODER_OBJECT_ID_CH7303:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
	case ENCODER_OBJECT_ID_TITFP513:
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		amdgpu_encoder->is_ext_encoder = true;
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
		else
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
		break;
	}
}

static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
	.set_vga_render_state = &dce_v6_0_set_vga_render_state,
	.bandwidth_update = &dce_v6_0_bandwidth_update,
	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
	.vblank_wait = &dce_v6_0_vblank_wait,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v6_0_hpd_sense,
	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
	.page_flip = &dce_v6_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v6_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
	.stop_mc_access = &dce_v6_0_stop_mc_access,
	.resume_mc_access = &dce_v6_0_resume_mc_access,
};

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dce_v6_0_display_funcs;
}

static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
	.set = dce_v6_0_set_crtc_interrupt_state,
	.process = dce_v6_0_crtc_irq,
};

static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
	.set = dce_v6_0_set_pageflip_interrupt_state,
	.process = dce_v6_0_pageflip_irq,
};

static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
	.set = dce_v6_0_set_hpd_interrupt_state,
	.process = dce_v6_0_hpd_irq,
};

static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;

	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;

	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
}

const struct amdgpu_ip_block_version dce_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v6_4_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 6,
	.minor = 4,
	.rev = 0,
	.funcs = &dce_v6_0_ip_funcs,
};