Merge tag 'drm-fixes-for-v4.12-rc3' of git://people.freedesktop.org/~airlied/linux
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 26 May 2017 15:54:06 +0000 (08:54 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 26 May 2017 15:54:06 +0000 (08:54 -0700)
Pull drm fixes from Dave Airlie:
 "Not a whole lot happening here, a set of amdgpu fixes and one core
  deadlock fix, and some misc drivers fixes"

* tag 'drm-fixes-for-v4.12-rc3' of git://people.freedesktop.org/~airlied/linux:
  drm/amdgpu: fix null point error when rmmod amdgpu.
  drm/amd/powerplay: fix a signedness bugs
  drm/amdgpu: fix NULL pointer panic of emit_gds_switch
  drm/radeon: Unbreak HPD handling for r600+
  drm/amd/powerplay/smu7: disable mclk switching for high refresh rates
  drm/amd/powerplay/smu7: add vblank check for mclk switching (v2)
  drm/radeon/ci: disable mclk switching for high refresh rates (v2)
  drm/amdgpu/ci: disable mclk switching for high refresh rates (v2)
  drm/amdgpu: fix fundamental suspend/resume issue
  drm/gma500/psb: Actually use VBT mode when it is found
  drm: Fix deadlock retry loop in page_flip_ioctl
  drm: qxl: Delay entering atomic context during cursor update
  drm/radeon: Fix oops upon driver load on PowerXpress laptops

19 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/drm_plane.c
drivers/gpu/drm/gma500/psb_intel_lvds.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/si.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 236d9950221b62665e8728941faa5793fc757980..c0d8c6ff6380e8a69de8faf58d28963dde29c8de 100644
@@ -425,10 +425,15 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
 
 void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
 {
-       struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev;
+       struct amdgpu_fbdev *afbdev;
        struct drm_fb_helper *fb_helper;
        int ret;
 
+       if (!adev)
+               return;
+
+       afbdev = adev->mode_info.rfbdev;
+
        if (!afbdev)
                return;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 07ff3b1514f129edc23875c1f42053f7ef1aaa72..8ecf82c5fe74dc4d34e55d4dbaec5931bca1a2e8 100644
@@ -634,7 +634,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
                mutex_unlock(&id_mgr->lock);
        }
 
-       if (gds_switch_needed) {
+       if (ring->funcs->emit_gds_switch && gds_switch_needed) {
                id->gds_base = job->gds_base;
                id->gds_size = job->gds_size;
                id->gws_base = job->gws_base;
@@ -672,6 +672,7 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
        struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
 
+       atomic64_set(&id->owner, 0);
        id->gds_base = 0;
        id->gds_size = 0;
        id->gws_base = 0;
@@ -680,6 +681,26 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
        id->oa_size = 0;
 }
 
+/**
+ * amdgpu_vm_reset_all_id - reset VMID to zero
+ *
+ * @adev: amdgpu device structure
+ *
+ * Reset VMID to force flush on next use
+ */
+void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
+{
+       unsigned i, j;
+
+       for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+               struct amdgpu_vm_id_manager *id_mgr =
+                       &adev->vm_manager.id_mgr[i];
+
+               for (j = 1; j < id_mgr->num_ids; ++j)
+                       amdgpu_vm_reset_id(adev, i, j);
+       }
+}
+
 /**
  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
  *
@@ -2270,7 +2291,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                adev->vm_manager.seqno[i] = 0;
 
-
        atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
        atomic64_set(&adev->vm_manager.client_counter, 0);
        spin_lock_init(&adev->vm_manager.prt_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index d97e28b4bdc41cbb52e70647685b58db4886514a..e1d951ece4333672512f96ae914ee740f0a5922b 100644
@@ -204,6 +204,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
                        unsigned vmid);
+void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
                                 struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 6dc1410b380f376982551dbebe06f4dd84edf3b2..ec93714e4524eeaf80dbd5181da396e48024827b 100644
@@ -906,6 +906,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
        u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
        u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
 
+       /* disable mclk switching if the refresh is >120Hz, even if the
+        * blanking period would allow it
+        */
+       if (amdgpu_dpm_get_vrefresh(adev) > 120)
+               return true;
+
        if (vblank_time < switch_limit)
                return true;
        else
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index a572979f186cdaeba52701fb8183850e16581143..d860939152df234517e7806135015a23aa316df9 100644
@@ -950,10 +950,6 @@ static int gmc_v6_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->vm_manager.enabled) {
-               gmc_v6_0_vm_fini(adev);
-               adev->vm_manager.enabled = false;
-       }
        gmc_v6_0_hw_fini(adev);
 
        return 0;
@@ -968,16 +964,9 @@ static int gmc_v6_0_resume(void *handle)
        if (r)
                return r;
 
-       if (!adev->vm_manager.enabled) {
-               r = gmc_v6_0_vm_init(adev);
-               if (r) {
-                       dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-                       return r;
-               }
-               adev->vm_manager.enabled = true;
-       }
+       amdgpu_vm_reset_all_ids(adev);
 
-       return r;
+       return 0;
 }
 
 static bool gmc_v6_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index a9083a16a250920c64605bc447ca19c8b3014f3a..2750e5c2381301ceebddb8f614f7e5868de23d2c 100644
@@ -1117,10 +1117,6 @@ static int gmc_v7_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->vm_manager.enabled) {
-               gmc_v7_0_vm_fini(adev);
-               adev->vm_manager.enabled = false;
-       }
        gmc_v7_0_hw_fini(adev);
 
        return 0;
@@ -1135,16 +1131,9 @@ static int gmc_v7_0_resume(void *handle)
        if (r)
                return r;
 
-       if (!adev->vm_manager.enabled) {
-               r = gmc_v7_0_vm_init(adev);
-               if (r) {
-                       dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-                       return r;
-               }
-               adev->vm_manager.enabled = true;
-       }
+       amdgpu_vm_reset_all_ids(adev);
 
-       return r;
+       return 0;
 }
 
 static bool gmc_v7_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 4ac99784160a3ed9fbb59c5bf53a8d1b0a6ec1b4..f56b4089ee9f3fe7581cd6c541d434867c613cbb 100644
@@ -1209,10 +1209,6 @@ static int gmc_v8_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->vm_manager.enabled) {
-               gmc_v8_0_vm_fini(adev);
-               adev->vm_manager.enabled = false;
-       }
        gmc_v8_0_hw_fini(adev);
 
        return 0;
@@ -1227,16 +1223,9 @@ static int gmc_v8_0_resume(void *handle)
        if (r)
                return r;
 
-       if (!adev->vm_manager.enabled) {
-               r = gmc_v8_0_vm_init(adev);
-               if (r) {
-                       dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-                       return r;
-               }
-               adev->vm_manager.enabled = true;
-       }
+       amdgpu_vm_reset_all_ids(adev);
 
-       return r;
+       return 0;
 }
 
 static bool gmc_v8_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index dc1e1c1d6b2430cb9957047a454cde87bf439561..f936332a069d2d1c9329a3d52a17f9e44776f659 100644
@@ -791,10 +791,6 @@ static int gmc_v9_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->vm_manager.enabled) {
-               gmc_v9_0_vm_fini(adev);
-               adev->vm_manager.enabled = false;
-       }
        gmc_v9_0_hw_fini(adev);
 
        return 0;
@@ -809,17 +805,9 @@ static int gmc_v9_0_resume(void *handle)
        if (r)
                return r;
 
-       if (!adev->vm_manager.enabled) {
-               r = gmc_v9_0_vm_init(adev);
-               if (r) {
-                       dev_err(adev->dev,
-                               "vm manager initialization failed (%d).\n", r);
-                       return r;
-               }
-               adev->vm_manager.enabled = true;
-       }
+       amdgpu_vm_reset_all_ids(adev);
 
-       return r;
+       return 0;
 }
 
 static bool gmc_v9_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index a74a3db3056c9c4a991e2b4525eb09de8a234a0a..102eb6d029faeb27887215ada8aeccf93d4039b0 100644
@@ -2655,6 +2655,28 @@ static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
        return sizeof(struct smu7_power_state);
 }
 
+static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
+                                uint32_t vblank_time_us)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t switch_limit_us;
+
+       switch (hwmgr->chip_id) {
+       case CHIP_POLARIS10:
+       case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
+               switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+               break;
+       default:
+               switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
+               break;
+       }
+
+       if (vblank_time_us < switch_limit_us)
+               return true;
+       else
+               return false;
+}
 
 static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                                struct pp_power_state *request_ps,
@@ -2669,6 +2691,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
        bool disable_mclk_switching;
        bool disable_mclk_switching_for_frame_lock;
        struct cgs_display_info info = {0};
+       struct cgs_mode_info mode_info = {0};
        const struct phm_clock_and_voltage_limits *max_limits;
        uint32_t i;
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -2677,6 +2700,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
        int32_t count;
        int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
 
+       info.mode_info = &mode_info;
        data->battery_state = (PP_StateUILabel_Battery ==
                        request_ps->classification.ui_label);
 
@@ -2703,8 +2727,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 
        cgs_get_active_displays_info(hwmgr->device, &info);
 
-       /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
-
        minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
        minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
 
@@ -2769,8 +2791,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                                    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
 
 
-       disable_mclk_switching = (1 < info.display_count) ||
-                                   disable_mclk_switching_for_frame_lock;
+       disable_mclk_switching = ((1 < info.display_count) ||
+                                 disable_mclk_switching_for_frame_lock ||
+                                 smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
+                                 (mode_info.refresh_rate > 120));
 
        sclk = smu7_ps->performance_levels[0].engine_clock;
        mclk = smu7_ps->performance_levels[0].memory_clock;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index ad30f5d3a10d5ea2a0118203ff66110a11b0583a..2614af2f553f3007ae25cb70e1f8f322c23a62d6 100644
@@ -4186,7 +4186,7 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
                enum pp_clock_type type, uint32_t mask)
 {
        struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
-       uint32_t i;
+       int i;
 
        if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
                return -EINVAL;
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index fedd4d60d9cd5b2bab64ed8aae1391ea2f402fc5..5dc8c4350602a561fe4cfd77fce77e26770696fb 100644
@@ -948,8 +948,6 @@ retry:
        }
 
 out:
-       if (ret && crtc->funcs->page_flip_target)
-               drm_crtc_vblank_put(crtc);
        if (fb)
                drm_framebuffer_put(fb);
        if (crtc->primary->old_fb)
@@ -964,5 +962,8 @@ out:
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
 
+       if (ret && crtc->funcs->page_flip_target)
+               drm_crtc_vblank_put(crtc);
+
        return ret;
 }
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 0066fe7e622ef75de181cc54770daeb2006a2d24..be3eefec5152aa0cdf9dd891bf4d4fe423ff87d4 100644
@@ -759,20 +759,23 @@ void psb_intel_lvds_init(struct drm_device *dev,
                if (scan->type & DRM_MODE_TYPE_PREFERRED) {
                        mode_dev->panel_fixed_mode =
                            drm_mode_duplicate(dev, scan);
+                       DRM_DEBUG_KMS("Using mode from DDC\n");
                        goto out;       /* FIXME: check for quirks */
                }
        }
 
        /* Failed to get EDID, what about VBT? do we need this? */
-       if (mode_dev->vbt_mode)
+       if (dev_priv->lfp_lvds_vbt_mode) {
                mode_dev->panel_fixed_mode =
-                   drm_mode_duplicate(dev, mode_dev->vbt_mode);
+                       drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
 
-       if (!mode_dev->panel_fixed_mode)
-               if (dev_priv->lfp_lvds_vbt_mode)
-                       mode_dev->panel_fixed_mode =
-                               drm_mode_duplicate(dev,
-                                       dev_priv->lfp_lvds_vbt_mode);
+               if (mode_dev->panel_fixed_mode) {
+                       mode_dev->panel_fixed_mode->type |=
+                               DRM_MODE_TYPE_PREFERRED;
+                       DRM_DEBUG_KMS("Using mode from VBT\n");
+                       goto out;
+               }
+       }
 
        /*
         * If we didn't get EDID, try checking if the panel is already turned
@@ -789,6 +792,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
                if (mode_dev->panel_fixed_mode) {
                        mode_dev->panel_fixed_mode->type |=
                            DRM_MODE_TYPE_PREFERRED;
+                       DRM_DEBUG_KMS("Using pre-programmed mode\n");
                        goto out;       /* FIXME: check for quirks */
                }
        }
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 058340a002c29daef072f21bf90e1e8a572b3fa9..4a340efd8ba67ac23aadc6611ea3ce7e7d460e1a 100644
@@ -575,8 +575,6 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
        if (ret)
                return;
 
-       cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
-
        if (fb != old_state->fb) {
                obj = to_qxl_framebuffer(fb)->obj;
                user_bo = gem_to_qxl_bo(obj);
@@ -614,6 +612,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
                qxl_bo_kunmap(cursor_bo);
                qxl_bo_kunmap(user_bo);
 
+               cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
                cmd->u.set.visible = 1;
                cmd->u.set.shape = qxl_bo_physical_address(qdev,
                                                           cursor_bo, 0);
@@ -624,6 +623,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
                if (ret)
                        goto out_free_release;
 
+               cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
                cmd->type = QXL_CURSOR_MOVE;
        }
 
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 7ba450832e6b7a59db9499a0919a8bb5f4a3fd1a..ea36dc4dd5d22ec7b30678ea811dcf5009f454f7 100644
@@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
        u32 vblank_time = r600_dpm_get_vblank_time(rdev);
        u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
 
+       /* disable mclk switching if the refresh is >120Hz, even if the
+        * blanking period would allow it
+        */
+       if (r600_dpm_get_vrefresh(rdev) > 120)
+               return true;
+
        if (vblank_time < switch_limit)
                return true;
        else
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index ccebe0f8d2e1e3b4ec15d6170b788e2e8fc8b980..008c145b7f29f60a298419931f1922555de5e35a 100644
@@ -7401,7 +7401,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
                WREG32(DC_HPD5_INT_CONTROL, tmp);
        }
        if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
                tmp |= DC_HPDx_INT_ACK;
                WREG32(DC_HPD6_INT_CONTROL, tmp);
        }
@@ -7431,7 +7431,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
                WREG32(DC_HPD5_INT_CONTROL, tmp);
        }
        if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
                tmp |= DC_HPDx_RX_INT_ACK;
                WREG32(DC_HPD6_INT_CONTROL, tmp);
        }
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f130ec41ee4bbcad63516335dfaf17e24e782b60..0bf103536404e5dde2d480bf692a496a6865e817 100644
@@ -4927,7 +4927,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
                WREG32(DC_HPD5_INT_CONTROL, tmp);
        }
        if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
                tmp |= DC_HPDx_INT_ACK;
                WREG32(DC_HPD6_INT_CONTROL, tmp);
        }
@@ -4958,7 +4958,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
                WREG32(DC_HPD5_INT_CONTROL, tmp);
        }
        if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
                tmp |= DC_HPDx_RX_INT_ACK;
                WREG32(DC_HPD6_INT_CONTROL, tmp);
        }
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 0a085176e79b35b2887c19f7b33091804b58e645..e06e2d8feab397822361ba18a3e4b420cc2f0c4d 100644
@@ -3988,7 +3988,7 @@ static void r600_irq_ack(struct radeon_device *rdev)
                        WREG32(DC_HPD5_INT_CONTROL, tmp);
                }
                if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
-                       tmp = RREG32(DC_HPD5_INT_CONTROL);
+                       tmp = RREG32(DC_HPD6_INT_CONTROL);
                        tmp |= DC_HPDx_INT_ACK;
                        WREG32(DC_HPD6_INT_CONTROL, tmp);
                }
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index e3e7cb1d10a2941d1790d36a9f5250f70aeb78cd..4761f27f2ca2a073a8ffb30806a9cac905003f23 100644
@@ -116,7 +116,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
        if ((radeon_runtime_pm != 0) &&
            radeon_has_atpx() &&
            ((flags & RADEON_IS_IGP) == 0) &&
-           !pci_is_thunderbolt_attached(rdev->pdev))
+           !pci_is_thunderbolt_attached(dev->pdev))
                flags |= RADEON_IS_PX;
 
        /* radeon_device_init should report only fatal error
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ceee87f029d9a3479d374c4fc305338377dee961..76d1888528e675c700b543fa6e10c77a466054d7 100644
@@ -6317,7 +6317,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
                WREG32(DC_HPD5_INT_CONTROL, tmp);
        }
        if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
                tmp |= DC_HPDx_INT_ACK;
                WREG32(DC_HPD6_INT_CONTROL, tmp);
        }
@@ -6348,7 +6348,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
                WREG32(DC_HPD5_INT_CONTROL, tmp);
        }
        if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
                tmp |= DC_HPDx_RX_INT_ACK;
                WREG32(DC_HPD6_INT_CONTROL, tmp);
        }