/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"

#include "vega10/soc15ip.h"
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/vega10_enum.h"
#include "vega10/HDP/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
#define GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH 34

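/*
 * PWR_MISC_CNTL_STATUS lives in the PWR block, whose register headers are
 * not included above, so its offset and field layout are defined locally.
 */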
#define mmPWR_MISC_CNTL_STATUS					0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L

MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15)}
};

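/*
 * The golden settings below are {register offset, AND mask, OR value}
 * triplets consumed by amdgpu_program_register_sequence(): each register is
 * read, the bits in the AND mask are cleared, and the OR value is written
 * back (an AND mask of 0xffffffff writes the value directly).
 */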
static const u32 golden_settings_gc_9_0[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCPC_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmCPF_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmCPG_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
	SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmIA_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_0), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_1), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x01000107,
	SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197,
	SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION), 0x3fff3af3, 0x19200000,
	SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000003ff,
	SOC15_REG_OFFSET(GC, 0, mmWD_UTCL1_CNTL), 0x08000000, 0x08000080
};

static const u32 golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0x0000f000, 0x00012107,
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x2a114042,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x2a114042,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0x00008000, 0x00048000,
	SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
	SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x00001800, 0x00000800
};

static const u32 golden_settings_gc_9_1[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0xfffdf3cf, 0x00014104,
	SOC15_REG_OFFSET(GC, 0, mmCPC_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmCPF_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmCPG_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
	SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmIA_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_0), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_1), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0x00003120,
	SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION), 0x3fff3af3, 0x19200000,
	SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000000ff,
	SOC15_REG_OFFSET(GC, 0, mmWD_UTCL1_CNTL), 0x08000000, 0x08000080
};

static const u32 golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x24000042,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x24000042,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0xffffffff, 0x04048000,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_MODE_CNTL_1), 0x06000000, 0x06000000,
	SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
	SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x01bd9f33, 0x00000800
};

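/*
 * Default packed GB_ADDR_CONFIG values per ASIC; the individual fields
 * (num_pipes, num_banks, etc.) are decoded from these values in
 * gfx_v9_0_gpu_early_init() below.
 */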
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);

static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_0,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_0));
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_0_vg10,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case CHIP_RAVEN:
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_1,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_1));
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_1_rv1,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	default:
		break;
	}
}

static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 7;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

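/*
 * Emit a WRITE_DATA packet that writes @val into MMIO register @reg from the
 * selected engine; when @wc is set the packet requests a write confirmation,
 * so following packets cannot overtake the register write.
 */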
static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

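/*
 * Emit a WAIT_REG_MEM packet: the CP polls a register (mem_space = 0) or a
 * memory location (mem_space = 1) until (value & mask) == ref (FUNCTION = 3,
 * "equal"), re-sampling at the given poll interval.
 */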
static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

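/*
 * Basic ring liveness test: seed a scratch register with 0xCAFEDEAD, submit
 * a SET_UCONFIG_REG packet that rewrites it to 0xDEADBEEF, then poll the
 * register until the new value lands or the timeout expires.
 */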
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}

static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	uint32_t scratch;
	uint32_t tmp = 0;
	long r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err2;
	}
	tmp = RREG32(scratch);
	if (tmp == 0xDEADBEEF) {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}

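/*
 * Fetch and validate all gfx microcode images for the ASIC (PFP, ME, CE,
 * RLC, MEC and the optional MEC2), published as "amdgpu/<chip>_<block>.bin",
 * and register them in the firmware list when PSP front-door loading is used.
 */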
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			 adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) -
			      le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) -
				      le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
		}
	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx9: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}
	return err;
}

static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

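/*
 * Program the RLC load-balance-per-watt (LBPW) thresholds, counters and CU
 * masks; the CU mask writes go through broadcast GRBM indexing so that they
 * reach every shader engine/array.
 */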
static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
	uint32_t data;

	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));

	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);

	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
	data &= 0x0000FFFF;
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

	/* set RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF */
	WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, 0xFFF);

	/* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
	 * but used for RLC_LB_CNTL configuration
	 */
	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
	mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
	WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}

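/*
 * Concatenate the jump tables of the five CP microcode images (CE, PFP, ME,
 * MEC and MEC2) into the RLC-owned cp_table buffer, with bo_offset tracking
 * the running position in dwords.
 */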
static void rv_init_cp_jump_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me = 5;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 4) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}

static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}

static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
	volatile u32 *dst_ptr;
	u32 dws;
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx9_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* clear state block */
		adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
		if (adev->gfx.rlc.clear_state_obj == NULL) {
			r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_VRAM,
						    &adev->gfx.rlc.clear_state_obj,
						    &adev->gfx.rlc.clear_state_gpu_addr,
						    (void **)&adev->gfx.rlc.cs_ptr);
			if (r) {
				dev_err(adev->dev,
					"(%d) failed to create rlc csb bo\n", r);
				gfx_v9_0_rlc_fini(adev);
				return r;
			}
		}
		/* set up the cs buffer */
		dst_ptr = adev->gfx.rlc.cs_ptr;
		gfx_v9_0_get_csb_buffer(adev, dst_ptr);
		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}

	if (adev->asic_type == CHIP_RAVEN) {
		/* TODO: double check the cp_table_size for RV */
		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
		if (adev->gfx.rlc.cp_table_obj == NULL) {
			r = amdgpu_bo_create_kernel(adev, adev->gfx.rlc.cp_table_size,
						    PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
						    &adev->gfx.rlc.cp_table_obj,
						    &adev->gfx.rlc.cp_table_gpu_addr,
						    (void **)&adev->gfx.rlc.cp_table_ptr);
			if (r) {
				dev_err(adev->dev,
					"(%d) failed to create cp table bo\n", r);
				gfx_v9_0_rlc_fini(adev);
				return r;
			}
		}

		rv_init_cp_jump_table(adev);
		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

		gfx_v9_0_init_lbpw(adev);
	}

	return 0;
}

static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->gfx.mec.hpd_eop_obj) {
		r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

		amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
		adev->gfx.mec.hpd_eop_obj = NULL;
	}
	if (adev->gfx.mec.mec_fw_obj) {
		r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, true);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve mec firmware bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.mec.mec_fw_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

		amdgpu_bo_unref(&adev->gfx.mec.mec_fw_obj);
		adev->gfx.mec.mec_fw_obj = NULL;
	}
}

static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;

	if (adev->gfx.mec.hpd_eop_obj == NULL) {
		r = amdgpu_bo_create(adev,
				     mec_hpd_size,
				     PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &adev->gfx.mec.hpd_eop_obj);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
	if (unlikely(r != 0)) {
		gfx_v9_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
			  &adev->gfx.mec.hpd_eop_gpu_addr);
	if (r) {
		dev_warn(adev->dev, "(%d) pin HPD EOP bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) map HPD EOP bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;

	if (adev->gfx.mec.mec_fw_obj == NULL) {
		r = amdgpu_bo_create(adev,
				     mec_hdr->header.ucode_size_bytes,
				     PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &adev->gfx.mec.mec_fw_obj);
		if (r) {
			dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, false);
	if (unlikely(r != 0)) {
		gfx_v9_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_pin(adev->gfx.mec.mec_fw_obj, AMDGPU_GEM_DOMAIN_GTT,
			  &adev->gfx.mec.mec_fw_gpu_addr);
	if (r) {
		dev_warn(adev->dev, "(%d) pin mec firmware bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_kmap(adev->gfx.mec.mec_fw_obj, (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) map firmware bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}

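/*
 * SQ_IND_INDEX/SQ_IND_DATA form an indirect register pair for reading wave
 * state: the index selects the wave, SIMD and register, and each read of
 * SQ_IND_DATA returns one dword (auto-incrementing when AUTO_INCR is set, as
 * used by wave_read_regs() below).
 */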
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}

static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, 0,
		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_0_select_se_sh,
	.read_wave_data = &gfx_v9_0_read_wave_data,
	.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
};

static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_RAVEN:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));
}

static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
				   struct amdgpu_ngg_buf *ngg_buf,
				   int size_se,
				   int default_size_se)
{
	int r;

	if (size_se < 0) {
		dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
		return -EINVAL;
	}
	size_se = size_se ? size_se : default_size_se;

	ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
	r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				    &ngg_buf->bo,
				    &ngg_buf->gpu_addr,
				    NULL);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
		return r;
	}
	ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);

	return r;
}

static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < NGG_BUF_MAX; i++)
		amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
				      &adev->gfx.ngg.buf[i].gpu_addr,
				      NULL);

	memset(&adev->gfx.ngg.buf[0], 0,
			sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);

	adev->gfx.ngg.init = false;

	return 0;
}

static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_ngg || adev->gfx.ngg.init == true)
		return 0;

	/* GDS reserve memory: 64 bytes alignment */
	adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
	adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gfx.ngg.gds_reserve_addr = amdgpu_gds_reg_offset[0].mem_base;
	adev->gfx.ngg.gds_reserve_addr += adev->gds.mem.gfx_partition_size;

	/* Primitive Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
				    amdgpu_prim_buf_per_se,
				    64 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Primitive Buffer\n");
		goto err;
	}

	/* Position Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
				    amdgpu_pos_buf_per_se,
				    256 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Position Buffer\n");
		goto err;
	}

	/* Control Sideband */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
				    amdgpu_cntl_sb_buf_per_se,
				    256);
	if (r) {
		dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
		goto err;
	}

	/* Parameter Cache, not created by default */
	if (amdgpu_param_buf_per_se <= 0)
		goto out;

	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
				    amdgpu_param_buf_per_se,
				    512 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Parameter Cache\n");
		goto err;
	}

out:
	adev->gfx.ngg.init = true;
	return 0;
err:
	gfx_v9_0_ngg_fini(adev);
	return r;
}

static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	int r;
	u32 data;
	u32 size;
	u32 base;

	if (!amdgpu_ngg)
		return 0;

	/* Program buffer size */
	data = 0;
	size = adev->gfx.ngg.buf[NGG_PRIM].size / 256;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, size);

	size = adev->gfx.ngg.buf[NGG_POS].size / 256;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, size);

	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);

	data = 0;
	size = adev->gfx.ngg.buf[NGG_CNTL].size / 256;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, size);

	size = adev->gfx.ngg.buf[NGG_PARAM].size / 1024;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, size);

	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);

	/* Program buffer base address */
	base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);

	/* Clear GDS reserved memory */
	r = amdgpu_ring_alloc(ring, 17);
	if (r) {
		DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   amdgpu_gds_reg_offset[0].mem_size,
				   (adev->gds.mem.total_size +
				    adev->gfx.ngg.gds_reserve_size) >>
				   AMDGPU_GDS_SHIFT);

	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
				 PACKET3_DMA_DATA_SRC_SEL(2)));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_size);

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   amdgpu_gds_reg_offset[0].mem_size, 0);

	amdgpu_ring_commit(ring);

	return 0;
}

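/*
 * Set up one compute ring: every ring gets its own doorbell and its own
 * GFX9_MEC_HPD_SIZE slice of the shared HPD/EOP buffer allocated in
 * gfx_v9_0_mec_init().
 */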
static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				      int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX9_MEC_HPD_SIZE);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;

	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;

	return 0;
}

static int gfx_v9_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id;
	struct amdgpu_ring *ring;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		adev->gfx.mec.num_mec = 2;
		break;
	default:
		adev->gfx.mec.num_mec = 1;
		break;
	}

	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	/* KIQ event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
	if (r)
		return r;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 184,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 185,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	gfx_v9_0_scratch_init(adev);

	r = gfx_v9_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load gfx firmware!\n");
		return r;
	}

	r = gfx_v9_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v9_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->ring_obj = NULL;
		sprintf(ring->name, "gfx");
		ring->use_doorbell = true;
		ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
		if (r)
			return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	ring_id = 0;
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v9_0_compute_ring_init(adev,
							       ring_id,
							       i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
	if (r) {
		DRM_ERROR("Failed to init KIQ BOs!\n");
		return r;
	}

	kiq = &adev->gfx.kiq;
	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
	if (r)
		return r;

	/* create MQD for all compute queues as well as KIQ for SRIOV case */
	r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd));
	if (r)
		return r;

	/* reserve GDS, GWS and OA resource for gfx */
	r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
				    &adev->gds.gds_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
				    &adev->gds.gws_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
				    &adev->gds.oa_gfx_bo, NULL, NULL);
	if (r)
		return r;

	adev->gfx.ce_ram_size = 0x8000;

	gfx_v9_0_gpu_early_init(adev);

	r = gfx_v9_0_ngg_init(adev);
	if (r)
		return r;

	return 0;
}

static int gfx_v9_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_compute_mqd_sw_fini(adev);
	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
	amdgpu_gfx_kiq_fini(adev);

	gfx_v9_0_mec_fini(adev);
	gfx_v9_0_ngg_fini(adev);

	return 0;
}

static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	/* TODO */
}

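/*
 * Route subsequent GRBM register accesses to a single shader engine/array,
 * or broadcast to all of them when 0xffffffff is passed; callers in this
 * file serialize on grbm_idx_mutex.
 */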
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
{
	u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);

	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
	} else if (se_num == 0xffffffff) {
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
	} else if (sh_num == 0xffffffff) {
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
	} else {
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
	}
	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}

static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);

	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}

static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
					adev->gfx.config.max_sh_per_se;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v9_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);
}

#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)
static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	mutex_lock(&adev->srbm_mutex);
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v9_0_tiling_mode_table_init(adev);

	gfx_v9_0_setup_rb(adev);
	gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < 16; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		tmp = 0;
		tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
				    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v9_0_init_compute_vmid(adev);

	mutex_lock(&adev->grbm_idx_mutex);
	/*
	 * making sure that the following register writes will be broadcasted
	 * to all the shaders
	 */
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);

	WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
		     (adev->gfx.config.sc_prim_fifo_size_frontend <<
			PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
		     (adev->gfx.config.sc_prim_fifo_size_backend <<
			PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
		     (adev->gfx.config.sc_hiz_tile_fifo_size <<
			PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
		     (adev->gfx.config.sc_earlyz_tile_fifo_size <<
			PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
	mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}

static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
					       bool enable)
{
	u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);

	WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}

static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
{
	/* csib */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
			adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
			adev->gfx.rlc.clear_state_size);
}

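/*
 * Walk the RLC register-list-format image: record where each entry starts
 * (entries are terminated by 0xFFFFFFFF), collect the set of unique indirect
 * registers, and rewrite every indirect reference into an index into that
 * set for the save/restore machine.
 */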
static void gfx_v9_0_parse_ind_reg_list(int *register_list_format,
				int indirect_offset,
				int list_size,
				int *unique_indirect_regs,
				int *unique_indirect_reg_count,
				int max_indirect_reg_count,
				int *indirect_start_offsets,
				int *indirect_start_offsets_count,
				int max_indirect_start_offsets_count)
{
	int idx;
	bool new_entry = true;

	for (; indirect_offset < list_size; indirect_offset++) {

		if (new_entry) {
			new_entry = false;
			indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
			*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
			BUG_ON(*indirect_start_offsets_count >= max_indirect_start_offsets_count);
		}

		if (register_list_format[indirect_offset] == 0xFFFFFFFF) {
			new_entry = true;
			continue;
		}

		indirect_offset += 2;

		/* look for the matching index */
		for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
			if (unique_indirect_regs[idx] ==
				register_list_format[indirect_offset])
				break;
		}

		if (idx >= *unique_indirect_reg_count) {
			unique_indirect_regs[*unique_indirect_reg_count] =
				register_list_format[indirect_offset];
			idx = *unique_indirect_reg_count;
			*unique_indirect_reg_count = *unique_indirect_reg_count + 1;
			BUG_ON(*unique_indirect_reg_count >= max_indirect_reg_count);
		}

		register_list_format[indirect_offset] = idx;
	}
}

1720 static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
1722 int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
1723 int unique_indirect_reg_count = 0;
1725 int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
1726 int indirect_start_offsets_count = 0;
1732 u32 *register_list_format =
1733 kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
1734 if (!register_list_format)
1736 memcpy(register_list_format, adev->gfx.rlc.register_list_format,
1737 adev->gfx.rlc.reg_list_format_size_bytes);
1739 /* setup unique_indirect_regs array and indirect_start_offsets array */
1740 gfx_v9_0_parse_ind_reg_list(register_list_format,
1741 GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH,
1742 adev->gfx.rlc.reg_list_format_size_bytes >> 2,
1743 unique_indirect_regs,
1744 &unique_indirect_reg_count,
1745 sizeof(unique_indirect_regs)/sizeof(int),
1746 indirect_start_offsets,
1747 &indirect_start_offsets_count,
1748 sizeof(indirect_start_offsets)/sizeof(int));
1750 /* enable auto inc in case it is disabled */
1751 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
1752 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1753 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
1755 /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
1756 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
1757 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
1758 for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
1759 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
1760 adev->gfx.rlc.register_restore[i]);
1762 /* load direct register */
1763 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR), 0);
1764 for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
1765 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
1766 adev->gfx.rlc.register_restore[i]);
1768 /* load indirect register */
1769 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1770 adev->gfx.rlc.reg_list_format_start);
1771 for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
1772 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
1773 register_list_format[i]);
1775 /* set save/restore list size */
1776 list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
1777 list_size = list_size >> 1;
1778 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1779 adev->gfx.rlc.reg_restore_list_size);
1780 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
1782 /* write the starting offsets to RLC scratch ram */
1783 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1784 adev->gfx.rlc.starting_offsets_start);
1785 for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
1786 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
1787 indirect_start_offsets[i]);
1789 /* load unique indirect regs*/
1790 for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
1791 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i,
1792 unique_indirect_regs[i] & 0x3FFFF);
1793 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i,
1794 unique_indirect_regs[i] >> 20);
1797 kfree(register_list_format);
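/* Turn on the RLC save/restore machine so the list above takes effect. */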
1801 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
1805 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
1806 tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
1807 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
1810 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
1814 uint32_t default_data = 0;
1816 default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
1817 if (enable) {
1818 /* enable GFXIP control over CGPG */
1819 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
1820 if (default_data != data)
1821 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
1824 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
1825 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
1826 if (default_data != data)
1827 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
1829 /* restore GFXIP control over CGPG */
1830 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
1831 if (default_data != data)
1832 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
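/*
 * One-time GFX power-gating setup: poll count, RLC power-up/down delays,
 * idle thresholds, and handing CGPG control to the GFX IP.
 */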
1836 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
1840 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
1841 AMD_PG_SUPPORT_GFX_SMG |
1842 AMD_PG_SUPPORT_GFX_DMG)) {
1843 /* init IDLE_POLL_COUNT = 0x60 */
1844 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
1845 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
1846 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
1847 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
1849 /* init RLC PG Delay */
1851 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
1852 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
1853 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
1854 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
1855 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
1857 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
1858 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
1859 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
1860 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
1862 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
1863 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
1864 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
1865 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
1867 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
1868 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
1870 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
1871 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
1872 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
1874 pwr_10_0_gfxip_control_over_cgpg(adev, true);
1878 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
1882 uint32_t default_data = 0;
1884 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1886 if (enable) {
1887 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
1888 if (default_data != data)
1889 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1891 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
1892 if (default_data != data)
1893 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1897 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
1901 uint32_t default_data = 0;
1903 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1905 if (enable) {
1906 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
1907 if (default_data != data)
1908 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1910 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
1911 if (default_data != data)
1912 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1916 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
1920 uint32_t default_data = 0;
1922 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1924 if (enable) {
1925 data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
1926 if (default_data != data)
1927 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1929 data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
1930 if (default_data != data)
1931 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1935 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
1938 uint32_t data, default_data;
1940 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1942 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
1944 data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
1945 if (default_data != data)
1946 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1949 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
1952 uint32_t data, default_data;
1954 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1956 data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
1958 data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
1959 if (default_data != data)
1960 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1963 /* read any GFX register to wake up GFX */
1964 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
1967 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
1970 uint32_t data, default_data;
1972 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1974 data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
1976 data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
1977 if (default_data != data)
1978 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1981 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
1984 uint32_t data, default_data;
1986 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1988 data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
1990 data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
1991 if (default_data != data)
1992 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
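/*
 * Top-level PG init: build the save/restore list and enable the SRM when
 * any PG feature is active, then do the Raven-only CP jump table and
 * SMU-handshake programming.
 */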
1995 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
1997 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
1998 AMD_PG_SUPPORT_GFX_SMG |
1999 AMD_PG_SUPPORT_GFX_DMG |
2001 AMD_PG_SUPPORT_GDS |
2002 AMD_PG_SUPPORT_RLC_SMU_HS)) {
2003 gfx_v9_0_init_csb(adev);
2004 gfx_v9_0_init_rlc_save_restore_list(adev);
2005 gfx_v9_0_enable_save_restore_machine(adev);
2007 if (adev->asic_type == CHIP_RAVEN) {
2008 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_JUMP_TABLE_RESTORE),
2009 adev->gfx.rlc.cp_table_gpu_addr >> 8);
2010 gfx_v9_0_init_gfx_power_gating(adev);
2012 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
2013 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
2014 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
2016 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
2017 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
2020 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
2021 gfx_v9_0_enable_cp_power_gating(adev, true);
2023 gfx_v9_0_enable_cp_power_gating(adev, false);
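/* Halt the RLC F32 core, mask the GUI idle interrupt and wait for the
 * RLC serdes to drain. */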
2028 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
2030 u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
2032 tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
2033 WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);
2035 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2037 gfx_v9_0_wait_for_rlc_serdes(adev);
2040 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
2042 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2044 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2048 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
2050 #ifdef AMDGPU_RLC_DEBUG_RETRY
2054 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2056 /* APUs (e.g. carrizo) enable the CP interrupt only after the CP is initialized */
2057 if (!(adev->flags & AMD_IS_APU))
2058 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2062 #ifdef AMDGPU_RLC_DEBUG_RETRY
2063 /* RLC_GPM_GENERAL_6 : RLC Ucode version */
2064 rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
2065 if (rlc_ucode_ver == 0x108) {
2066 DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
2067 rlc_ucode_ver, adev->gfx.rlc_fw_version);
2068 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
2069 * default is 0x9C4 to create a 100us interval */
2070 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
2071 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
2072 * required to disable the page fault retry interrupts; default is 0x100 */
2074 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
2079 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
2081 const struct rlc_firmware_header_v2_0 *hdr;
2082 const __le32 *fw_data;
2083 unsigned i, fw_size;
2085 if (!adev->gfx.rlc_fw)
2088 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2089 amdgpu_ucode_print_rlc_hdr(&hdr->header);
2091 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2092 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2093 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2095 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
2096 RLCG_UCODE_LOADING_START_ADDRESS);
2097 for (i = 0; i < fw_size; i++)
2098 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
2099 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
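/*
 * RLC bring-up sequence: stop, clear CGCG/PG state, reset, re-init PG,
 * load the microcode by hand when PSP is not doing the loading, then
 * start the RLC (with LBPW configured on Raven).
 */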
2104 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
2108 if (amdgpu_sriov_vf(adev))
2111 gfx_v9_0_rlc_stop(adev);
2114 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
2117 WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
2119 gfx_v9_0_rlc_reset(adev);
2121 gfx_v9_0_init_pg(adev);
2123 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2124 /* legacy rlc firmware loading */
2125 r = gfx_v9_0_rlc_load_microcode(adev);
2130 if (adev->asic_type == CHIP_RAVEN) {
2131 if (amdgpu_lbpw != 0)
2132 gfx_v9_0_enable_lbpw(adev, true);
2134 gfx_v9_0_enable_lbpw(adev, false);
2137 gfx_v9_0_rlc_start(adev);
2142 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2145 u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
2147 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2148 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2149 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
2151 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2152 adev->gfx.gfx_ring[i].ready = false;
2154 WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
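/*
 * Legacy (non-PSP) CP gfx microcode load: halt the CP, then stream the
 * PFP, CE and ME images into their ucode RAM ports one dword at a time.
 */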
2158 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2160 const struct gfx_firmware_header_v1_0 *pfp_hdr;
2161 const struct gfx_firmware_header_v1_0 *ce_hdr;
2162 const struct gfx_firmware_header_v1_0 *me_hdr;
2163 const __le32 *fw_data;
2164 unsigned i, fw_size;
2166 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2169 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2170 adev->gfx.pfp_fw->data;
2171 ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2172 adev->gfx.ce_fw->data;
2173 me_hdr = (const struct gfx_firmware_header_v1_0 *)
2174 adev->gfx.me_fw->data;
2176 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2177 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2178 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2180 gfx_v9_0_cp_gfx_enable(adev, false);
2183 fw_data = (const __le32 *)
2184 (adev->gfx.pfp_fw->data +
2185 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2186 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2187 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
2188 for (i = 0; i < fw_size; i++)
2189 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2190 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2193 fw_data = (const __le32 *)
2194 (adev->gfx.ce_fw->data +
2195 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2196 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2197 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
2198 for (i = 0; i < fw_size; i++)
2199 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2200 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2203 fw_data = (const __le32 *)
2204 (adev->gfx.me_fw->data +
2205 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2206 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2207 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
2208 for (i = 0; i < fw_size; i++)
2209 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2210 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
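/*
 * Kick off the gfx ring: program the context limits, un-halt the CP and
 * emit the clear-state preamble plus the CE partition bases on ring 0.
 */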
2215 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
2217 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2218 const struct cs_section_def *sect = NULL;
2219 const struct cs_extent_def *ext = NULL;
2223 WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2224 WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
2226 gfx_v9_0_cp_gfx_enable(adev, true);
2228 r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4);
2230 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2234 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2235 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2237 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2238 amdgpu_ring_write(ring, 0x80000000);
2239 amdgpu_ring_write(ring, 0x80000000);
2241 for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
2242 for (ext = sect->section; ext->extent != NULL; ++ext) {
2243 if (sect->id == SECT_CONTEXT) {
2244 amdgpu_ring_write(ring,
2245 PACKET3(PACKET3_SET_CONTEXT_REG,
2247 amdgpu_ring_write(ring,
2248 ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2249 for (i = 0; i < ext->reg_count; i++)
2250 amdgpu_ring_write(ring, ext->extent[i]);
2255 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2256 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2258 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2259 amdgpu_ring_write(ring, 0);
2261 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2262 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2263 amdgpu_ring_write(ring, 0x8000);
2264 amdgpu_ring_write(ring, 0x8000);
2266 amdgpu_ring_commit(ring);
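/*
 * Restore the gfx ring buffer: size, rptr/wptr writeback addresses, ring
 * base and doorbell range, then start the ring.
 */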
2271 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
2273 struct amdgpu_ring *ring;
2276 u64 rb_addr, rptr_addr, wptr_gpu_addr;
2278 /* Set the write pointer delay */
2279 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
2281 /* set the RB to use vmid 0 */
2282 WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
2284 /* Set ring buffer size */
2285 ring = &adev->gfx.gfx_ring[0];
2286 rb_bufsz = order_base_2(ring->ring_size / 8);
2287 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
2288 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
2290 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
2292 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2294 /* Initialize the ring buffer's write pointers */
2296 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2297 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2299 /* set the wb address whether it's enabled or not */
2300 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2301 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2302 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2304 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2305 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
2306 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
2309 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2311 rb_addr = ring->gpu_addr >> 8;
2312 WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
2313 WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2315 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2316 if (ring->use_doorbell) {
2317 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2318 DOORBELL_OFFSET, ring->doorbell_index);
2319 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2322 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
2324 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
2326 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
2327 DOORBELL_RANGE_LOWER, ring->doorbell_index);
2328 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
2330 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
2331 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
2334 /* start the ring */
2335 gfx_v9_0_cp_gfx_start(adev);
2341 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2346 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
2348 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
2349 (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2350 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2351 adev->gfx.compute_ring[i].ready = false;
2352 adev->gfx.kiq.ring.ready = false;
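/*
 * Legacy (non-PSP) MEC microcode load: point the CPC instruction cache
 * at the MEC image in memory and write the jump table through the MEC1
 * ucode port.
 */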
2357 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2359 const struct gfx_firmware_header_v1_0 *mec_hdr;
2360 const __le32 *fw_data;
2364 if (!adev->gfx.mec_fw)
2367 gfx_v9_0_cp_compute_enable(adev, false);
2369 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2370 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2372 fw_data = (const __le32 *)
2373 (adev->gfx.mec_fw->data +
2374 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2376 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2377 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2378 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
2380 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
2381 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
2382 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2383 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2386 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2387 mec_hdr->jt_offset);
2388 for (i = 0; i < mec_hdr->jt_size; i++)
2389 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
2390 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
2392 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2393 adev->gfx.mec_fw_version);
2394 /* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
2400 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
2403 struct amdgpu_device *adev = ring->adev;
2405 /* tell the RLC which queue is the KIQ */
2406 tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
2408 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2409 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2411 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
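/*
 * Map all KCQs through the KIQ: SET_RESOURCES publishes the queue mask,
 * then one MAP_QUEUES packet per compute ring hands its MQD and wptr
 * address to the HW scheduler; a scratch register write-back confirms
 * completion.
 */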
2414 static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
2416 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
2417 uint32_t scratch, tmp = 0;
2418 uint64_t queue_mask = 0;
2421 for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
2422 if (!test_bit(i, adev->gfx.mec.queue_bitmap))
2425 /* This situation may be hit in the future if a new HW
2426 * generation exposes more than 64 queues. If so, the
2427 * definition of queue_mask needs updating */
2428 if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
2429 DRM_ERROR("Invalid KCQ enabled: %d\n", i);
2433 queue_mask |= (1ull << i);
2436 r = amdgpu_gfx_scratch_get(adev, &scratch);
2438 DRM_ERROR("Failed to get scratch reg (%d).\n", r);
2441 WREG32(scratch, 0xCAFEDEAD);
2443 r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 11);
2445 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
2446 amdgpu_gfx_scratch_free(adev, scratch);
2451 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
2452 amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
2453 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
2454 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
2455 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
2456 amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
2457 amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
2458 amdgpu_ring_write(kiq_ring, 0); /* oac mask */
2459 amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
2460 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2461 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2462 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
2463 uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2465 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
2467 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
2468 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
2469 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
2470 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
2471 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
2472 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
2473 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
2474 PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */
2475 PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
2476 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
2477 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
2478 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
2479 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
2480 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
2481 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
2483 /* write to scratch for completion */
2484 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2485 amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
2486 amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
2487 amdgpu_ring_commit(kiq_ring);
2489 for (i = 0; i < adev->usec_timeout; i++) {
2490 tmp = RREG32(scratch);
2491 if (tmp == 0xDEADBEEF)
2495 if (i >= adev->usec_timeout) {
2496 DRM_ERROR("KCQ enable failed (scratch(0x%04X)=0x%08X)\n",
2500 amdgpu_gfx_scratch_free(adev, scratch);
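/*
 * Fill the memory queue descriptor (MQD) with everything the CP needs to
 * run the queue: EOP buffer, doorbell, queue base/size and rptr/wptr
 * state. gfx_v9_0_kiq_init_register() later commits these values to the
 * HQD registers.
 */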
2505 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
2507 struct amdgpu_device *adev = ring->adev;
2508 struct v9_mqd *mqd = ring->mqd_ptr;
2509 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2512 mqd->header = 0xC0310800;
2513 mqd->compute_pipelinestat_enable = 0x00000001;
2514 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2515 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2516 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2517 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2518 mqd->compute_misc_reserved = 0x00000003;
2520 eop_base_addr = ring->eop_gpu_addr >> 8;
2521 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2522 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2524 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2525 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
2526 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2527 (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
2529 mqd->cp_hqd_eop_control = tmp;
2531 /* enable doorbell? */
2532 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2534 if (ring->use_doorbell) {
2535 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2536 DOORBELL_OFFSET, ring->doorbell_index);
2537 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2539 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2540 DOORBELL_SOURCE, 0);
2541 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2545 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2548 mqd->cp_hqd_pq_doorbell_control = tmp;
2550 /* disable the queue if it's active */
2552 mqd->cp_hqd_dequeue_request = 0;
2553 mqd->cp_hqd_pq_rptr = 0;
2554 mqd->cp_hqd_pq_wptr_lo = 0;
2555 mqd->cp_hqd_pq_wptr_hi = 0;
2557 /* set the pointer to the MQD */
2558 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
2559 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
2561 /* set MQD vmid to 0 */
2562 tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
2563 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2564 mqd->cp_mqd_control = tmp;
2566 /* set the pointer to the HQD; this is similar to CP_RB0_BASE/_HI */
2567 hqd_gpu_addr = ring->gpu_addr >> 8;
2568 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2569 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2571 /* set up the HQD, this is similar to CP_RB0_CNTL */
2572 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
2573 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2574 (order_base_2(ring->ring_size / 4) - 1));
2575 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2576 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
2578 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
2580 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2581 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
2582 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2583 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2584 mqd->cp_hqd_pq_control = tmp;
2586 /* set the wb address whether it's enabled or not */
2587 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2588 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2589 mqd->cp_hqd_pq_rptr_report_addr_hi =
2590 upper_32_bits(wb_gpu_addr) & 0xffff;
2592 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2593 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2594 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2595 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2598 /* enable the doorbell if requested */
2599 if (ring->use_doorbell) {
2600 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2601 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2602 DOORBELL_OFFSET, ring->doorbell_index);
2604 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2606 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2607 DOORBELL_SOURCE, 0);
2608 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2612 mqd->cp_hqd_pq_doorbell_control = tmp;
2614 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2616 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
2618 /* set the vmid for the queue */
2619 mqd->cp_hqd_vmid = 0;
2621 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
2622 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
2623 mqd->cp_hqd_persistent_state = tmp;
2625 /* set MIN_IB_AVAIL_SIZE */
2626 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
2627 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
2628 mqd->cp_hqd_ib_control = tmp;
2630 /* activate the queue */
2631 mqd->cp_hqd_active = 1;
2636 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
2638 struct amdgpu_device *adev = ring->adev;
2639 struct v9_mqd *mqd = ring->mqd_ptr;
2642 /* disable wptr polling */
2643 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
2645 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
2646 mqd->cp_hqd_eop_base_addr_lo);
2647 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
2648 mqd->cp_hqd_eop_base_addr_hi);
2650 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2651 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
2652 mqd->cp_hqd_eop_control);
2654 /* enable doorbell? */
2655 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
2656 mqd->cp_hqd_pq_doorbell_control);
2658 /* disable the queue if it's active */
2659 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
2660 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
2661 for (j = 0; j < adev->usec_timeout; j++) {
2662 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
2666 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
2667 mqd->cp_hqd_dequeue_request);
2668 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
2669 mqd->cp_hqd_pq_rptr);
2670 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
2671 mqd->cp_hqd_pq_wptr_lo);
2672 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
2673 mqd->cp_hqd_pq_wptr_hi);
2676 /* set the pointer to the MQD */
2677 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
2678 mqd->cp_mqd_base_addr_lo);
2679 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
2680 mqd->cp_mqd_base_addr_hi);
2682 /* set MQD vmid to 0 */
2683 WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
2684 mqd->cp_mqd_control);
2686 /* set the pointer to the HQD; this is similar to CP_RB0_BASE/_HI */
2687 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
2688 mqd->cp_hqd_pq_base_lo);
2689 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
2690 mqd->cp_hqd_pq_base_hi);
2692 /* set up the HQD, this is similar to CP_RB0_CNTL */
2693 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
2694 mqd->cp_hqd_pq_control);
2696 /* set the wb address whether it's enabled or not */
2697 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
2698 mqd->cp_hqd_pq_rptr_report_addr_lo);
2699 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2700 mqd->cp_hqd_pq_rptr_report_addr_hi);
2702 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2703 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
2704 mqd->cp_hqd_pq_wptr_poll_addr_lo);
2705 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2706 mqd->cp_hqd_pq_wptr_poll_addr_hi);
2708 /* enable the doorbell if requested */
2709 if (ring->use_doorbell) {
2710 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
2711 (AMDGPU_DOORBELL64_KIQ * 2) << 2);
2712 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
2713 (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
2716 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
2717 mqd->cp_hqd_pq_doorbell_control);
2719 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2720 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
2721 mqd->cp_hqd_pq_wptr_lo);
2722 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
2723 mqd->cp_hqd_pq_wptr_hi);
2725 /* set the vmid for the queue */
2726 WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
2728 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
2729 mqd->cp_hqd_persistent_state);
2731 /* activate the queue */
2732 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
2733 mqd->cp_hqd_active);
2735 if (ring->use_doorbell)
2736 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
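/*
 * KIQ queue init: on first init build the MQD under srbm_mutex and back
 * it up; on GPU reset restore the backup and only re-commit the
 * registers.
 */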
2741 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
2743 struct amdgpu_device *adev = ring->adev;
2744 struct v9_mqd *mqd = ring->mqd_ptr;
2745 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
2747 gfx_v9_0_kiq_setting(ring);
2749 if (adev->gfx.in_reset) { /* for GPU_RESET case */
2750 /* reset MQD to a clean status */
2751 if (adev->gfx.mec.mqd_backup[mqd_idx])
2752 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
2754 /* reset ring buffer */
2756 amdgpu_ring_clear_ring(ring);
2758 mutex_lock(&adev->srbm_mutex);
2759 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2760 gfx_v9_0_kiq_init_register(ring);
2761 soc15_grbm_select(adev, 0, 0, 0, 0);
2762 mutex_unlock(&adev->srbm_mutex);
2764 memset((void *)mqd, 0, sizeof(*mqd));
2765 mutex_lock(&adev->srbm_mutex);
2766 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2767 gfx_v9_0_mqd_init(ring);
2768 gfx_v9_0_kiq_init_register(ring);
2769 soc15_grbm_select(adev, 0, 0, 0, 0);
2770 mutex_unlock(&adev->srbm_mutex);
2772 if (adev->gfx.mec.mqd_backup[mqd_idx])
2773 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
2779 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
2781 struct amdgpu_device *adev = ring->adev;
2782 struct v9_mqd *mqd = ring->mqd_ptr;
2783 int mqd_idx = ring - &adev->gfx.compute_ring[0];
2785 if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
2786 memset((void *)mqd, 0, sizeof(*mqd));
2787 mutex_lock(&adev->srbm_mutex);
2788 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2789 gfx_v9_0_mqd_init(ring);
2790 soc15_grbm_select(adev, 0, 0, 0, 0);
2791 mutex_unlock(&adev->srbm_mutex);
2793 if (adev->gfx.mec.mqd_backup[mqd_idx])
2794 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
2795 } else if (adev->gfx.in_reset) { /* for GPU_RESET case */
2796 /* reset MQD to a clean status */
2797 if (adev->gfx.mec.mqd_backup[mqd_idx])
2798 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
2800 /* reset ring buffer */
2802 amdgpu_ring_clear_ring(ring);
2804 amdgpu_ring_clear_ring(ring);
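/* Bring up the KIQ first, then init each KCQ MQD and map them all via
 * the KIQ. */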
2810 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
2812 struct amdgpu_ring *ring = NULL;
2815 gfx_v9_0_cp_compute_enable(adev, true);
2817 ring = &adev->gfx.kiq.ring;
2819 r = amdgpu_bo_reserve(ring->mqd_obj, false);
2820 if (unlikely(r != 0))
2823 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2825 r = gfx_v9_0_kiq_init_queue(ring);
2826 amdgpu_bo_kunmap(ring->mqd_obj);
2827 ring->mqd_ptr = NULL;
2829 amdgpu_bo_unreserve(ring->mqd_obj);
2833 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2834 ring = &adev->gfx.compute_ring[i];
2836 r = amdgpu_bo_reserve(ring->mqd_obj, false);
2837 if (unlikely(r != 0))
2839 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2841 r = gfx_v9_0_kcq_init_queue(ring);
2842 amdgpu_bo_kunmap(ring->mqd_obj);
2843 ring->mqd_ptr = NULL;
2845 amdgpu_bo_unreserve(ring->mqd_obj);
2850 r = gfx_v9_0_kiq_kcq_enable(adev);
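/*
 * Full CP bring-up: load the gfx/compute microcode when PSP is not in
 * charge, resume the gfx ring and KIQ/KCQs, then ring-test everything
 * before re-enabling the GUI idle interrupt.
 */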
2855 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
2858 struct amdgpu_ring *ring;
2860 if (!(adev->flags & AMD_IS_APU))
2861 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2863 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2864 /* legacy firmware loading */
2865 r = gfx_v9_0_cp_gfx_load_microcode(adev);
2869 r = gfx_v9_0_cp_compute_load_microcode(adev);
2874 r = gfx_v9_0_cp_gfx_resume(adev);
2878 r = gfx_v9_0_kiq_resume(adev);
2882 ring = &adev->gfx.gfx_ring[0];
2883 r = amdgpu_ring_test_ring(ring);
2885 ring->ready = false;
2889 ring = &adev->gfx.kiq.ring;
2891 r = amdgpu_ring_test_ring(ring);
2893 ring->ready = false;
2895 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2896 ring = &adev->gfx.compute_ring[i];
2899 r = amdgpu_ring_test_ring(ring);
2901 ring->ready = false;
2904 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2909 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
2911 gfx_v9_0_cp_gfx_enable(adev, enable);
2912 gfx_v9_0_cp_compute_enable(adev, enable);
2915 static int gfx_v9_0_hw_init(void *handle)
2918 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2920 gfx_v9_0_init_golden_registers(adev);
2922 gfx_v9_0_gpu_init(adev);
2924 r = gfx_v9_0_rlc_resume(adev);
2928 r = gfx_v9_0_cp_resume(adev);
2932 r = gfx_v9_0_ngg_en(adev);
2939 static int gfx_v9_0_hw_fini(void *handle)
2941 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2943 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2944 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2945 if (amdgpu_sriov_vf(adev)) {
2946 pr_debug("For SRIOV client, nothing to do here.\n");
2949 gfx_v9_0_cp_enable(adev, false);
2950 gfx_v9_0_rlc_stop(adev);
2955 static int gfx_v9_0_suspend(void *handle)
2957 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2959 adev->gfx.in_suspend = true;
2960 return gfx_v9_0_hw_fini(adev);
2963 static int gfx_v9_0_resume(void *handle)
2965 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2968 r = gfx_v9_0_hw_init(adev);
2969 adev->gfx.in_suspend = false;
2973 static bool gfx_v9_0_is_idle(void *handle)
2975 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2977 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
2978 GRBM_STATUS, GUI_ACTIVE))
2984 static int gfx_v9_0_wait_for_idle(void *handle)
2988 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2990 for (i = 0; i < adev->usec_timeout; i++) {
2991 /* read GRBM_STATUS */
2992 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
2993 GRBM_STATUS__GUI_ACTIVE_MASK;
2995 if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
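/*
 * Build the soft-reset mask from GRBM_STATUS/GRBM_STATUS2: busy gfx
 * blocks request a CP+GFX reset, a busy RLC adds SOFT_RESET_RLC; the
 * mask is then pulsed through GRBM_SOFT_RESET with the CP and RLC
 * halted.
 */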
3002 static int gfx_v9_0_soft_reset(void *handle)
3004 u32 grbm_soft_reset = 0;
3006 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3009 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3010 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3011 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3012 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
3013 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
3014 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
3015 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
3016 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3017 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3018 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3019 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
3022 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
3023 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3024 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3028 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
3029 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
3030 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3031 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3034 if (grbm_soft_reset) {
3036 gfx_v9_0_rlc_stop(adev);
3038 /* Disable GFX parsing/prefetching */
3039 gfx_v9_0_cp_gfx_enable(adev, false);
3041 /* Disable MEC parsing/prefetching */
3042 gfx_v9_0_cp_compute_enable(adev, false);
3044 if (grbm_soft_reset) {
3045 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3046 tmp |= grbm_soft_reset;
3047 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3048 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3049 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3053 tmp &= ~grbm_soft_reset;
3054 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3055 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3058 /* Wait a little for things to settle down */
3064 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3068 mutex_lock(&adev->gfx.gpu_clock_mutex);
3069 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3070 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
3071 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3072 mutex_unlock(&adev->gfx.gpu_clock_mutex);
3076 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3078 uint32_t gds_base, uint32_t gds_size,
3079 uint32_t gws_base, uint32_t gws_size,
3080 uint32_t oa_base, uint32_t oa_size)
3082 gds_base = gds_base >> AMDGPU_GDS_SHIFT;
3083 gds_size = gds_size >> AMDGPU_GDS_SHIFT;
3085 gws_base = gws_base >> AMDGPU_GWS_SHIFT;
3086 gws_size = gws_size >> AMDGPU_GWS_SHIFT;
3088 oa_base = oa_base >> AMDGPU_OA_SHIFT;
3089 oa_size = oa_size >> AMDGPU_OA_SHIFT;
3092 gfx_v9_0_write_data_to_reg(ring, 0, false,
3093 amdgpu_gds_reg_offset[vmid].mem_base,
3097 gfx_v9_0_write_data_to_reg(ring, 0, false,
3098 amdgpu_gds_reg_offset[vmid].mem_size,
3102 gfx_v9_0_write_data_to_reg(ring, 0, false,
3103 amdgpu_gds_reg_offset[vmid].gws,
3104 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
3107 gfx_v9_0_write_data_to_reg(ring, 0, false,
3108 amdgpu_gds_reg_offset[vmid].oa,
3109 (1 << (oa_size + oa_base)) - (1 << oa_base));
3112 static int gfx_v9_0_early_init(void *handle)
3114 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3116 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
3117 adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
3118 gfx_v9_0_set_ring_funcs(adev);
3119 gfx_v9_0_set_irq_funcs(adev);
3120 gfx_v9_0_set_gds_init(adev);
3121 gfx_v9_0_set_rlc_funcs(adev);
3126 static int gfx_v9_0_late_init(void *handle)
3128 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3131 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
3135 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
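/*
 * RLC safe mode brackets register updates that must not race the RLC:
 * entering sends the SET_SAFE_MODE message and polls for the ack,
 * exiting sends the corresponding unset command.
 */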
3142 static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
3144 uint32_t rlc_setting, data;
3147 if (adev->gfx.rlc.in_safe_mode)
3150 /* if RLC is not enabled, do nothing */
3151 rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3152 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3155 if (adev->cg_flags &
3156 (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
3157 AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3158 data = RLC_SAFE_MODE__CMD_MASK;
3159 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
3160 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3162 /* wait for RLC_SAFE_MODE */
3163 for (i = 0; i < adev->usec_timeout; i++) {
3164 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
3168 adev->gfx.rlc.in_safe_mode = true;
3172 static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
3174 uint32_t rlc_setting, data;
3176 if (!adev->gfx.rlc.in_safe_mode)
3179 /* if RLC is not enabled, do nothing */
3180 rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3181 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3184 if (adev->cg_flags &
3185 (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
3187 * Try to exit safe mode only if it is already in safe
3190 data = RLC_SAFE_MODE__CMD_MASK;
3191 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3192 adev->gfx.rlc.in_safe_mode = false;
3196 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
3199 /* TODO: double check if we need to perform this under safe mode */
3200 /* gfx_v9_0_enter_rlc_safe_mode(adev); */
3202 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
3203 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
3204 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
3205 gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
3207 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
3208 gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
3211 /* gfx_v9_0_exit_rlc_safe_mode(adev); */
3214 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
3217 /* TODO: double check if we need to perform under safe mode */
3218 /* gfx_v9_0_enter_rlc_safe_mode(adev); */
3220 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
3221 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
3223 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
3225 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
3226 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
3228 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
3230 /* gfx_v9_0_exit_rlc_safe_mode(adev); */
3233 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
3238 /* It is disabled by HW by default */
3239 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3240 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
3241 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3242 data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
3243 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3244 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3245 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3247 /* only for Vega10 & Raven1 */
3248 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
3251 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3253 /* MGLS is a global flag to control all MGLS in GFX */
3254 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3255 /* 2 - RLC memory Light sleep */
3256 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
3257 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3258 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3260 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3262 /* 3 - CP memory Light sleep */
3263 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3264 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3265 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3267 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3271 /* 1 - MGCG_OVERRIDE */
3272 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3273 data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
3274 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3275 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3276 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3277 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3279 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3281 /* 2 - disable MGLS in RLC */
3282 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3283 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3284 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3285 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3288 /* 3 - disable MGLS in CP */
3289 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3290 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3291 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3292 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3297 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
3302 adev->gfx.rlc.funcs->enter_safe_mode(adev);
3304 /* Enable 3D CGCG/CGLS */
3305 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3306 /* write cmd to clear cgcg/cgls ov */
3307 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3308 /* unset CGCG override */
3309 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
3310 /* update CGCG and CGLS override bits */
3312 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3313 /* enable 3Dcgcg FSM(0x0020003f) */
3314 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3315 data = (0x2000 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3316 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
3317 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
3318 data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3319 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
3321 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3323 /* set IDLE_POLL_COUNT(0x00900100) */
3324 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3325 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3326 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3328 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3330 /* Disable CGCG/CGLS */
3331 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3332 /* disable cgcg, cgls should be disabled */
3333 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
3334 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
3335 /* disable cgcg and cgls in FSM */
3337 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3340 adev->gfx.rlc.funcs->exit_safe_mode(adev);
3343 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
3348 adev->gfx.rlc.funcs->enter_safe_mode(adev);
3350 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3351 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3352 /* unset CGCG override */
3353 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
3354 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3355 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3357 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3358 /* update CGCG and CGLS override bits */
3360 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3362 /* enable cgcg FSM(0x0020003F) */
3363 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3364 data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3365 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
3366 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3367 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3368 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3370 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3372 /* set IDLE_POLL_COUNT(0x00900100) */
3373 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3374 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3375 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3377 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3379 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3380 /* reset CGCG/CGLS bits */
3381 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
3382 /* disable cgcg and cgls in FSM */
3384 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3387 adev->gfx.rlc.funcs->exit_safe_mode(adev);
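/*
 * Ordering matters: coarse-grain gating is enabled after (and disabled
 * before) medium-grain gating, so the enable and disable paths walk the
 * three helpers in opposite orders.
 */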
3390 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
3394 /* CGCG/CGLS should be enabled after MGCG/MGLS
3395 * === MGCG + MGLS ===
3397 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
3398 /* === CGCG/CGLS for GFX 3D Only === */
3399 gfx_v9_0_update_3d_clock_gating(adev, enable);
3400 /* === CGCG + CGLS === */
3401 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3403 /* CGCG/CGLS should be disabled before MGCG/MGLS
3404 * === CGCG + CGLS ===
3406 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3407 /* === CGCG/CGLS for GFX 3D Only === */
3408 gfx_v9_0_update_3d_clock_gating(adev, enable);
3409 /* === MGCG + MGLS === */
3410 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
3415 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
3416 .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
3417 .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
3420 static int gfx_v9_0_set_powergating_state(void *handle,
3421 enum amd_powergating_state state)
3423 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3424 bool enable = (state == AMD_PG_STATE_GATE);
3426 switch (adev->asic_type) {
3428 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
3429 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
3430 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
3432 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
3433 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
3436 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
3437 gfx_v9_0_enable_cp_power_gating(adev, true);
3439 gfx_v9_0_enable_cp_power_gating(adev, false);
3441 /* update gfx cgpg state */
3442 gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
3444 /* update mgcg state */
3445 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
3454 static int gfx_v9_0_set_clockgating_state(void *handle,
3455 enum amd_clockgating_state state)
3457 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3459 if (amdgpu_sriov_vf(adev))
3462 switch (adev->asic_type) {
3465 gfx_v9_0_update_gfx_clock_gating(adev,
3466 state == AMD_CG_STATE_GATE);
3474 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
3476 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3479 if (amdgpu_sriov_vf(adev))
3482 /* AMD_CG_SUPPORT_GFX_MGCG */
3483 data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3484 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
3485 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
3487 /* AMD_CG_SUPPORT_GFX_CGCG */
3488 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3489 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
3490 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
3492 /* AMD_CG_SUPPORT_GFX_CGLS */
3493 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
3494 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
3496 /* AMD_CG_SUPPORT_GFX_RLC_LS */
3497 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3498 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
3499 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
3501 /* AMD_CG_SUPPORT_GFX_CP_LS */
3502 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3503 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
3504 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
3506 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
3507 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3508 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
3509 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
3511 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
3512 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
3513 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
3516 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
3518 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 uses a 32-bit rptr */
3521 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
3523 struct amdgpu_device *adev = ring->adev;
3526 /* XXX check if swapping is necessary on BE */
3527 if (ring->use_doorbell) {
3528 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
3530 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
3531 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
3537 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
3539 struct amdgpu_device *adev = ring->adev;
3541 if (ring->use_doorbell) {
3542 /* XXX check if swapping is necessary on BE */
3543 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
3544 WDOORBELL64(ring->doorbell_index, ring->wptr);
3546 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3547 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
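/*
 * HDP flush from the ring: pick the per-engine ref/mask bits for the
 * NBIO HDP flush registers and WAIT_REG_MEM until request == done.
 */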
3551 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
3553 u32 ref_and_mask, reg_mem_engine;
3554 struct nbio_hdp_flush_reg *nbio_hf_reg;
3556 if (ring->adev->asic_type == CHIP_VEGA10)
3557 nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
3559 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3562 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
3565 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
3572 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
3573 reg_mem_engine = 1; /* pfp */
3576 gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
3577 nbio_hf_reg->hdp_flush_req_offset,
3578 nbio_hf_reg->hdp_flush_done_offset,
3579 ref_and_mask, ref_and_mask, 0x20);
3582 static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
3584 gfx_v9_0_write_data_to_reg(ring, 0, true,
3585 SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 1);
3588 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
3589 struct amdgpu_ib *ib,
3590 unsigned vm_id, bool ctx_switch)
3592 u32 header, control = 0;
3594 if (ib->flags & AMDGPU_IB_FLAG_CE)
3595 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
3597 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
3599 control |= ib->length_dw | (vm_id << 24);
3601 if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
3602 control |= INDIRECT_BUFFER_PRE_ENB(1);
3604 if (!(ib->flags & AMDGPU_IB_FLAG_CE))
3605 gfx_v9_0_ring_emit_de_meta(ring);
3608 amdgpu_ring_write(ring, header);
3609 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3610 amdgpu_ring_write(ring,
3614 lower_32_bits(ib->gpu_addr));
3615 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3616 amdgpu_ring_write(ring, control);
3619 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
3620 struct amdgpu_ib *ib,
3621 unsigned vm_id, bool ctx_switch)
3623 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
3625 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3626 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3627 amdgpu_ring_write(ring,
3631 lower_32_bits(ib->gpu_addr));
3632 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3633 amdgpu_ring_write(ring, control);
3636 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
3637 u64 seq, unsigned flags)
3639 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
3640 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
3642 /* RELEASE_MEM - flush caches, send int */
3643 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
3644 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
3646 EOP_TC_WB_ACTION_EN |
3647 EOP_TC_MD_ACTION_EN |
3648 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
3650 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
3653 * the address should be Qword aligned for a 64-bit write, and Dword
3654 * aligned when only the low 32 bits of data are sent (data high is discarded)
3660 amdgpu_ring_write(ring, lower_32_bits(addr));
3661 amdgpu_ring_write(ring, upper_32_bits(addr));
3662 amdgpu_ring_write(ring, lower_32_bits(seq));
3663 amdgpu_ring_write(ring, upper_32_bits(seq));
3664 amdgpu_ring_write(ring, 0);
3667 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3669 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3670 uint32_t seq = ring->fence_drv.sync_seq;
3671 uint64_t addr = ring->fence_drv.gpu_addr;
3673 gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
3674 lower_32_bits(addr), upper_32_bits(addr),
3675 seq, 0xffffffff, 4);
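/*
 * Emit a VM flush: write the page directory address and the invalidate
 * request to the hub registers from the ring, poll the ack bit for this
 * vmid, and on gfx rings resync the PFP afterwards.
 */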
3678 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3679 unsigned vm_id, uint64_t pd_addr)
3681 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
3682 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3683 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
3684 unsigned eng = ring->vm_inv_eng;
3686 pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
3687 pd_addr |= AMDGPU_PTE_VALID;
3689 gfx_v9_0_write_data_to_reg(ring, usepfp, true,
3690 hub->ctx0_ptb_addr_lo32 + (2 * vm_id),
3691 lower_32_bits(pd_addr));
3693 gfx_v9_0_write_data_to_reg(ring, usepfp, true,
3694 hub->ctx0_ptb_addr_hi32 + (2 * vm_id),
3695 upper_32_bits(pd_addr));
3697 gfx_v9_0_write_data_to_reg(ring, usepfp, true,
3698 hub->vm_inv_eng0_req + eng, req);
3700 /* wait for the invalidate to complete */
3701 gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, hub->vm_inv_eng0_ack +
3702 eng, 0, 1 << vm_id, 1 << vm_id, 0x20);
3704 /* compute doesn't have PFP */
3706 /* sync PFP to ME, otherwise we might get invalid PFP reads */
3707 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3708 amdgpu_ring_write(ring, 0x0);

static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
}

static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
	else
		BUG();
	return wptr;
}

static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		BUG(); /* only DOORBELL method supported on gfx9 now */
	}
}
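
/*
 * Compute/KIQ wptr handling above relies entirely on doorbells: the
 * new wptr is mirrored into the writeback slot, so the driver can read
 * it back coherently, and then written to the 64-bit doorbell, which
 * notifies the CP without an MMIO register access. That is the only
 * submission path wired up for gfx9 here, hence the BUG() fallback.
 */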

static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned int flags)
{
	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}

static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	amdgpu_ring_write(ring, 0);
}

static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
	static struct v9_ce_ib_state ce_payload = {0};
	uint64_t csa_addr;
	int cnt;

	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
	csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
}

static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
	static struct v9_de_ib_state de_payload = {0};
	uint64_t csa_addr, gds_addr;
	int cnt;

	csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;
	gds_addr = csa_addr + 4096;
	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);

	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}
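
/*
 * Count arithmetic for both meta-data writes above: a PACKET3 count
 * field is "DWs following the header minus one", and WRITE_DATA needs
 * one control DW plus two address DWs ahead of the payload, so
 * cnt = payload_dws + 4 - 2 yields header + control + addr_lo/hi +
 * payload. A 64-DW payload, for example, gives cnt = 66 and a 68-DW
 * packet in total.
 */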

static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
	uint32_t dw2 = 0;

	if (amdgpu_sriov_vf(ring->adev))
		gfx_v9_0_ring_emit_ce_meta(ring);

	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;

		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;

		/* set load_ce_ram if preamble presented */
		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
			dw2 |= 0x10000000;
	} else {
		/* still load_ce_ram if this is the first time preamble is
		 * presented, even though no context switch happens.
		 */
		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
			dw2 |= 0x10000000;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}

static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
	ret = ring->wptr & ring->buf_mask;
	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
	return ret;
}

static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
	unsigned cur;

	BUG_ON(offset > ring->buf_mask);
	BUG_ON(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr & ring->buf_mask) - 1;
	if (likely(cur > offset))
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
}
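
/*
 * init/patch_cond_exec work as a pair: init emits COND_EXEC with the
 * dummy count 0x55aa55aa and returns that DW's offset in the ring;
 * once the frame is fully emitted, patch replaces the dummy with the
 * real number of DWs to skip (adding the ring size in DWs when the
 * write pointer has wrapped), so the CP can discard the whole frame
 * whenever the value at cond_exe_gpu_addr is zero.
 */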

static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
	amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* 0 = frame_begin, 1 = frame_end */
}

static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register */
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
}

static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				    uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (1 << 16)); /* no inc addr */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
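
/*
 * emit_rreg/emit_wreg let register accesses be tunneled through the
 * KIQ ring: COPY_DATA moves a register value into the writeback slot
 * reserved at virt.reg_val_offs for the driver to read back, and
 * WRITE_DATA (dst_sel 0 = register, no address increment) performs the
 * write. These hooks are primarily used under SR-IOV, where direct
 * MMIO access from the guest may not be available.
 */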

static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
						 enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       TIME_STAMP_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}
}

static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int me, int pipe,
						     enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */
	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
			break;
		case 1:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
			break;
		case 2:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
			break;
		case 3:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 0);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 1);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}

static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_REG_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_INSTR_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_EOP:
		gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			/* Per-queue interrupt is supported for MEC starting from VI.
			 * The interrupt can only be enabled/disabled per pipe instead
			 * of per queue.
			 */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}

	return 0;
}
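
/*
 * Worked example for the ring_id decode above: an entry with ring_id
 * 0x26 gives me_id = (0x26 & 0x0c) >> 2 = 1, pipe_id = 0x26 & 0x03 = 2
 * and queue_id = (0x26 & 0x70) >> 4 = 2, i.e. MEC1 pipe 2 queue 2. The
 * loop then signals only the matching compute ring's fence, since the
 * interrupt itself can only be enabled per pipe, not per queue.
 */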

static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned int type,
					    enum amdgpu_interrupt_state state)
{
	uint32_t tmp, target;
	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

	if (ring->me == 1)
		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
	else
		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
	target += ring->pipe;

	switch (type) {
	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
		if (state == AMDGPU_IRQ_STATE_DISABLE) {
			tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);

			tmp = RREG32(target);
			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32(target, tmp);
		} else {
			tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);

			tmp = RREG32(target);
			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32(target, tmp);
		}
		break;
	default:
		BUG(); /* kiq only supports GENERIC2_INT now */
		break;
	}

	return 0;
}

static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;
	DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
		  me_id, pipe_id, queue_id);

	amdgpu_fence_process(ring);
	return 0;
}

const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
	.name = "gfx_v9_0",
	.early_init = gfx_v9_0_early_init,
	.late_init = gfx_v9_0_late_init,
	.sw_init = gfx_v9_0_sw_init,
	.sw_fini = gfx_v9_0_sw_fini,
	.hw_init = gfx_v9_0_hw_init,
	.hw_fini = gfx_v9_0_hw_fini,
	.suspend = gfx_v9_0_suspend,
	.resume = gfx_v9_0_resume,
	.is_idle = gfx_v9_0_is_idle,
	.wait_for_idle = gfx_v9_0_wait_for_idle,
	.soft_reset = gfx_v9_0_soft_reset,
	.set_clockgating_state = gfx_v9_0_set_clockgating_state,
	.set_powergating_state = gfx_v9_0_set_powergating_state,
	.get_clockgating_state = gfx_v9_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
	.emit_frame_size = /* totally 242 maximum if 16 IBs */
		5 +  /* COND_EXEC */
		7 +  /* PIPELINE_SYNC */
		24 + /* VM_FLUSH */
		8 +  /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		4 +  /* double SWITCH_BUFFER,
		      * the first COND_EXEC jumps to the place just
		      * prior to this double SWITCH_BUFFER
		      */
		5 +  /* COND_EXEC */
		7 +  /* HDP_flush */
		4 +  /* VGT_flush */
		14 + /* CE_META */
		31 + /* DE_META */
		3 +  /* CNTX_CTRL */
		5 +  /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		2, /* SWITCH_BUFFER */
	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v9_ring_emit_sb,
	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
	.emit_tmz = gfx_v9_0_ring_emit_tmz,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 +  /* gfx_v9_0_ring_emit_hdp_flush */
		5 +  /* gfx_v9_0_ring_emit_hdp_invalidate */
		7 +  /* gfx_v9_0_ring_emit_pipeline_sync */
		24 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 +  /* gfx_v9_0_ring_emit_hdp_flush */
		5 +  /* gfx_v9_0_ring_emit_hdp_invalidate */
		7 +  /* gfx_v9_0_ring_emit_pipeline_sync */
		24 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v9_0_ring_emit_rreg,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
};

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v9_0_kiq_irq_funcs = {
	.set = gfx_v9_0_kiq_set_interrupt_state,
	.process = gfx_v9_0_kiq_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
	.set = gfx_v9_0_set_eop_interrupt_state,
	.process = gfx_v9_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
	.set = gfx_v9_0_set_priv_reg_fault_state,
	.process = gfx_v9_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
	.set = gfx_v9_0_set_priv_inst_fault_state,
	.process = gfx_v9_0_priv_inst_irq,
};

static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;

	adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
	adev->gfx.kiq.irq.funcs = &gfx_v9_0_kiq_irq_funcs;
}

static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
		break;
	default:
		break;
	}
}

static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	adev->gds.mem.total_size = RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
	adev->gds.gws.total_size = 64;
	adev->gds.oa.total_size = 16;

	if (adev->gds.mem.total_size == 64 * 1024) {
		adev->gds.mem.gfx_partition_size = 4096;
		adev->gds.mem.cs_partition_size = 4096;

		adev->gds.gws.gfx_partition_size = 4;
		adev->gds.gws.cs_partition_size = 4;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 1;
	} else {
		adev->gds.mem.gfx_partition_size = 1024;
		adev->gds.mem.cs_partition_size = 1024;

		adev->gds.gws.gfx_partition_size = 16;
		adev->gds.gws.cs_partition_size = 16;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 4;
	}
}

static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						 u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}
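
/*
 * The active-CU mask above is derived by OR-ing the hardware-fused
 * inactive CUs (CC_GC_SHADER_ARRAY_CONFIG) with the user-disabled ones
 * (GC_USER_SHADER_ARRAY_CONFIG), shifting the INACTIVE_CUS field down,
 * then inverting and clamping to max_cu_per_sh bits. E.g. with 10 CUs
 * per SH and inactive bits 0x3, the result is 0x3fc (CUs 2-9 active).
 */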

static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	unsigned disable_masks[4 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			if (i < 4 && j < 2)
				gfx_v9_0_set_user_cu_inactive_bitmap(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
			cu_info->bitmap[i][j] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < adev->gfx.config.max_cu_per_sh)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			if (counter < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v9_0_ip_funcs,
};