2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include "gfxhub_v1_0.h"
26 #include "vega10/soc15ip.h"
27 #include "vega10/GC/gc_9_0_offset.h"
28 #include "vega10/GC/gc_9_0_sh_mask.h"
29 #include "vega10/GC/gc_9_0_default.h"
30 #include "vega10/vega10_enum.h"
32 #include "soc15_common.h"
/*
 * gfxhub_v1_0_gart_enable - bring up GART/VM address translation on the
 * GFX memory hub
 *
 * @adev: amdgpu device pointer
 *
 * Programs, in order: the system aperture over VRAM, the L1 TLB control,
 * the VM L2 cache controls, the context0 (GART) page-table range and base,
 * the protection-fault defaults, and contexts 1-15 for per-process VMs.
 *
 * NOTE(review): several original lines (local declarations, some WREG32
 * value arguments, closing braces and the return statement) are not
 * visible in this chunk; only the visible lines are reproduced below.
 */
int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
	/* Update configuration: system aperture spans all of VRAM. */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR),
		adev->mc.vram_start >> 18);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR),
		adev->mc.vram_end >> 18);

	/*
	 * Default aperture target: the VRAM scratch page, converted from a
	 * GPU address to an MC address via vram_base_offset.
	 */
	value = adev->vram_scratch.gpu_addr - adev->mc.vram_start
		+ adev->vm_manager.vram_base_offset;
	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB),
	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB),

	if (amdgpu_sriov_vf(adev)) {
		/*
		 * MC_VM_FB_LOCATION_BASE/TOP are NULL for a VF because they
		 * are VF-copy registers, so the vbios post doesn't program
		 * them; under SRIOV the driver must program them itself.
		 */
		WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_FB_LOCATION_BASE),
			adev->mc.vram_start >> 24);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_FB_LOCATION_TOP),
			adev->mc.vram_end >> 24);

	/* Disable the AGP aperture (BOT above TOP disables it). */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_AGP_BASE), 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_AGP_TOP), 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_AGP_BOT), 0xFFFFFFFF);

	/* Setup TLB control */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_MX_L1_TLB_CNTL));
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp,
	tmp = REG_SET_FIELD(tmp,
			ENABLE_ADVANCED_DRIVER_MODEL,
	tmp = REG_SET_FIELD(tmp,
			SYSTEM_APERTURE_UNMAPPED_ACCESS,
	tmp = REG_SET_FIELD(tmp,
	tmp = REG_SET_FIELD(tmp,
			MTYPE_UC);/* XXX for emulation. */
	tmp = REG_SET_FIELD(tmp,
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);

	/* Setup the VM L2 cache. */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp,
			ENABLE_L2_FRAGMENT_PROCESSING,
	tmp = REG_SET_FIELD(tmp,
			L2_PDE0_CACHE_TAG_GENERATION_MODE,
			0);/* XXX for emulation, Refer to closed source code.*/
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
	tmp = REG_SET_FIELD(tmp,
			CONTEXT1_IDENTITY_ACCESS_MODE,
	tmp = REG_SET_FIELD(tmp,
			IDENTITY_MODE_FRAGMENT_SIZE,
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL), tmp);

	/* Flush stale translations before enabling the contexts. */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL2));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL2), tmp);

	tmp = mmVM_L2_CNTL3_DEFAULT;
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL3), tmp);

	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL4));
	tmp = REG_SET_FIELD(tmp,
			VMC_TAP_PDE_REQUEST_PHYSICAL,
	tmp = REG_SET_FIELD(tmp,
			VMC_TAP_PTE_REQUEST_PHYSICAL,
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL4), tmp);

	/* Context0 (GART) page-table range covers the GTT aperture. */
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32),
		(u32)(adev->mc.gtt_start >> 12));
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32),
		(u32)(adev->mc.gtt_start >> 44));

	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32),
		(u32)(adev->mc.gtt_end >> 12));
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32),
		(u32)(adev->mc.gtt_end >> 44));

	/* GART table must be 4K-aligned and within the 48-bit MC space. */
	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
	value = adev->gart.table_addr - adev->mc.vram_start
		+ adev->vm_manager.vram_base_offset;
	value &= 0x0000FFFFFFFFF000ULL;
	value |= 0x1; /*valid bit*/

	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),

	/* Faulting accesses are redirected to the dummy page. */
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32),
		(u32)(adev->dummy_page.addr >> 12));
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32),
		(u32)(adev->dummy_page.addr >> 44));

	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL2));
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
			ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY,
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL2), tmp);

	/* Context0 uses a flat (depth 0) page table for the GART. */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL), tmp);

	/* Disable identity aperture.*/
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32), 0XFFFFFFFF);
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32), 0x0000000F);

	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32), 0);
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32), 0);

	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32), 0);
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32), 0);

	/*
	 * Program VM contexts 1..15 (15 iterations, i = 0..14; the CNTL
	 * registers are consecutive, the start/end address pairs stride by
	 * 2 dwords). Each covers the full 0..max_pfn range with all fault
	 * types redirected to the default page.
	 */
	for (i = 0; i <= 14; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL) + i);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PAGE_TABLE_BLOCK_SIZE,
				amdgpu_vm_block_size - 9);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32) + i*2,
				lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32) + i*2,
				upper_32_bits(adev->vm_manager.max_pfn - 1));
/*
 * gfxhub_v1_0_gart_disable - tear down GART/VM translation on the GFX hub
 *
 * @adev: amdgpu device pointer
 *
 * Disables all 16 VM contexts, the L1 TLB and the VM L2 cache, reversing
 * gfxhub_v1_0_gart_enable(). NOTE(review): local declarations and some
 * REG_SET_FIELD arguments are elided in this chunk.
 */
void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
	/* Disable all tables (VM contexts 0..15; CNTL regs are consecutive) */
	for (i = 0; i < 16; i++)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL) + i, 0);

	/* Setup TLB control: turn the L1 TLB off */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_MX_L1_TLB_CNTL));
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp,
			MC_VM_MX_L1_TLB_CNTL,
			ENABLE_ADVANCED_DRIVER_MODEL,
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);

	/* Disable the VM L2 cache */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL), tmp);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL3), 0);
/**
 * gfxhub_v1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 *
 * Sets every *_PROTECTION_FAULT_ENABLE_DEFAULT field of
 * VM_L2_PROTECTION_FAULT_CNTL to @value in one read-modify-write.
 */
void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp,
			VM_L2_PROTECTION_FAULT_CNTL,
			TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
/*
 * gfxhub_v1_0_get_invalidate_req - build a VM_INVALIDATE_ENG0_REQ value
 *
 * @vm_id: VMID whose translations should be invalidated
 *
 * Returns the request word for a legacy-mode flush of @vm_id covering the
 * L2 PTEs/PDE0-2 and L1 PTEs. NOTE(review): the declaration of 'req' and
 * the return statement are elided in this chunk.
 */
static uint32_t gfxhub_v1_0_get_invalidate_req(unsigned int vm_id)
	/* invalidate using legacy mode on vm_id*/
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			PER_VMID_INVALIDATE_REQ, 1 << vm_id);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
/*
 * gfxhub_v1_0_get_vm_protection_bits - interrupt-enable mask for VM faults
 *
 * Returns the VM_CONTEXT1_CNTL bit mask that enables an interrupt for
 * every protection-fault type the GFX hub can raise.
 */
static uint32_t gfxhub_v1_0_get_vm_protection_bits(void)
	return (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
/* IP-block early_init callback. NOTE(review): body elided in this view. */
static int gfxhub_v1_0_early_init(void *handle)
/* IP-block late_init callback. NOTE(review): body elided in this view. */
static int gfxhub_v1_0_late_init(void *handle)
/*
 * gfxhub_v1_0_sw_init - software init for the GFX hub IP block
 *
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Fills in the AMDGPU_GFXHUB vmhub descriptor: context0 page-table base
 * register offsets, invalidate-engine 0 req/ack, context0 CNTL, the L2
 * protection-fault status/control registers, and the two hub callbacks.
 */
static int gfxhub_v1_0_sw_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);

	hub->get_invalidate_req = gfxhub_v1_0_get_invalidate_req;
	hub->get_vm_protection_bits = gfxhub_v1_0_get_vm_protection_bits;
/* IP-block sw_fini callback. NOTE(review): body elided in this view. */
static int gfxhub_v1_0_sw_fini(void *handle)
/*
 * gfxhub_v1_0_hw_init - hardware init for the GFX hub IP block
 *
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Programs the address-range registers of all 18 VM invalidation engines.
 * NOTE(review): the per-iteration offset and value arguments are elided
 * in this chunk.
 */
static int gfxhub_v1_0_hw_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0 ; i < 18; ++i) {
		WREG32(SOC15_REG_OFFSET(GC, 0,
			mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32) +
		WREG32(SOC15_REG_OFFSET(GC, 0,
			mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32) +
/* IP-block hw_fini callback. NOTE(review): body elided in this view. */
static int gfxhub_v1_0_hw_fini(void *handle)
/* IP-block suspend callback. NOTE(review): body elided in this view. */
static int gfxhub_v1_0_suspend(void *handle)
/* IP-block resume callback. NOTE(review): body elided in this view. */
static int gfxhub_v1_0_resume(void *handle)
/* IP-block is_idle callback. NOTE(review): body elided in this view. */
static bool gfxhub_v1_0_is_idle(void *handle)
/* IP-block wait_for_idle callback. NOTE(review): body elided in this view. */
static int gfxhub_v1_0_wait_for_idle(void *handle)
/* IP-block soft_reset callback. NOTE(review): body elided in this view. */
static int gfxhub_v1_0_soft_reset(void *handle)
/* IP-block clockgating callback. NOTE(review): body elided in this view. */
static int gfxhub_v1_0_set_clockgating_state(void *handle,
					     enum amd_clockgating_state state)
/* IP-block powergating callback. NOTE(review): body elided in this view. */
static int gfxhub_v1_0_set_powergating_state(void *handle,
					     enum amd_powergating_state state)
/* Standard amd_ip_funcs dispatch table for the GFX hub IP block. */
const struct amd_ip_funcs gfxhub_v1_0_ip_funcs = {
	.name = "gfxhub_v1_0",
	.early_init = gfxhub_v1_0_early_init,
	.late_init = gfxhub_v1_0_late_init,
	.sw_init = gfxhub_v1_0_sw_init,
	.sw_fini = gfxhub_v1_0_sw_fini,
	.hw_init = gfxhub_v1_0_hw_init,
	.hw_fini = gfxhub_v1_0_hw_fini,
	.suspend = gfxhub_v1_0_suspend,
	.resume = gfxhub_v1_0_resume,
	.is_idle = gfxhub_v1_0_is_idle,
	.wait_for_idle = gfxhub_v1_0_wait_for_idle,
	.soft_reset = gfxhub_v1_0_soft_reset,
	.set_clockgating_state = gfxhub_v1_0_set_clockgating_state,
	.set_powergating_state = gfxhub_v1_0_set_powergating_state,
/*
 * IP-block registration entry for the GFX hub.
 * NOTE(review): version fields (.major/.minor/.rev) elided in this view.
 */
const struct amdgpu_ip_block_version gfxhub_v1_0_ip_block =
	.type = AMD_IP_BLOCK_TYPE_GFXHUB,
	.funcs = &gfxhub_v1_0_ip_funcs,