drm/amdgpu/soc15: bypass PSP for VF
drivers/gpu/drm/amd/amdgpu/soc15.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "vega10/soc15ip.h"
#include "vega10/UVD/uvd_7_0_offset.h"
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/SDMA0/sdma0_4_0_offset.h"
#include "vega10/SDMA1/sdma1_4_0_offset.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "vega10/HDP/hdp_4_0_sh_mask.h"
#include "vega10/MP/mp_9_0_offset.h"
#include "vega10/MP/mp_9_0_sh_mask.h"
#include "vega10/SMUIO/smuio_9_0_offset.h"
#include "vega10/SMUIO/smuio_9_0_sh_mask.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "amdgpu_powerplay.h"

MODULE_FIRMWARE("amdgpu/vega10_smc.bin");

#define mmFabricConfigAccessControl					0x0410
#define mmFabricConfigAccessControl_BASE_IDX				0
#define mmFabricConfigAccessControl_DEFAULT				0x00000000
//FabricConfigAccessControl
#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT		0x0
#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT		0x1
#define FabricConfigAccessControl__CfgRegInstID__SHIFT			0x10
#define FabricConfigAccessControl__CfgRegInstAccEn_MASK		0x00000001L
#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK		0x00000002L
#define FabricConfigAccessControl__CfgRegInstID_MASK			0x00FF0000L


#define mmDF_PIE_AON0_DfGlobalClkGater					0x00fc
#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX				0
//DF_PIE_AON0_DfGlobalClkGater
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT			0x0
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK			0x0000000FL

enum {
	DF_MGCG_DISABLE = 0,
	DF_MGCG_ENABLE_00_CYCLE_DELAY = 1,
	DF_MGCG_ENABLE_01_CYCLE_DELAY = 2,
	DF_MGCG_ENABLE_15_CYCLE_DELAY = 13,
	DF_MGCG_ENABLE_31_CYCLE_DELAY = 14,
	DF_MGCG_ENABLE_63_CYCLE_DELAY = 15
};

#define mmMP0_MISC_CGTT_CTRL0						0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX					0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL					0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX				0

/*
 * Indirect register accessors
 */
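/*
 * PCIE registers are accessed indirectly: the register offset is written to
 * the NBIO PCIE index register and the value is then read from or written to
 * the PCIE data register.  The index/data pair is shared, so every access is
 * serialized with adev->pcie_idx_lock.
 */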
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;
	struct nbio_pcie_index_data *nbio_pcie_id;

	if (adev->asic_type == CHIP_VEGA10)
		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
	else
		BUG();

	address = nbio_pcie_id->index_offset;
	data = nbio_pcie_id->data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;
	struct nbio_pcie_index_data *nbio_pcie_id;

	if (adev->asic_type == CHIP_VEGA10)
		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
	else
		BUG();

	address = nbio_pcie_id->index_offset;
	data = nbio_pcie_id->data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return nbio_v6_1_get_memsize(adev);
}

static const u32 vega10_golden_init[] =
{
};

static void soc15_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_program_register_sequence(adev,
						 vega10_golden_init,
						 (const u32)ARRAY_SIZE(vega10_golden_init));
		break;
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_VEGA10)
		return adev->clock.spll.reference_freq / 4;
	else
		return adev->clock.spll.reference_freq;
}

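/*
 * soc15_grbm_select - switch the GRBM register context
 *
 * Programs GRBM_GFX_CNTL so that subsequent GRBM register accesses target the
 * given micro engine (ME), pipe, queue and VMID combination.
 */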
void soc15_grbm_select(struct amdgpu_device *adev,
		       u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

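/*
 * Read the VBIOS image out of the on-board ROM through the SMUIO
 * ROM_INDEX/ROM_DATA pair: the index is reset to 0 and the image is then
 * streamed out one dword at a time.  APUs carry their VBIOS inside the system
 * BIOS image, so this path is not used for them.
 */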
static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}

static struct amdgpu_allowed_register_entry vega10_allowed_read_registers[] = {
	/* todo */
};

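/*
 * Registers that may be read through soc15_read_register().  Each entry gives
 * the register offset, whether the register must not be touched (in which
 * case 0 is returned), and whether the read has to go through the GRBM SE/SH
 * indexed path.
 */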
static struct amdgpu_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS), false},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2), false},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0), false},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1), false},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2), false},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3), false},
	{ SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_STATUS_REG), false},
	{ SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_STATUS_REG), false},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_STAT), false},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1), false},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2), false},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3), false},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT), false},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1), false},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS), false},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPC_BUSY_STAT), false},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1), false},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS), false},
	{ SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), false},
	{ SOC15_REG_OFFSET(GC, 0, mmCC_RB_BACKEND_DISABLE), false, true},
	{ SOC15_REG_OFFSET(GC, 0, mmGC_USER_RB_BACKEND_DISABLE), false, true},
	{ SOC15_REG_OFFSET(GC, 0, mmGB_BACKEND_MAP), false, false},
};

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

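/*
 * soc15_read_register - read a register on behalf of a query
 *
 * The offset is looked up in the ASIC-specific allow-list first and then in
 * the common SOC15 list.  Entries marked grbm_indexed are read through the
 * SE/SH indexed path; anything not on either list is rejected with -EINVAL.
 */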
static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	struct amdgpu_allowed_register_entry *asic_register_table = NULL;
	struct amdgpu_allowed_register_entry *asic_register_entry;
	uint32_t size, i;

	*value = 0;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		asic_register_table = vega10_allowed_read_registers;
		size = ARRAY_SIZE(vega10_allowed_read_registers);
		break;
	default:
		return -EINVAL;
	}

	if (asic_register_table) {
		for (i = 0; i < size; i++) {
			asic_register_entry = asic_register_table + i;
			if (reg_offset != asic_register_entry->reg_offset)
				continue;
			if (!asic_register_entry->untouched)
				*value = asic_register_entry->grbm_indexed ?
					soc15_read_indexed_register(adev, se_num,
								    sh_num, reg_offset) :
					RREG32(reg_offset);
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		if (reg_offset != soc15_allowed_read_registers[i].reg_offset)
			continue;

		if (!soc15_allowed_read_registers[i].untouched)
			*value = soc15_allowed_read_registers[i].grbm_indexed ?
				soc15_read_indexed_register(adev, se_num,
							    sh_num, reg_offset) :
				RREG32(reg_offset);
		return 0;
	}
	return -EINVAL;
}

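/*
 * Reset the GPU through a PCI config space reset: bus mastering is disabled,
 * the config reset is issued, and the NBIO memsize register is polled until
 * it no longer reads back all ones, which indicates the ASIC has come out of
 * reset.
 */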
static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (nbio_v6_1_get_memsize(adev) != 0xffffffff)
			break;
		udelay(1);
	}
}

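/*
 * Full ASIC reset: mark the engine as hung in the ATOM BIOS scratch
 * registers, perform a PCI config reset, then clear the hung flag again.
 */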
static int soc15_asic_reset(struct amdgpu_device *adev)
{
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	soc15_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return 0;
}

/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void soc15_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	nbio_v6_1_enable_doorbell_aperture(adev, enable);
	nbio_v6_1_enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};

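/*
 * Register the IP blocks for this ASIC in initialization order.  The PSP
 * block is only added on bare metal; for an SR-IOV virtual function it is
 * bypassed, since the VF does not drive the PSP itself.
 */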
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	nbio_v6_1_detect_hw_virt(adev);

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_ip_block_add(adev, &gfxhub_v1_0_ip_block);
		amdgpu_ip_block_add(adev, &mmhub_v1_0_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return nbio_v6_1_get_rev_id(adev);
}

int gmc_v9_0_mc_wait_for_idle(struct amdgpu_device *adev)
{
	/* to be implemented in MC IP */
	return 0;
}

static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
};

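/*
 * Early init for the SOC15 "common" IP block: hook up the indirect register
 * accessors, install the ASIC callback table, initialize the NBIO helpers,
 * and fill in the clock-gating flags, revision IDs, firmware load type and
 * PCIe capabilities for the detected ASIC.
 */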
static int soc15_common_early_init(void *handle)
{
	bool psp_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;

	adev->asic_funcs = &soc15_asic_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
		psp_enabled = true;

	/*
	 * NBIO is used by both SDMA and GFX9, but it only needs to be
	 * initialized once.
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		nbio_v6_1_init(adev);
		break;
	default:
		return -EINVAL;
	}

	adev->rev_id = soc15_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	amdgpu_get_pcie_info(adev);

	return 0;
}

static int soc15_common_sw_init(void *handle)
{
	return 0;
}

static int soc15_common_sw_fini(void *handle)
{
	return 0;
}

static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	soc15_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);

	return 0;
}

static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);

	return 0;
}

static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}

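/*
 * The clock-gating helpers below follow the same read-modify-write pattern:
 * read the control register, set or clear the gating bits depending on
 * whether the feature is enabled in adev->cg_flags, and only write the
 * register back if the value actually changed.
 */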
static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
}

static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}

static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}

static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}

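/*
 * DF medium grain clock gating.  The fabric config access control register is
 * first switched to broadcast mode so the setting reaches all DF instances,
 * the MGCG delay is then programmed (or gating is disabled), and the access
 * control register is finally restored to its default value.
 */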
static void soc15_update_df_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t data;

	/* Put DF on broadcast mode */
	data = RREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl));
	data &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
	WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl), data);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
		data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
		data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
		data |= DF_MGCG_ENABLE_15_CYCLE_DELAY;
		WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
	} else {
		data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
		data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
		data |= DF_MGCG_DISABLE;
		WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
	}

	WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl),
	       mmFabricConfigAccessControl_DEFAULT);
}

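/*
 * Toggle all SOC15 common clock-gating features (NBIO, HDP, DRM, ROM and DF)
 * according to the requested state.
 */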
static int soc15_common_set_clockgating_state(void *handle,
					      enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		nbio_v6_1_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		nbio_v6_1_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_df_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int soc15_common_set_powergating_state(void *handle,
					      enum amd_powergating_state state)
{
	/* todo */
	return 0;
}

const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = NULL,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
};