/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
#include "cayman_blit_shaders.h"

extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void si_rlc_fini(struct radeon_device *rdev);
extern int si_rlc_init(struct radeon_device *rdev);

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define BTC_MC_UCODE_SIZE 6024

#define CAYMAN_PFP_UCODE_SIZE 2176
#define CAYMAN_PM4_UCODE_SIZE 2176
#define CAYMAN_RLC_UCODE_SIZE 1024
#define CAYMAN_MC_UCODE_SIZE 6037

#define ARUBA_RLC_UCODE_SIZE 1536

/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");

#define BTC_IO_MC_REGS_SIZE 29

static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00946a00}
};

static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00936a00}
};

static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00916a00}
};

static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00976b00}
};
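
/*
 * Load the MC (memory controller) microcode: program the per-chip IO
 * register table through the MC_SEQ_IO_DEBUG index/data pair, stream the
 * big-endian ucode words into MC_SEQ_SUP_PGM, then restart the MC
 * sequencer and wait for memory training to complete.  The ucode is only
 * loaded on GDDR5 boards when the sequencer is not already running.
 */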
int ni_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 mem_type, running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BARTS:
		io_mc_regs = (u32 *)&barts_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_TURKS:
		io_mc_regs = (u32 *)&turks_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAICOS:
	default:
		io_mc_regs = (u32 *)&caicos_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAYMAN:
		io_mc_regs = (u32 *)&cayman_io_mc_regs;
		ucode_size = CAYMAN_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	}

	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
				break;
			udelay(1);
		}

		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}
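
/*
 * Fetch the PFP, ME, RLC and (except on IGP/TN parts, which have no MC
 * ucode) MC firmware images from userspace via request_firmware() and
 * sanity-check their sizes against the per-family expectations above.
 * On any failure, every firmware pointer is released and NULLed.
 */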
int ni_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_BARTS:
		chip_name = "BARTS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_TURKS:
		chip_name = "TURKS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAICOS:
		chip_name = "CAICOS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAYMAN:
		chip_name = "CAYMAN";
		rlc_chip_name = "CAYMAN";
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
		break;
	case CHIP_ARUBA:
		chip_name = "ARUBA";
		rlc_chip_name = "ARUBA";
		/* pfp/me same size as CAYMAN */
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
		mc_req_size = 0;
		break;
	default: BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	/* no MC ucode on TN */
	if (!(rdev->flags & RADEON_IS_IGP)) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
		err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
		if (err)
			goto out;
		if (rdev->mc_fw->size != mc_req_size) {
			printk(KERN_ERR
			       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->mc_fw->size, fw_name);
			err = -EINVAL;
		}
	}
out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "ni_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
	}
	return err;
}

/*
 * Core functions
 */
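/*
 * Build the tile pipe -> render backend mapping register value: clamp
 * the requested pipe/backend/shader-engine counts to the hardware
 * limits, derive the set of enabled backends from the disable mask,
 * and pack one 4-bit backend index per pipe (optionally swizzled so
 * that adjacent pipes hit different backends).
 */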
static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
					       u32 num_tile_pipes,
					       u32 num_backends_per_asic,
					       u32 *backend_disable_mask_per_asic,
					       u32 num_shader_engines)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask = 0;
	u32 enabled_backends_count = 0;
	u32 num_backends_per_se;
	u32 cur_pipe;
	u32 swizzle_pipe[CAYMAN_MAX_PIPES];
	u32 cur_backend = 0;
	u32 i;
	bool force_no_swizzle;

	/* force legal values */
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
		num_tile_pipes = rdev->config.cayman.max_tile_pipes;
	if (num_shader_engines < 1)
		num_shader_engines = 1;
	if (num_shader_engines > rdev->config.cayman.max_shader_engines)
		num_shader_engines = rdev->config.cayman.max_shader_engines;
	if (num_backends_per_asic < num_shader_engines)
		num_backends_per_asic = num_shader_engines;
	if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
		num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;

	/* make sure we have the same number of backends per se */
	num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
	/* set up the number of backends per se */
	num_backends_per_se = num_backends_per_asic / num_shader_engines;
	if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
		num_backends_per_se = rdev->config.cayman.max_backends_per_se;
		num_backends_per_asic = num_backends_per_se * num_shader_engines;
	}

	/* create enable mask and count for enabled backends */
	for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
		if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends_per_asic)
			break;
	}

	/* force the backends mask to match the current number of backends */
	if (enabled_backends_count != num_backends_per_asic) {
		u32 this_backend_enabled;
		u32 shader_engine;
		u32 backend_per_se;

		enabled_backends_mask = 0;
		enabled_backends_count = 0;
		*backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
		for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
			/* calc the current se */
			shader_engine = i / rdev->config.cayman.max_backends_per_se;
			/* calc the backend per se */
			backend_per_se = i % rdev->config.cayman.max_backends_per_se;
			/* default to not enabled */
			this_backend_enabled = 0;
			if ((shader_engine < num_shader_engines) &&
			    (backend_per_se < num_backends_per_se))
				this_backend_enabled = 1;
			if (this_backend_enabled) {
				enabled_backends_mask |= (1 << i);
				*backend_disable_mask_per_asic &= ~(1 << i);
				++enabled_backends_count;
			}
		}
	}

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
	switch (rdev->family) {
	case CHIP_CAYMAN:
	case CHIP_ARUBA:
		force_no_swizzle = true;
		break;
	default:
		force_no_swizzle = false;
		break;
	}
	if (force_no_swizzle) {
		bool last_backend_enabled = false;

		force_no_swizzle = false;
		for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
			if (((enabled_backends_mask >> i) & 1) == 1) {
				if (last_backend_enabled)
					force_no_swizzle = true;
				last_backend_enabled = true;
			} else
				last_backend_enabled = false;
		}
	}

	switch (num_tile_pipes) {
	case 1:
	case 3:
	case 5:
	case 7:
		DRM_ERROR("odd number of pipes!\n");
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 4:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 1;
			swizzle_pipe[3] = 3;
		}
		break;
	case 6:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 1;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 5;
		}
		break;
	case 8:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
			swizzle_pipe[6] = 6;
			swizzle_pipe[7] = 7;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 6;
			swizzle_pipe[4] = 1;
			swizzle_pipe[5] = 3;
			swizzle_pipe[6] = 5;
			swizzle_pipe[7] = 7;
		}
		break;
	}

	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;

		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));

		cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
	}

	return backend_map;
}

static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
					    u32 disable_mask_per_se,
					    u32 max_disable_mask_per_se,
					    u32 num_shader_engines)
{
	u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
	u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;

	if (num_shader_engines == 1)
		return disable_mask_per_asic;
	else if (num_shader_engines == 2)
		return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
	else
		return 0xffffffff;
}
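
/*
 * One-time topology and golden-register setup for the gfx block:
 * fills rdev->config.cayman from the per-family limits and the
 * harvest/user fuse registers, derives GB_ADDR_CONFIG and the custom
 * tile_config word, and programs the HW defaults for the 3D engine.
 */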
static void cayman_gpu_init(struct radeon_device *rdev)
{
	u32 cc_rb_backend_disable = 0;
	u32 cc_gc_shader_pipe_config;
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 gb_backend_map;
	u32 cgts_tcc_disable;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 gc_user_shader_pipe_config;
	u32 gc_user_rb_backend_disable;
	u32 cgts_user_tcc_disable;
	u32 cgts_sm_ctrl_reg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	int i, j;

	switch (rdev->family) {
	case CHIP_CAYMAN:
		rdev->config.cayman.max_shader_engines = 2;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 8;
		rdev->config.cayman.max_simds_per_se = 12;
		rdev->config.cayman.max_backends_per_se = 4;
		rdev->config.cayman.max_texture_channel_caches = 8;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x100;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_ARUBA:
	default:
		rdev->config.cayman.max_shader_engines = 1;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 2;
		if ((rdev->pdev->device == 0x9900) ||
		    (rdev->pdev->device == 0x9901) ||
		    (rdev->pdev->device == 0x9905) ||
		    (rdev->pdev->device == 0x9906) ||
		    (rdev->pdev->device == 0x9907) ||
		    (rdev->pdev->device == 0x9908) ||
		    (rdev->pdev->device == 0x9909) ||
		    (rdev->pdev->device == 0x990B) ||
		    (rdev->pdev->device == 0x990C) ||
		    (rdev->pdev->device == 0x990F) ||
		    (rdev->pdev->device == 0x9910) ||
		    (rdev->pdev->device == 0x9917) ||
		    (rdev->pdev->device == 0x9999)) {
			rdev->config.cayman.max_simds_per_se = 6;
			rdev->config.cayman.max_backends_per_se = 2;
		} else if ((rdev->pdev->device == 0x9903) ||
			   (rdev->pdev->device == 0x9904) ||
			   (rdev->pdev->device == 0x990A) ||
			   (rdev->pdev->device == 0x990D) ||
			   (rdev->pdev->device == 0x990E) ||
			   (rdev->pdev->device == 0x9913) ||
			   (rdev->pdev->device == 0x9918)) {
			rdev->config.cayman.max_simds_per_se = 4;
			rdev->config.cayman.max_backends_per_se = 2;
		} else if ((rdev->pdev->device == 0x9919) ||
			   (rdev->pdev->device == 0x9990) ||
			   (rdev->pdev->device == 0x9991) ||
			   (rdev->pdev->device == 0x9994) ||
			   (rdev->pdev->device == 0x9995) ||
			   (rdev->pdev->device == 0x9996) ||
			   (rdev->pdev->device == 0x999A) ||
			   (rdev->pdev->device == 0x99A0)) {
			rdev->config.cayman.max_simds_per_se = 3;
			rdev->config.cayman.max_backends_per_se = 1;
		} else {
			rdev->config.cayman.max_simds_per_se = 2;
			rdev->config.cayman.max_backends_per_se = 1;
		}
		rdev->config.cayman.max_texture_channel_caches = 2;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x40;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
	cgts_tcc_disable = 0xffff0000;
	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
		cgts_tcc_disable &= ~(1 << (16 + i));
	gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
	gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
	cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);

	rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
	tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
	rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
	rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
	tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
	rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
	tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
	rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
	tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
	rdev->config.cayman.backend_disable_mask_per_asic =
		cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
						 rdev->config.cayman.num_shader_engines);
	rdev->config.cayman.backend_map =
		cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
						    rdev->config.cayman.num_backends_per_se *
						    rdev->config.cayman.num_shader_engines,
						    &rdev->config.cayman.backend_disable_mask_per_asic,
						    rdev->config.cayman.num_shader_engines);
	tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
	rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
	tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
		rdev->config.cayman.mem_max_burst_length_bytes = 512;
	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cayman.mem_row_size_in_kb > 4)
		rdev->config.cayman.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cayman.shader_engine_tile_size = 32;
	rdev->config.cayman.num_gpus = 1;
	rdev->config.cayman.multi_gpu_tile_size = 64;

	//gb_addr_config = 0x02011003
#if 0
	gb_addr_config = RREG32(GB_ADDR_CONFIG);
#else
	gb_addr_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		gb_addr_config |= NUM_PIPES(0);
		break;
	case 2:
		gb_addr_config |= NUM_PIPES(1);
		break;
	case 4:
		gb_addr_config |= NUM_PIPES(2);
		break;
	case 8:
		gb_addr_config |= NUM_PIPES(3);
		break;
	}
#endif
	tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
	gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
	tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
	switch (rdev->config.cayman.num_gpus) {
	case 1:
	default:
		gb_addr_config |= NUM_GPUS(0);
		break;
	case 2:
		gb_addr_config |= NUM_GPUS(1);
		break;
	case 4:
		gb_addr_config |= NUM_GPUS(2);
		break;
	}
	switch (rdev->config.cayman.multi_gpu_tile_size) {
	case 16:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
		break;
	case 32:
	default:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
		break;
	case 64:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
		break;
	case 128:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
		break;
	}
	switch (rdev->config.cayman.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= ROW_SIZE(0);
		break;
	case 2:
		gb_addr_config |= ROW_SIZE(1);
		break;
	case 4:
		gb_addr_config |= ROW_SIZE(2);
		break;
	}

	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
	rdev->config.cayman.num_tile_pipes = (1 << tmp);
	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
	rdev->config.cayman.num_shader_engines = tmp + 1;
	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
	rdev->config.cayman.num_gpus = tmp + 1;
	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;

	//gb_backend_map = 0x76541032;
#if 0
	gb_backend_map = RREG32(GB_BACKEND_MAP);
#else
	gb_backend_map =
		cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
						    rdev->config.cayman.num_backends_per_se *
						    rdev->config.cayman.num_shader_engines,
						    &rdev->config.cayman.backend_disable_mask_per_asic,
						    rdev->config.cayman.num_shader_engines);
#endif
	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   nb. of banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cayman.tile_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		rdev->config.cayman.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cayman.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cayman.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.cayman.tile_config |= (3 << 0);
		break;
	}

	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.cayman.tile_config |= 1 << 4;
	else {
		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
		case 0: /* four banks */
			rdev->config.cayman.tile_config |= 0 << 4;
			break;
		case 1: /* eight banks */
			rdev->config.cayman.tile_config |= 1 << 4;
			break;
		case 2: /* sixteen banks */
		default:
			rdev->config.cayman.tile_config |= 2 << 4;
			break;
		}
	}
	rdev->config.cayman.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	rdev->config.cayman.backend_map = gb_backend_map;
	WREG32(GB_BACKEND_MAP, gb_backend_map);
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);

	/* primary versions */
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);

	/* user versions */
	WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);

	/* reprogram the shader complex */
	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
	for (i = 0; i < 16; i++)
		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);

	/* need to be explicitly zero-ed */
	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
	WREG32(SQ_LSTMP_RING_BASE, 0);
	WREG32(SQ_HSTMP_RING_BASE, 0);
	WREG32(SQ_ESTMP_RING_BASE, 0);
	WREG32(SQ_GSTMP_RING_BASE, 0);
	WREG32(SQ_VSTMP_RING_BASE, 0);
	WREG32(SQ_PSTMP_RING_BASE, 0);

	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
	WREG32(SQ_CONFIG, (VC_ENABLE |
			   EXPORT_SRC_C |
			   GFX_PRIO(0) |
			   CS1_PRIO(0) |
			   CS2_PRIO(1)));
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);
}

/*
 * GART
 */
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}
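
/*
 * Bring up the on-chip GART: pin the page table in VRAM, program the
 * L1 TLB and L2 cache controls, point VM context 0 at the GTT range
 * and park contexts 1-7 on the same table, then flush the TLBs.
 */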
int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
	int i, r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-7 */
	for (i = 1; i < 8; i++) {
		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0);
		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
			rdev->gart.table_addr >> 12);
	}

	/* enable context1-7 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

	cayman_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	radeon_gart_table_vram_unpin(rdev);
}

void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
	cayman_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
			      int ring, u32 cp_int_cntl)
{
	u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;

	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
	WREG32(CP_INT_CNTL, cp_int_cntl);
}

/*
 * CP.
 */
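/*
 * Emit a fence on the gfx ring: first a SURFACE_SYNC that flushes the
 * TC/SH read caches over the GART for this vmid, then an
 * EVENT_WRITE_EOP that writes the fence sequence number to the fence
 * address and raises an interrupt.
 */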
void cayman_fence_ring_emit(struct radeon_device *rdev,
			    struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
}

void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);
	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, ib->vm_id);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
}

static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
		WREG32(SCRATCH_UMSK, 0);
	}
}
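
/*
 * Upload the PFP and ME (PM4) microcode into the CP with the engine
 * halted; the ucode address/write-address registers are reset to 0
 * afterwards.
 */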
static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	cayman_cp_enable(rdev, false);

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
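
/*
 * Initialize the ME and replay the golden initial register state
 * (cayman_default_state) through a CLEAR_STATE preamble so that every
 * context starts from a known configuration.
 */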
static int cayman_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring);

	cayman_cp_enable(rdev, true);

	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < cayman_default_size; i++)
		radeon_ring_write(ring, cayman_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* Clear consts */
	radeon_ring_write(ring, 0xc0036f00);
	radeon_ring_write(ring, 0x00000bc4);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);

	radeon_ring_write(ring, 0xc0026900);
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /*  */

	radeon_ring_unlock_commit(rdev, ring);

	/* XXX init other rings */

	return 0;
}

static void cayman_cp_fini(struct radeon_device *rdev)
{
	cayman_cp_enable(rdev, false);
	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}
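
/*
 * Soft-reset the CP front end, then (re)program all three ring buffers
 * (gfx ring 0 plus compute rings 1 and 2): size, rptr/wptr reset,
 * writeback addresses and ring base, and finally start and test ring 0.
 */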
int cayman_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, (1 << 27));

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB0_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB0_CNTL, tmp);

	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB0_RPTR);

	/* ring1 - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB1_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB1_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB1_CNTL, tmp);

	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB1_RPTR);

	/* ring2 - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB2_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB2_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB2_CNTL, tmp);

	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB2_RPTR);

	/* start the rings */
	cayman_cp_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	/* this only tests cp0 */
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
		return r;
	}

	return 0;
}

bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status_se0, grbm_status_se1;
	struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
	int r;

	srbm_status = RREG32(SRBM_STATUS);
	grbm_status = RREG32(GRBM_STATUS);
	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
	if (!(grbm_status & GUI_ACTIVE)) {
		r100_gpu_lockup_update(lockup, ring);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, ring, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(ring, 0x80000000);
		radeon_ring_write(ring, 0x80000000);
		radeon_ring_unlock_commit(rdev, ring);
	}
	/* XXX deal with CP0,1,2 */
	ring->rptr = RREG32(ring->rptr_reg);
	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
}
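
/*
 * Soft reset of the gfx blocks: dump the status registers, stop the MC
 * and halt the CP, pulse GRBM_SOFT_RESET over all gfx sub-blocks, and
 * restore the MC state afterwards.
 */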
static int cayman_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(0x14F8));
	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14D8));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(0x14FC));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14DC));
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_GDS |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VGT |
		      SOFT_RESET_IA);

	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	mdelay(1);

	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	evergreen_mc_resume(rdev, &save);
	return 0;
}

int cayman_asic_reset(struct radeon_device *rdev)
{
	return cayman_gpu_soft_reset(rdev);
}
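
/*
 * Common hw bring-up path shared by init and resume: load firmware,
 * program the MC and GART, initialize the gfx block, the write-back
 * buffer, fences, IRQs and the CP rings, and finish with an IB test.
 */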
static int cayman_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);

	if (rdev->flags & RADEON_IS_IGP) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}

		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	evergreen_mc_program(rdev);
	r = cayman_pcie_gart_enable(rdev);
	if (r)
		return r;
	cayman_gpu_init(rdev);

	r = evergreen_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy.copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate rlc buffers */
	if (rdev->flags & RADEON_IS_IGP) {
		r = si_rlc_init(rdev);
		if (r) {
			DRM_ERROR("Failed to init rlc BOs!\n");
			return r;
		}
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     CP_RB0_RPTR, CP_RB0_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;
	r = cayman_cp_load_microcode(rdev);
	if (r)
		return r;
	r = cayman_cp_resume(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_start(rdev);
	if (r)
		return r;

	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		rdev->accel_working = false;
		return r;
	}

	r = radeon_vm_manager_start(rdev);
	if (r)
		return r;

	return 0;
}

int cayman_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on rv770 hw, unlike on r500 hw,
	 * posting will perform the necessary tasks to bring the GPU back
	 * into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		DRM_ERROR("cayman startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}
	return r;
}

int cayman_suspend(struct radeon_device *rdev)
{
	/* FIXME: we should wait for ring to be empty */
	radeon_ib_pool_suspend(rdev);
	radeon_vm_manager_suspend(rdev);
	r600_blit_suspend(rdev);
	cayman_cp_enable(rdev, false);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	cayman_pcie_gart_disable(rdev);
	return 0;
}

/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than call asic specific functions. This should
 * also allow us to remove a bunch of callback functions like
 * vram_info.
 */
int cayman_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_init(rdev);
	rdev->accel_working = true;
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		rdev->accel_working = false;
	}
	r = radeon_vm_manager_init(rdev);
	if (r) {
		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
	}

	r = cayman_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		cayman_cp_fini(rdev);
		r600_irq_fini(rdev);
		if (rdev->flags & RADEON_IS_IGP)
			si_rlc_fini(rdev);
		radeon_wb_fini(rdev);
		r100_ib_fini(rdev);
		radeon_vm_manager_fini(rdev);
		radeon_irq_kms_fini(rdev);
		cayman_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 *
	 * We can skip this check for TN, because there is no MC
	 * ucode.
	 */
	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}

void cayman_fini(struct radeon_device *rdev)
{
	r600_blit_fini(rdev);
	cayman_cp_fini(rdev);
	r600_irq_fini(rdev);
	if (rdev->flags & RADEON_IS_IGP)
		si_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	r100_ib_fini(rdev);
	radeon_irq_kms_fini(rdev);
	cayman_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_semaphore_driver_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

/*
 * vm
 */
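/*
 * cayman/TN expose 8 hardware VM contexts.  On IGP parts the VRAM base
 * offset used in page-table entries comes from FUS_MC_VM_FB_OFFSET;
 * on discrete parts it is simply 0.
 */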
int cayman_vm_init(struct radeon_device *rdev)
{
	/* number of VMs */
	rdev->vm_manager.nvm = 8;
	/* base offset of vram pages */
	if (rdev->flags & RADEON_IS_IGP) {
		u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
		tmp <<= 22;
		rdev->vm_manager.vram_base_offset = tmp;
	} else
		rdev->vm_manager.vram_base_offset = 0;
	return 0;
}

void cayman_vm_fini(struct radeon_device *rdev)
{
}

int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
{
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (id << 2), 0);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (id << 2), vm->last_pfn);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1 << id);
	return 0;
}

void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
{
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
}

void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
{
	if (vm->id == -1)
		return;

	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
}

#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)

uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
			      struct radeon_vm *vm,
			      uint32_t flags)
{
	uint32_t r600_flags = 0;

	r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
	r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		r600_flags |= R600_PTE_SYSTEM;
		r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return r600_flags;
}
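
/*
 * Write one 64-bit PTE through the CPU mapping of the page table:
 * mask the address down to 4KB alignment, OR in the R600_PTE_* flags,
 * and store the entry at the pfn's slot with writeq.
 */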
void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
			unsigned pfn, uint64_t addr, uint32_t flags)
{
	void __iomem *ptr = (void *)vm->pt;

	addr = addr & 0xFFFFFFFFFFFFF000ULL;
	addr |= flags;
	writeq(addr, ptr + (pfn * 8));
}