/* drivers/gpu/drm/radeon/r420.c */
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
#include "r100d.h"
#include "r420d.h"
#include "r420_reg_safe.h"

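/*
 * Hook up the generated R420 register "safe" bitmap so the command
 * stream checker knows which registers userspace may touch.  R4xx
 * reuses the r300 config struct, hence config.r300.
 */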
static void r420_set_reg_safe(struct radeon_device *rdev)
{
        rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
        rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
}

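/*
 * Set up the GPU memory space.  VRAM and GTT locations start out as
 * "don't care" (0xFFFFFFFF); if AGP probing succeeds the GTT is placed
 * at the AGP aperture base, otherwise AGP is disabled and we fall back
 * to the regular GART path.  radeon_mc_setup() then picks the final
 * layout.
 */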
int r420_mc_init(struct radeon_device *rdev)
{
        int r;

        /* Setup GPU memory space */
        rdev->mc.vram_location = 0xFFFFFFFFUL;
        rdev->mc.gtt_location = 0xFFFFFFFFUL;
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                if (r) {
                        radeon_agp_disable(rdev);
                } else {
                        rdev->mc.gtt_location = rdev->mc.agp_base;
                }
        }
        r = radeon_mc_setup(rdev);
        if (r) {
                return r;
        }
        return 0;
}

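/*
 * Program the quad-pipe / z-pipe configuration.  The raw offsets below
 * appear to correspond to the named registers dumped by the debugfs
 * helper at the bottom of this file (0x402C = GB_PIPE_SELECT,
 * 0x4018 = GB_TILE_CONFIG, 0x170C = DST_PIPE_CONFIG); treat that
 * mapping as informational only.  The RV530 special case shows this
 * helper is also used by some R5xx parts.
 */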
void r420_pipes_init(struct radeon_device *rdev)
{
        unsigned tmp;
        unsigned gb_pipe_select;
        unsigned num_pipes;

        /* GA_ENHANCE workaround for a TCL deadlock issue */
        WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3));
        /* add idle wait as per freedesktop.org bug 24041 */
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait for GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        /* get max number of pipes */
        gb_pipe_select = RREG32(0x402C);
        num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
        rdev->num_gb_pipes = num_pipes;
        tmp = 0;
        switch (num_pipes) {
        default:
                /* force to 1 pipe */
                num_pipes = 1;
                /* fall through */
        case 1:
                tmp = (0 << 1);
                break;
        case 2:
                tmp = (3 << 1);
                break;
        case 3:
                tmp = (6 << 1);
                break;
        case 4:
                tmp = (7 << 1);
                break;
        }
        WREG32(0x42C8, (1 << num_pipes) - 1);
        /* Sub pixel 1/12 so we can have 4K rendering according to doc */
        tmp |= (1 << 4) | (1 << 0);
        WREG32(0x4018, tmp);
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait for GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }

        tmp = RREG32(0x170C);
        WREG32(0x170C, tmp | (1 << 31));

        WREG32(R300_RB2D_DSTCACHE_MODE,
               RREG32(R300_RB2D_DSTCACHE_MODE) |
               R300_DC_AUTOFLUSH_ENABLE |
               R300_DC_DC_DISABLE_IGNORE_PE);

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait for GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }

        if (rdev->family == CHIP_RV530) {
                tmp = RREG32(RV530_GB_PIPE_SELECT2);
                if ((tmp & 3) == 3)
                        rdev->num_z_pipes = 2;
                else
                        rdev->num_z_pipes = 1;
        } else
                rdev->num_z_pipes = 1;

        DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n",
                 rdev->num_gb_pipes, rdev->num_z_pipes);
}

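/*
 * MC registers on these chips are reached indirectly: write the MC
 * register offset to MC_IND_INDEX (with the write-enable bit set for
 * stores) and move the payload through MC_IND_DATA.  For illustration
 * only, with a hypothetical MC register offset mc_reg and bit mask bit:
 *
 *	u32 val = r420_mc_rreg(rdev, mc_reg);
 *	r420_mc_wreg(rdev, mc_reg, val | bit);
 *
 * No locking is done here, so callers presumably serialize access
 * themselves.
 */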
u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
{
        u32 r;

        WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
        r = RREG32(R_0001FC_MC_IND_DATA);
        return r;
}

void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
        WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
                S_0001F8_MC_IND_WR_EN(1));
        WREG32(R_0001FC_MC_IND_DATA, v);
}

static void r420_debugfs(struct radeon_device *rdev)
{
        if (r100_debugfs_rbbm_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for RBBM!\n");
        }
        if (r420_debugfs_pipes_info_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for pipes!\n");
        }
}

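/*
 * Restore clock state after reset/resume: optionally re-enable dynamic
 * clock gating (controlled by the radeon_dynclks module parameter) and
 * force the CP and VIP source clocks on; R420 proper also gets PX/TX
 * forced.
 */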
static void r420_clock_resume(struct radeon_device *rdev)
{
        u32 sclk_cntl;

        if (radeon_dynclks != -1 && radeon_dynclks)
                radeon_atom_set_clock_gating(rdev, 1);
        sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
        sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
        if (rdev->family == CHIP_R420)
                sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1);
        WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
}

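/*
 * The RESYNC workaround below uses a scratch register: a
 * CP_RESYNC_ADDR packet carrying the scratch offset plus a 0xDEADBEEF
 * token (written to what is presumably the matching RESYNC_DATA
 * register) is queued at CP init, and r420_cp_errata_fini() later
 * flushes it out with an RB3D_DC_FINISH before releasing the scratch
 * slot.
 */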
static void r420_cp_errata_init(struct radeon_device *rdev)
{
        /* RV410 and R420 can lock up if CP DMA to host memory happens
         * while the 2D engine is busy.
         *
         * The proper workaround is to queue a RESYNC at the beginning
         * of the CP init, apparently.
         */
        radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
        radeon_ring_lock(rdev, 8);
        radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
        radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
        radeon_ring_write(rdev, 0xDEADBEEF);
        radeon_ring_unlock_commit(rdev);
}

static void r420_cp_errata_fini(struct radeon_device *rdev)
{
        /* Catch the RESYNC we dispatched all the way back,
         * at the very beginning of the CP init.
         */
        radeon_ring_lock(rdev, 8);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
        radeon_ring_unlock_commit(rdev);
        radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}

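/*
 * Bring the chip up: common registers, MC programming, clocks, GART
 * (PCIE or PCI flavour), pipes and IRQs, then the 1MB CP ring, the CP
 * errata workaround, writeback and the IB pool.  A writeback init
 * failure is only reported, not treated as fatal.
 */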
static int r420_startup(struct radeon_device *rdev)
{
        int r;

        /* set common regs */
        r100_set_common_regs(rdev);
        /* program mc */
        r300_mc_program(rdev);
        /* Resume clock */
        r420_clock_resume(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_enable(rdev);
                if (r)
                        return r;
        }
        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_enable(rdev);
                if (r)
                        return r;
        }
        r420_pipes_init(rdev);
        /* Enable IRQ */
        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
        r420_cp_errata_init(rdev);
        r = r100_wb_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
        }
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
                return r;
        }
        return 0;
}

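/*
 * Resume path: disable whichever GART backend is active, restore
 * clocks, reset the GPU (otherwise ATOM can loop forever, per the
 * comment below), re-post the card via ATOM or combios, restore clocks
 * again and run the normal startup sequence.
 */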
int r420_resume(struct radeon_device *rdev)
{
        /* Make sure the GARTs are disabled */
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_disable(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_disable(rdev);
        /* Resume clock before doing reset */
        r420_clock_resume(rdev);
        /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
        if (radeon_gpu_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* Re-post the card via ATOM or combios */
        if (rdev->is_atom_bios) {
                atom_asic_init(rdev->mode_info.atom_context);
        } else {
                radeon_combios_asic_init(rdev->ddev);
        }
        /* Resume clock after posting */
        r420_clock_resume(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        return r420_startup(rdev);
}

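/*
 * Suspend path: undo the CP errata workaround, stop the CP, writeback
 * and IRQs, and disable whichever GART backend is in use.
 */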
int r420_suspend(struct radeon_device *rdev)
{
        r420_cp_errata_fini(rdev);
        r100_cp_disable(rdev);
        r100_wb_disable(rdev);
        r100_irq_disable(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_disable(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_disable(rdev);
        return 0;
}

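/*
 * Full teardown, roughly the reverse of r420_init(): CP, writeback and
 * IB pool first, then GEM, GART, AGP, IRQs, the fence driver, the
 * buffer object layer, the BIOS helpers and finally the BIOS image
 * itself.
 */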
void r420_fini(struct radeon_device *rdev)
{
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
        radeon_gem_fini(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_fini(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_irq_kms_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        if (rdev->is_atom_bios) {
                radeon_atombios_fini(rdev);
        } else {
                radeon_combios_fini(rdev);
        }
        kfree(rdev->bios);
        rdev->bios = NULL;
}

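/*
 * One-time init: scratch and surface registers, BIOS fetch and parsing
 * (ATOM or combios), a pre-POST GPU reset, POST check, clocks, power
 * management, VRAM and MC setup, debugfs, fence/IRQ/bo infrastructure,
 * bus mastering on R420 proper, GART init and the register-safe bitmap.
 * Acceleration is enabled optimistically and rolled back if
 * r420_startup() fails, leaving modesetting usable without accel.
 */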
int r420_init(struct radeon_device *rdev)
{
        int r;

        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* TODO: disable VGA; need to use VGA request */
        /* BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        if (rdev->is_atom_bios) {
                r = radeon_atombios_init(rdev);
                if (r) {
                        return r;
                }
        } else {
                r = radeon_combios_init(rdev);
                if (r) {
                        return r;
                }
        }
        /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
        if (radeon_gpu_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* check if the card is posted or not */
        if (!radeon_boot_test_post_card(rdev))
                return -EINVAL;

        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* Initialize power management */
        radeon_pm_init(rdev);
        /* Get VRAM information */
        r300_vram_info(rdev);
        /* Initialize memory controller (also test AGP) */
        r = r420_mc_init(rdev);
        if (r) {
                return r;
        }
        r420_debugfs(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r) {
                return r;
        }
        r = radeon_irq_kms_init(rdev);
        if (r) {
                return r;
        }
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r) {
                return r;
        }
        if (rdev->family == CHIP_R420)
                r100_enable_bm(rdev);

        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_init(rdev);
                if (r)
                        return r;
        }
        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_init(rdev);
                if (r)
                        return r;
        }
        r420_set_reg_safe(rdev);
        rdev->accel_working = true;
        r = r420_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, so stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
                r100_wb_fini(rdev);
                r100_ib_fini(rdev);
                radeon_irq_kms_fini(rdev);
                if (rdev->flags & RADEON_IS_PCIE)
                        rv370_pcie_gart_fini(rdev);
                if (rdev->flags & RADEON_IS_PCI)
                        r100_pci_gart_fini(rdev);
                radeon_agp_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int r420_debugfs_pipes_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        uint32_t tmp;

        tmp = RREG32(R400_GB_PIPE_SELECT);
        seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
        tmp = RREG32(R300_GB_TILE_CONFIG);
        seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
        tmp = RREG32(R300_DST_PIPE_CONFIG);
        seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
        return 0;
}

static struct drm_info_list r420_pipes_info_list[] = {
        {"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL},
};
#endif

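/*
 * Register the pipes debugfs file.  With CONFIG_DEBUG_FS disabled this
 * collapses to a no-op so callers don't need their own ifdefs.
 */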
int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, r420_pipes_info_list,
                                        ARRAY_SIZE(r420_pipes_info_list));
#else
        return 0;
#endif
}