]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/gpu/drm/radeon/r420.c
scsi: qedi: Remove WARN_ON from clear task context.
[karo-tx-linux.git] / drivers / gpu / drm / radeon / r420.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <drm/drmP.h>
31 #include "radeon_reg.h"
32 #include "radeon.h"
33 #include "radeon_asic.h"
34 #include "atom.h"
35 #include "r100d.h"
36 #include "r420d.h"
37 #include "r420_reg_safe.h"
38
39 void r420_pm_init_profile(struct radeon_device *rdev)
40 {
41         /* default */
42         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
43         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
44         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
45         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
46         /* low sh */
47         rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
48         rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
49         rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
50         rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
51         /* mid sh */
52         rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
53         rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
54         rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
55         rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
56         /* high sh */
57         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
58         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
59         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
60         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
61         /* low mh */
62         rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
63         rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
64         rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
65         rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
66         /* mid mh */
67         rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
68         rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
69         rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
70         rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
71         /* high mh */
72         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
73         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
74         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
75         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
76 }
77
78 static void r420_set_reg_safe(struct radeon_device *rdev)
79 {
80         rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
81         rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
82 }
83
/*
 * r420_pipes_init - probe and program the GB quad-pipe / Z-pipe setup.
 *
 * Reads the pipe count strapped into R400_GB_PIPE_SELECT, applies the
 * single-pipe quirk for the SE variants, then programs the tiling and
 * pipe-config registers.  Side effects: sets rdev->num_gb_pipes and
 * rdev->num_z_pipes.
 */
void r420_pipes_init(struct radeon_device *rdev)
{
	unsigned tmp;
	unsigned gb_pipe_select;
	unsigned num_pipes;

	/* GA_ENHANCE workaround TCL deadlock issue */
	WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL |
	       (1 << 2) | (1 << 3));
	/* add idle wait as per freedesktop.org bug 24041 */
	if (r100_gui_wait_for_idle(rdev)) {
		pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
	}
	/* get max number of pipes (bits 13:12 encode pipe count - 1) */
	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
	num_pipes = ((gb_pipe_select >> 12) & 3) + 1;

	/* SE chips have 1 pipe */
	if ((rdev->pdev->device == 0x5e4c) ||
	    (rdev->pdev->device == 0x5e4f))
		num_pipes = 1;

	rdev->num_gb_pipes = num_pipes;
	tmp = 0;
	/* translate pipe count into the GB_TILE_CONFIG pipe-count field */
	switch (num_pipes) {
	default:
		/* force to 1 pipe */
		num_pipes = 1;
		/* fall through */
	case 1:
		tmp = (0 << 1);
		break;
	case 2:
		tmp = (3 << 1);
		break;
	case 3:
		tmp = (6 << 1);
		break;
	case 4:
		tmp = (7 << 1);
		break;
	}
	/* enable one SU destination bit per pipe */
	WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1);
	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING;
	WREG32(R300_GB_TILE_CONFIG, tmp);
	if (r100_gui_wait_for_idle(rdev)) {
		pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
	}

	tmp = RREG32(R300_DST_PIPE_CONFIG);
	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       RREG32(R300_RB2D_DSTCACHE_MODE) |
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
	}

	/* RV530 reports its Z-pipe count in GB_PIPE_SELECT2; everything
	 * else handled here has a single Z pipe */
	if (rdev->family == CHIP_RV530) {
		tmp = RREG32(RV530_GB_PIPE_SELECT2);
		if ((tmp & 3) == 3)
			rdev->num_z_pipes = 2;
		else
			rdev->num_z_pipes = 1;
	} else
		rdev->num_z_pipes = 1;

	DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n",
		 rdev->num_gb_pipes, rdev->num_z_pipes);
}
157
158 u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
159 {
160         unsigned long flags;
161         u32 r;
162
163         spin_lock_irqsave(&rdev->mc_idx_lock, flags);
164         WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
165         r = RREG32(R_0001FC_MC_IND_DATA);
166         spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
167         return r;
168 }
169
170 void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
171 {
172         unsigned long flags;
173
174         spin_lock_irqsave(&rdev->mc_idx_lock, flags);
175         WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
176                 S_0001F8_MC_IND_WR_EN(1));
177         WREG32(R_0001FC_MC_IND_DATA, v);
178         spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
179 }
180
/* Register the RBBM and pipes debugfs files; failures are only logged. */
static void r420_debugfs(struct radeon_device *rdev)
{
	if (r100_debugfs_rbbm_init(rdev))
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	if (r420_debugfs_pipes_info_init(rdev))
		DRM_ERROR("Failed to register debugfs file for pipes !\n");
}
190
191 static void r420_clock_resume(struct radeon_device *rdev)
192 {
193         u32 sclk_cntl;
194
195         if (radeon_dynclks != -1 && radeon_dynclks)
196                 radeon_atom_set_clock_gating(rdev, 1);
197         sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
198         sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
199         if (rdev->family == CHIP_R420)
200                 sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1);
201         WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
202 }
203
/*
 * r420_cp_errata_init - queue the CP RESYNC workaround packet.
 *
 * Allocates a scratch register and writes a RESYNC packet to the GFX
 * ring; r420_cp_errata_fini() later flushes it and frees the scratch.
 * The packet order below is part of the workaround — do not reorder.
 */
static void r420_cp_errata_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	/* RV410 and R420 can lock up if CP DMA to host memory happens
	 * while the 2D engine is busy.
	 *
	 * The proper workaround is to queue a RESYNC at the beginning
	 * of the CP init, apparently.
	 */
	radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
	radeon_ring_lock(rdev, ring, 8);
	radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
	radeon_ring_write(ring, rdev->config.r300.resync_scratch);
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);
}
221
/*
 * r420_cp_errata_fini - retire the RESYNC workaround queued by
 * r420_cp_errata_init() and release its scratch register.
 */
static void r420_cp_errata_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	/* Catch the RESYNC we dispatched all the way back,
	 * at the very beginning of the CP init.
	 */
	radeon_ring_lock(rdev, ring, 8);
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, R300_RB3D_DC_FINISH);
	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}
235
/*
 * r420_startup - bring up acceleration: MC, GART, pipes, writeback,
 * fences, IRQs, the CP ring and the IB pool, in that order.
 *
 * Returns 0 on success or a negative errno.  On failure the callers
 * (r420_init()/r420_resume()) are responsible for teardown.
 */
static int r420_startup(struct radeon_device *rdev)
{
	int r;

	/* set common regs */
	r100_set_common_regs(rdev);
	/* program mc */
	r300_mc_program(rdev);
	/* Resume clock */
	r420_clock_resume(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_enable(rdev);
		if (r)
			return r;
	}
	r420_pipes_init(rdev);

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ (only install once; startup also runs on resume) */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r100_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
	/* queue the CP DMA lockup workaround (see r420_cp_errata_init) */
	r420_cp_errata_init(rdev);

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}
296
/*
 * r420_resume - re-initialize the GPU after suspend.
 *
 * Disables GART, resets the chip, re-posts it via the BIOS tables and
 * then restarts acceleration through r420_startup().  Returns 0 on
 * success or a negative errno; accel_working is cleared on failure.
 */
int r420_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure GART is disabled before resetting */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r420_clock_resume(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* re-post the card from its BIOS tables */
	if (rdev->is_atom_bios) {
		atom_asic_init(rdev->mode_info.atom_context);
	} else {
		radeon_combios_asic_init(rdev->ddev);
	}
	/* Resume clock after posting */
	r420_clock_resume(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = r420_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}
332
/*
 * r420_suspend - quiesce the GPU for suspend.
 *
 * Tears down in reverse of startup: PM, CP errata flush, CP, writeback,
 * IRQs, then GART.  Always returns 0.
 */
int r420_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	r420_cp_errata_fini(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}
346
/*
 * r420_fini - full driver teardown, roughly mirroring r420_init()
 * in reverse order.
 */
void r420_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	if (rdev->is_atom_bios) {
		radeon_atombios_fini(rdev);
	} else {
		radeon_combios_fini(rdev);
	}
	/* drop the cached BIOS image and guard against reuse */
	kfree(rdev->bios);
	rdev->bios = NULL;
}
370
/*
 * r420_init - one-time driver initialization for R420-class chips.
 *
 * Probes the BIOS, resets and posts the card, initializes clocks, AGP,
 * the memory controller, fences, the memory manager and GART, then
 * starts acceleration.  Returns 0 on success (even if acceleration
 * failed to start — accel_working is cleared in that case) or a
 * negative errno for fatal setup failures.
 */
int r420_init(struct radeon_device *rdev)
{
	int r;

	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* restore some register to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS: a missing BIOS is only fatal on AVIVO hardware */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r) {
			return r;
		}
	} else {
		r = radeon_combios_init(rdev);
		if (r) {
			return r;
		}
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP; fall back to non-AGP operation on failure */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize memory controller */
	r300_mc_init(rdev);
	r420_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r) {
		return r;
	}
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r) {
		return r;
	}
	if (rdev->family == CHIP_R420)
		r100_enable_bm(rdev);

	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_init(rdev);
		if (r)
			return r;
	}
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r420_set_reg_safe(rdev);

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->accel_working = true;
	r = r420_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		if (rdev->flags & RADEON_IS_PCIE)
			rv370_pcie_gart_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		radeon_agp_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}
467
468 /*
469  * Debugfs info
470  */
#if defined(CONFIG_DEBUG_FS)
/* debugfs show callback: dump the pipe-related config registers. */
static int r420_debugfs_pipes_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(R400_GB_PIPE_SELECT);
	seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
	tmp = RREG32(R300_GB_TILE_CONFIG);
	seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
	tmp = RREG32(R300_DST_PIPE_CONFIG);
	seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
	return 0;
}

/* single-entry table registered by r420_debugfs_pipes_info_init() */
static struct drm_info_list r420_pipes_info_list[] = {
	{"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL},
};
#endif
492
/*
 * Register the pipes-info debugfs file; a no-op returning 0 when
 * CONFIG_DEBUG_FS is disabled.
 */
int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r420_pipes_info_list, 1);
#else
	return 0;
#endif
}