drivers/gpu/drm/radeon/rv515.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/seq_file.h>
29 #include "drmP.h"
30 #include "rv515r.h"
31 #include "radeon.h"
32 #include "radeon_share.h"
33
34 /* rv515 depends on: */
35 void r100_hdp_reset(struct radeon_device *rdev);
36 int r100_cp_reset(struct radeon_device *rdev);
37 int r100_rb2d_reset(struct radeon_device *rdev);
38 int r100_gui_wait_for_idle(struct radeon_device *rdev);
39 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
40 int rv370_pcie_gart_enable(struct radeon_device *rdev);
41 void rv370_pcie_gart_disable(struct radeon_device *rdev);
42 void r420_pipes_init(struct radeon_device *rdev);
43 void rs600_mc_disable_clients(struct radeon_device *rdev);
44 void rs600_disable_vga(struct radeon_device *rdev);
45
46 /* This file gathers functions specific to:
47  * rv515
48  *
49  * Some of these functions might be used by newer ASICs.
50  */
51 int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
52 int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
53 void rv515_gpu_init(struct radeon_device *rdev);
54 int rv515_mc_wait_for_idle(struct radeon_device *rdev);
55
56
57 /*
58  * MC
59  */
60 int rv515_mc_init(struct radeon_device *rdev)
61 {
62         uint32_t tmp;
63         int r;
64
65         if (r100_debugfs_rbbm_init(rdev)) {
66                 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
67         }
68         if (rv515_debugfs_pipes_info_init(rdev)) {
69                 DRM_ERROR("Failed to register debugfs file for pipes !\n");
70         }
71         if (rv515_debugfs_ga_info_init(rdev)) {
72                 DRM_ERROR("Failed to register debugfs file for GA !\n");
73         }
74
75         rv515_gpu_init(rdev);
76         rv370_pcie_gart_disable(rdev);
77
78         /* Setup GPU memory space */
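        /*
         * 0xFFFFFFFF is used as an "unset" marker here: unless the AGP path
         * below pins the GTT base, radeon_mc_setup() is left to pick the
         * VRAM/GTT placement itself (my reading of that helper; it is not
         * spelled out in this file).
         */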
79         rdev->mc.vram_location = 0xFFFFFFFFUL;
80         rdev->mc.gtt_location = 0xFFFFFFFFUL;
81         if (rdev->flags & RADEON_IS_AGP) {
82                 r = radeon_agp_init(rdev);
83                 if (r) {
84                         printk(KERN_WARNING "[drm] Disabling AGP\n");
85                         rdev->flags &= ~RADEON_IS_AGP;
86                         rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
87                 } else {
88                         rdev->mc.gtt_location = rdev->mc.agp_base;
89                 }
90         }
91         r = radeon_mc_setup(rdev);
92         if (r) {
93                 return r;
94         }
95
96         /* Program GPU memory space */
97         rs600_mc_disable_clients(rdev);
98         if (rv515_mc_wait_for_idle(rdev)) {
99         printk(KERN_WARNING "Failed to wait for MC idle while "
100                        "programming pipes. Bad things might happen.\n");
101         }
102         /* Write VRAM size in case we are limiting it */
103         WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
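        /*
         * The writes that follow pack the VRAM range into MC_FB_LOCATION as
         * two 16-bit fields in 64 KiB units (hence the >> 16), and program
         * HDP_FB_LOCATION with the same base so the host data path and the
         * memory controller agree on where VRAM starts.
         */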
104         tmp = REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
105         WREG32(0x134, tmp);
106         tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
107         tmp = REG_SET(MC_FB_TOP, tmp >> 16);
108         tmp |= REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
109         WREG32_MC(MC_FB_LOCATION, tmp);
110         WREG32(HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
111         WREG32(0x310, rdev->mc.vram_location);
112         if (rdev->flags & RADEON_IS_AGP) {
113                 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
114                 tmp = REG_SET(MC_AGP_TOP, tmp >> 16);
115                 tmp |= REG_SET(MC_AGP_START, rdev->mc.gtt_location >> 16);
116                 WREG32_MC(MC_AGP_LOCATION, tmp);
117                 WREG32_MC(MC_AGP_BASE, rdev->mc.agp_base);
118                 WREG32_MC(MC_AGP_BASE_2, 0);
119         } else {
120                 WREG32_MC(MC_AGP_LOCATION, 0x0FFFFFFF);
121                 WREG32_MC(MC_AGP_BASE, 0);
122                 WREG32_MC(MC_AGP_BASE_2, 0);
123         }
124         return 0;
125 }
126
127 void rv515_mc_fini(struct radeon_device *rdev)
128 {
129         rv370_pcie_gart_disable(rdev);
130         radeon_gart_table_vram_free(rdev);
131         radeon_gart_fini(rdev);
132 }
133
134
135 /*
136  * Global GPU functions
137  */
138 void rv515_ring_start(struct radeon_device *rdev)
139 {
140         int r;
141
142         r = radeon_ring_lock(rdev, 64);
143         if (r) {
144                 return;
145         }
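        /*
         * Everything below goes out as type-0 (PACKET0) packets: the header
         * names a register and a dword count, and with a count of 0 the
         * single dword that follows is written to that register, so each
         * PACKET0()/radeon_ring_write() pair programs one register through
         * the CP instead of via MMIO.
         */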
146         radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
147         radeon_ring_write(rdev,
148                           ISYNC_ANY2D_IDLE3D |
149                           ISYNC_ANY3D_IDLE2D |
150                           ISYNC_WAIT_IDLEGUI |
151                           ISYNC_CPSCRATCH_IDLEGUI);
152         radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
153         radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
154         radeon_ring_write(rdev, PACKET0(0x170C, 0));
155         radeon_ring_write(rdev, 1 << 31);
156         radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
157         radeon_ring_write(rdev, 0);
158         radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
159         radeon_ring_write(rdev, 0);
160         radeon_ring_write(rdev, PACKET0(0x42C8, 0));
161         radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
162         radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
163         radeon_ring_write(rdev, 0);
164         radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
165         radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
166         radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
167         radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
168         radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
169         radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
170         radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0));
171         radeon_ring_write(rdev, 0);
172         radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
173         radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
174         radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
175         radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
176         radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0));
177         radeon_ring_write(rdev,
178                           ((6 << MS_X0_SHIFT) |
179                            (6 << MS_Y0_SHIFT) |
180                            (6 << MS_X1_SHIFT) |
181                            (6 << MS_Y1_SHIFT) |
182                            (6 << MS_X2_SHIFT) |
183                            (6 << MS_Y2_SHIFT) |
184                            (6 << MSBD0_Y_SHIFT) |
185                            (6 << MSBD0_X_SHIFT)));
186         radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0));
187         radeon_ring_write(rdev,
188                           ((6 << MS_X3_SHIFT) |
189                            (6 << MS_Y3_SHIFT) |
190                            (6 << MS_X4_SHIFT) |
191                            (6 << MS_Y4_SHIFT) |
192                            (6 << MS_X5_SHIFT) |
193                            (6 << MS_Y5_SHIFT) |
194                            (6 << MSBD1_SHIFT)));
195         radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0));
196         radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
197         radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0));
198         radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
199         radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0));
200         radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
201         radeon_ring_write(rdev, PACKET0(0x20C8, 0));
202         radeon_ring_write(rdev, 0);
203         radeon_ring_unlock_commit(rdev);
204 }
205
206 void rv515_errata(struct radeon_device *rdev)
207 {
208         rdev->pll_errata = 0;
209 }
210
211 int rv515_mc_wait_for_idle(struct radeon_device *rdev)
212 {
213         unsigned i;
214         uint32_t tmp;
215
216         for (i = 0; i < rdev->usec_timeout; i++) {
217                 /* read MC_STATUS */
218                 tmp = RREG32_MC(MC_STATUS);
219                 if (tmp & MC_STATUS_IDLE) {
220                         return 0;
221                 }
222                 DRM_UDELAY(1);
223         }
224         return -1;
225 }
226
227 void rv515_gpu_init(struct radeon_device *rdev)
228 {
229         unsigned pipe_select_current, gb_pipe_select, tmp;
230
231         r100_hdp_reset(rdev);
232         r100_rb2d_reset(rdev);
233
234         if (r100_gui_wait_for_idle(rdev)) {
235         printk(KERN_WARNING "Failed to wait for GUI idle while "
236                "resetting GPU. Bad things might happen.\n");
237         }
238
239         rs600_disable_vga(rdev);
240
241         r420_pipes_init(rdev);
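        /*
         * The raw offsets used below are not named in the headers included
         * here: 0x402C matches GB_PIPE_SELECT and 0x170C DST_PIPE_CONFIG in
         * the r300/r400 register layout, and PLL offset 0x000D appears to be
         * the dynamic-clocking pipe mask (assumptions taken from neighbouring
         * ASIC code, not from this file).
         */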
242         gb_pipe_select = RREG32(0x402C);
243         tmp = RREG32(0x170C);
244         pipe_select_current = (tmp >> 2) & 3;
245         tmp = (1 << pipe_select_current) |
246               (((gb_pipe_select >> 8) & 0xF) << 4);
247         WREG32_PLL(0x000D, tmp);
248         if (r100_gui_wait_for_idle(rdev)) {
249                 printk(KERN_WARNING "Failed to wait for GUI idle while "
250                        "resetting GPU. Bad things might happen.\n");
251         }
252         if (rv515_mc_wait_for_idle(rdev)) {
253                 printk(KERN_WARNING "Failed to wait for MC idle while "
254                        "programming pipes. Bad things might happen.\n");
255         }
256 }
257
258 int rv515_ga_reset(struct radeon_device *rdev)
259 {
260         uint32_t tmp;
261         bool reinit_cp;
262         int i;
263
264         reinit_cp = rdev->cp.ready;
265         rdev->cp.ready = false;
266         for (i = 0; i < rdev->usec_timeout; i++) {
267                 WREG32(CP_CSQ_MODE, 0);
268                 WREG32(CP_CSQ_CNTL, 0);
269                 WREG32(RBBM_SOFT_RESET, 0x32005);
270                 (void)RREG32(RBBM_SOFT_RESET);
271                 udelay(200);
272                 WREG32(RBBM_SOFT_RESET, 0);
273                 /* Wait to prevent race in RBBM_STATUS */
274                 mdelay(1);
275                 tmp = RREG32(RBBM_STATUS);
276                 if (tmp & ((1 << 20) | (1 << 26))) {
277                         DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
278                         /* GA still busy, soft reset it */
279                         WREG32(0x429C, 0x200);
280                         WREG32(VAP_PVS_STATE_FLUSH_REG, 0);
281                         WREG32(0x43E0, 0);
282                         WREG32(0x43E4, 0);
283                         WREG32(0x24AC, 0);
284                 }
285                 /* Wait to prevent race in RBBM_STATUS */
286                 mdelay(1);
287                 tmp = RREG32(RBBM_STATUS);
288                 if (!(tmp & ((1 << 20) | (1 << 26)))) {
289                         break;
290                 }
291         }
292         for (i = 0; i < rdev->usec_timeout; i++) {
293                 tmp = RREG32(RBBM_STATUS);
294                 if (!(tmp & ((1 << 20) | (1 << 26)))) {
295                         DRM_INFO("GA reset succeeded (RBBM_STATUS=0x%08X)\n",
296                                  tmp);
297                         DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
298                         DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
299                         DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
300                         if (reinit_cp) {
301                                 return r100_cp_init(rdev, rdev->cp.ring_size);
302                         }
303                         return 0;
304                 }
305                 DRM_UDELAY(1);
306         }
307         tmp = RREG32(RBBM_STATUS);
308         DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
309         return -1;
310 }
311
312 int rv515_gpu_reset(struct radeon_device *rdev)
313 {
314         uint32_t status;
315
316         /* reset order likely matters */
317         status = RREG32(RBBM_STATUS);
318         /* reset HDP */
319         r100_hdp_reset(rdev);
320         /* reset rb2d */
321         if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
322                 r100_rb2d_reset(rdev);
323         }
324         /* reset GA */
325         if (status & ((1 << 20) | (1 << 26))) {
326                 rv515_ga_reset(rdev);
327         }
328         /* reset CP */
329         status = RREG32(RBBM_STATUS);
330         if (status & (1 << 16)) {
331                 r100_cp_reset(rdev);
332         }
333         /* Check if GPU is idle */
334         status = RREG32(RBBM_STATUS);
335         if (status & (1 << 31)) {
336                 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
337                 return -1;
338         }
339         DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
340         return 0;
341 }
342
343
344 /*
345  * VRAM info
346  */
347 static void rv515_vram_get_type(struct radeon_device *rdev)
348 {
349         uint32_t tmp;
350
351         rdev->mc.vram_width = 128;
352         rdev->mc.vram_is_ddr = true;
353         tmp = RREG32_MC(RV515_MC_CNTL) & MEM_NUM_CHANNELS_MASK;
354         switch (tmp) {
355         case 0:
356                 rdev->mc.vram_width = 64;
357                 break;
358         case 1:
359                 rdev->mc.vram_width = 128;
360                 break;
361         default:
362                 rdev->mc.vram_width = 128;
363                 break;
364         }
365 }
366
367 void rv515_vram_info(struct radeon_device *rdev)
368 {
369         fixed20_12 a;
370
371         rv515_vram_get_type(rdev);
372
373         /* FIXME: we should enforce the default clock in case the GPU is
374          * not in its default setup
375          */
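        /*
         * rdev->clock.default_sclk is reported by the BIOS in 10 kHz units
         * (the usual radeon convention, assumed here), so dividing by 100
         * leaves pm.sclk holding the engine clock in MHz as a 20.12
         * fixed-point value.
         */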
376         a.full = rfixed_const(100);
377         rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
378         rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
379 }
380
381
382 /*
383  * Indirect registers accessor
384  */
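/*
 * Both accessor pairs use the classic index/data scheme: write the register
 * offset to an index register, then access the data register. For the MC
 * accessors the write path sets bit 23 on top of the 0x7f0000 used for reads,
 * which looks like a write-enable bit (inferred from the code, not from
 * documentation); the dummy reads of PCIE_INDEX/PCIE_DATA appear to be there
 * to flush the posted index write before the data access.
 */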
385 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
386 {
387         uint32_t r;
388
389         WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
390         r = RREG32(MC_IND_DATA);
391         WREG32(MC_IND_INDEX, 0);
392         return r;
393 }
394
395 void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
396 {
397         WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
398         WREG32(MC_IND_DATA, (v));
399         WREG32(MC_IND_INDEX, 0);
400 }
401
402 uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
403 {
404         uint32_t r;
405
406         WREG32(PCIE_INDEX, ((reg) & 0x7ff));
407         (void)RREG32(PCIE_INDEX);
408         r = RREG32(PCIE_DATA);
409         return r;
410 }
411
412 void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
413 {
414         WREG32(PCIE_INDEX, ((reg) & 0x7ff));
415         (void)RREG32(PCIE_INDEX);
416         WREG32(PCIE_DATA, (v));
417         (void)RREG32(PCIE_DATA);
418 }
419
420
421 /*
422  * Debugfs info
423  */
424 #if defined(CONFIG_DEBUG_FS)
425 static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
426 {
427         struct drm_info_node *node = (struct drm_info_node *) m->private;
428         struct drm_device *dev = node->minor->dev;
429         struct radeon_device *rdev = dev->dev_private;
430         uint32_t tmp;
431
432         tmp = RREG32(GB_PIPE_SELECT);
433         seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
434         tmp = RREG32(SU_REG_DEST);
435         seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp);
436         tmp = RREG32(GB_TILE_CONFIG);
437         seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
438         tmp = RREG32(DST_PIPE_CONFIG);
439         seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
440         return 0;
441 }
442
443 static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
444 {
445         struct drm_info_node *node = (struct drm_info_node *) m->private;
446         struct drm_device *dev = node->minor->dev;
447         struct radeon_device *rdev = dev->dev_private;
448         uint32_t tmp;
449
450         tmp = RREG32(0x2140);
451         seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
452         radeon_gpu_reset(rdev);
453         tmp = RREG32(0x425C);
454         seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
455         return 0;
456 }
457
458 static struct drm_info_list rv515_pipes_info_list[] = {
459         {"rv515_pipes_info", rv515_debugfs_pipes_info, 0, NULL},
460 };
461
462 static struct drm_info_list rv515_ga_info_list[] = {
463         {"rv515_ga_info", rv515_debugfs_ga_info, 0, NULL},
464 };
465 #endif
466
467 int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
468 {
469 #if defined(CONFIG_DEBUG_FS)
470         return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
471 #else
472         return 0;
473 #endif
474 }
475
476 int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
477 {
478 #if defined(CONFIG_DEBUG_FS)
479         return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
480 #else
481         return 0;
482 #endif
483 }
484
485
486 /*
487  * Asic initialization
488  */
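/*
 * r500_reg_safe_bm is handed to the command stream checker (see rv515_init()
 * below); it records, one bit per register dword, which offsets user space
 * command buffers may program directly. The bit polarity and the special
 * cases are defined by the shared r300 CS code, not here.
 */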
489 static const unsigned r500_reg_safe_bm[219] = {
490         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
491         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
492         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
493         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
494         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
495         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
496         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
497         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
498         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
499         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
500         0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
501         0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
502         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
503         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
504         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
505         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
506         0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
507         0xF0000038, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
508         0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
509         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
510         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
511         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
512         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
513         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
514         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
515         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
516         0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
517         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
518         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
519         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
520         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
521         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
522         0x1FFFFC78, 0xFFFFE000, 0xFFFFFFFE, 0xFFFFFFFF,
523         0x38CF8F50, 0xFFF88082, 0xFF0000FC, 0xFAE009FF,
524         0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
525         0xFFFF8CFC, 0xFFFFC1FF, 0xFFFFFFFF, 0xFFFFFFFF,
526         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
527         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF,
528         0x00000000, 0x00000000, 0x00000000, 0x00000000,
529         0x0003FC01, 0x3FFFFCF8, 0xFE800B19, 0xFFFFFFFF,
530         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
531         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
532         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
533         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
534         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
535         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
536         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
537         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
538         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
539         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
540         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
541         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
542         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
543         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
544         0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
545 };
546
547 int rv515_init(struct radeon_device *rdev)
548 {
549         rdev->config.r300.reg_safe_bm = r500_reg_safe_bm;
550         rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm);
551         return 0;
552 }
553
554 void atom_rv515_force_tv_scaler(struct radeon_device *rdev)
555 {
556
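        /*
         * The long run of 0x6578/0x657C writes below looks like an
         * index/data pair being used to load the TV scaler's filter
         * coefficient tables; the values are taken as-is and not decoded
         * here.
         */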
557         WREG32(0x659C, 0x0);
558         WREG32(0x6594, 0x705);
559         WREG32(0x65A4, 0x10001);
560         WREG32(0x65D8, 0x0);
561         WREG32(0x65B0, 0x0);
562         WREG32(0x65C0, 0x0);
563         WREG32(0x65D4, 0x0);
564         WREG32(0x6578, 0x0);
565         WREG32(0x657C, 0x841880A8);
566         WREG32(0x6578, 0x1);
567         WREG32(0x657C, 0x84208680);
568         WREG32(0x6578, 0x2);
569         WREG32(0x657C, 0xBFF880B0);
570         WREG32(0x6578, 0x100);
571         WREG32(0x657C, 0x83D88088);
572         WREG32(0x6578, 0x101);
573         WREG32(0x657C, 0x84608680);
574         WREG32(0x6578, 0x102);
575         WREG32(0x657C, 0xBFF080D0);
576         WREG32(0x6578, 0x200);
577         WREG32(0x657C, 0x83988068);
578         WREG32(0x6578, 0x201);
579         WREG32(0x657C, 0x84A08680);
580         WREG32(0x6578, 0x202);
581         WREG32(0x657C, 0xBFF080F8);
582         WREG32(0x6578, 0x300);
583         WREG32(0x657C, 0x83588058);
584         WREG32(0x6578, 0x301);
585         WREG32(0x657C, 0x84E08660);
586         WREG32(0x6578, 0x302);
587         WREG32(0x657C, 0xBFF88120);
588         WREG32(0x6578, 0x400);
589         WREG32(0x657C, 0x83188040);
590         WREG32(0x6578, 0x401);
591         WREG32(0x657C, 0x85008660);
592         WREG32(0x6578, 0x402);
593         WREG32(0x657C, 0xBFF88150);
594         WREG32(0x6578, 0x500);
595         WREG32(0x657C, 0x82D88030);
596         WREG32(0x6578, 0x501);
597         WREG32(0x657C, 0x85408640);
598         WREG32(0x6578, 0x502);
599         WREG32(0x657C, 0xBFF88180);
600         WREG32(0x6578, 0x600);
601         WREG32(0x657C, 0x82A08018);
602         WREG32(0x6578, 0x601);
603         WREG32(0x657C, 0x85808620);
604         WREG32(0x6578, 0x602);
605         WREG32(0x657C, 0xBFF081B8);
606         WREG32(0x6578, 0x700);
607         WREG32(0x657C, 0x82608010);
608         WREG32(0x6578, 0x701);
609         WREG32(0x657C, 0x85A08600);
610         WREG32(0x6578, 0x702);
611         WREG32(0x657C, 0x800081F0);
612         WREG32(0x6578, 0x800);
613         WREG32(0x657C, 0x8228BFF8);
614         WREG32(0x6578, 0x801);
615         WREG32(0x657C, 0x85E085E0);
616         WREG32(0x6578, 0x802);
617         WREG32(0x657C, 0xBFF88228);
618         WREG32(0x6578, 0x10000);
619         WREG32(0x657C, 0x82A8BF00);
620         WREG32(0x6578, 0x10001);
621         WREG32(0x657C, 0x82A08CC0);
622         WREG32(0x6578, 0x10002);
623         WREG32(0x657C, 0x8008BEF8);
624         WREG32(0x6578, 0x10100);
625         WREG32(0x657C, 0x81F0BF28);
626         WREG32(0x6578, 0x10101);
627         WREG32(0x657C, 0x83608CA0);
628         WREG32(0x6578, 0x10102);
629         WREG32(0x657C, 0x8018BED0);
630         WREG32(0x6578, 0x10200);
631         WREG32(0x657C, 0x8148BF38);
632         WREG32(0x6578, 0x10201);
633         WREG32(0x657C, 0x84408C80);
634         WREG32(0x6578, 0x10202);
635         WREG32(0x657C, 0x8008BEB8);
636         WREG32(0x6578, 0x10300);
637         WREG32(0x657C, 0x80B0BF78);
638         WREG32(0x6578, 0x10301);
639         WREG32(0x657C, 0x85008C20);
640         WREG32(0x6578, 0x10302);
641         WREG32(0x657C, 0x8020BEA0);
642         WREG32(0x6578, 0x10400);
643         WREG32(0x657C, 0x8028BF90);
644         WREG32(0x6578, 0x10401);
645         WREG32(0x657C, 0x85E08BC0);
646         WREG32(0x6578, 0x10402);
647         WREG32(0x657C, 0x8018BE90);
648         WREG32(0x6578, 0x10500);
649         WREG32(0x657C, 0xBFB8BFB0);
650         WREG32(0x6578, 0x10501);
651         WREG32(0x657C, 0x86C08B40);
652         WREG32(0x6578, 0x10502);
653         WREG32(0x657C, 0x8010BE90);
654         WREG32(0x6578, 0x10600);
655         WREG32(0x657C, 0xBF58BFC8);
656         WREG32(0x6578, 0x10601);
657         WREG32(0x657C, 0x87A08AA0);
658         WREG32(0x6578, 0x10602);
659         WREG32(0x657C, 0x8010BE98);
660         WREG32(0x6578, 0x10700);
661         WREG32(0x657C, 0xBF10BFF0);
662         WREG32(0x6578, 0x10701);
663         WREG32(0x657C, 0x886089E0);
664         WREG32(0x6578, 0x10702);
665         WREG32(0x657C, 0x8018BEB0);
666         WREG32(0x6578, 0x10800);
667         WREG32(0x657C, 0xBED8BFE8);
668         WREG32(0x6578, 0x10801);
669         WREG32(0x657C, 0x89408940);
670         WREG32(0x6578, 0x10802);
671         WREG32(0x657C, 0xBFE8BED8);
672         WREG32(0x6578, 0x20000);
673         WREG32(0x657C, 0x80008000);
674         WREG32(0x6578, 0x20001);
675         WREG32(0x657C, 0x90008000);
676         WREG32(0x6578, 0x20002);
677         WREG32(0x657C, 0x80008000);
678         WREG32(0x6578, 0x20003);
679         WREG32(0x657C, 0x80008000);
680         WREG32(0x6578, 0x20100);
681         WREG32(0x657C, 0x80108000);
682         WREG32(0x6578, 0x20101);
683         WREG32(0x657C, 0x8FE0BF70);
684         WREG32(0x6578, 0x20102);
685         WREG32(0x657C, 0xBFE880C0);
686         WREG32(0x6578, 0x20103);
687         WREG32(0x657C, 0x80008000);
688         WREG32(0x6578, 0x20200);
689         WREG32(0x657C, 0x8018BFF8);
690         WREG32(0x6578, 0x20201);
691         WREG32(0x657C, 0x8F80BF08);
692         WREG32(0x6578, 0x20202);
693         WREG32(0x657C, 0xBFD081A0);
694         WREG32(0x6578, 0x20203);
695         WREG32(0x657C, 0xBFF88000);
696         WREG32(0x6578, 0x20300);
697         WREG32(0x657C, 0x80188000);
698         WREG32(0x6578, 0x20301);
699         WREG32(0x657C, 0x8EE0BEC0);
700         WREG32(0x6578, 0x20302);
701         WREG32(0x657C, 0xBFB082A0);
702         WREG32(0x6578, 0x20303);
703         WREG32(0x657C, 0x80008000);
704         WREG32(0x6578, 0x20400);
705         WREG32(0x657C, 0x80188000);
706         WREG32(0x6578, 0x20401);
707         WREG32(0x657C, 0x8E00BEA0);
708         WREG32(0x6578, 0x20402);
709         WREG32(0x657C, 0xBF8883C0);
710         WREG32(0x6578, 0x20403);
711         WREG32(0x657C, 0x80008000);
712         WREG32(0x6578, 0x20500);
713         WREG32(0x657C, 0x80188000);
714         WREG32(0x6578, 0x20501);
715         WREG32(0x657C, 0x8D00BE90);
716         WREG32(0x6578, 0x20502);
717         WREG32(0x657C, 0xBF588500);
718         WREG32(0x6578, 0x20503);
719         WREG32(0x657C, 0x80008008);
720         WREG32(0x6578, 0x20600);
721         WREG32(0x657C, 0x80188000);
722         WREG32(0x6578, 0x20601);
723         WREG32(0x657C, 0x8BC0BE98);
724         WREG32(0x6578, 0x20602);
725         WREG32(0x657C, 0xBF308660);
726         WREG32(0x6578, 0x20603);
727         WREG32(0x657C, 0x80008008);
728         WREG32(0x6578, 0x20700);
729         WREG32(0x657C, 0x80108000);
730         WREG32(0x6578, 0x20701);
731         WREG32(0x657C, 0x8A80BEB0);
732         WREG32(0x6578, 0x20702);
733         WREG32(0x657C, 0xBF0087C0);
734         WREG32(0x6578, 0x20703);
735         WREG32(0x657C, 0x80008008);
736         WREG32(0x6578, 0x20800);
737         WREG32(0x657C, 0x80108000);
738         WREG32(0x6578, 0x20801);
739         WREG32(0x657C, 0x8920BED0);
740         WREG32(0x6578, 0x20802);
741         WREG32(0x657C, 0xBED08920);
742         WREG32(0x6578, 0x20803);
743         WREG32(0x657C, 0x80008010);
744         WREG32(0x6578, 0x30000);
745         WREG32(0x657C, 0x90008000);
746         WREG32(0x6578, 0x30001);
747         WREG32(0x657C, 0x80008000);
748         WREG32(0x6578, 0x30100);
749         WREG32(0x657C, 0x8FE0BF90);
750         WREG32(0x6578, 0x30101);
751         WREG32(0x657C, 0xBFF880A0);
752         WREG32(0x6578, 0x30200);
753         WREG32(0x657C, 0x8F60BF40);
754         WREG32(0x6578, 0x30201);
755         WREG32(0x657C, 0xBFE88180);
756         WREG32(0x6578, 0x30300);
757         WREG32(0x657C, 0x8EC0BF00);
758         WREG32(0x6578, 0x30301);
759         WREG32(0x657C, 0xBFC88280);
760         WREG32(0x6578, 0x30400);
761         WREG32(0x657C, 0x8DE0BEE0);
762         WREG32(0x6578, 0x30401);
763         WREG32(0x657C, 0xBFA083A0);
764         WREG32(0x6578, 0x30500);
765         WREG32(0x657C, 0x8CE0BED0);
766         WREG32(0x6578, 0x30501);
767         WREG32(0x657C, 0xBF7884E0);
768         WREG32(0x6578, 0x30600);
769         WREG32(0x657C, 0x8BA0BED8);
770         WREG32(0x6578, 0x30601);
771         WREG32(0x657C, 0xBF508640);
772         WREG32(0x6578, 0x30700);
773         WREG32(0x657C, 0x8A60BEE8);
774         WREG32(0x6578, 0x30701);
775         WREG32(0x657C, 0xBF2087A0);
776         WREG32(0x6578, 0x30800);
777         WREG32(0x657C, 0x8900BF00);
778         WREG32(0x6578, 0x30801);
779         WREG32(0x657C, 0xBF008900);
780 }
781
782 struct rv515_watermark {
783         u32        lb_request_fifo_depth;
784         fixed20_12 num_line_pair;
785         fixed20_12 estimated_width;
786         fixed20_12 worst_case_latency;
787         fixed20_12 consumption_rate;
788         fixed20_12 active_time;
789         fixed20_12 dbpp;
790         fixed20_12 priority_mark_max;
791         fixed20_12 priority_mark;
792         fixed20_12 sclk;
793 };
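/*
 * All fixed20_12 members above are 20.12 fixed-point values manipulated with
 * the rfixed_* helpers (rfixed_const() shifts an integer up by 12 bits,
 * rfixed_trunc() shifts back down); that is an assumption based on the
 * radeon_fixed.h helpers of this era rather than anything stated in this
 * file.
 */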
794
795 void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
796                                   struct radeon_crtc *crtc,
797                                   struct rv515_watermark *wm)
798 {
799         struct drm_display_mode *mode = &crtc->base.mode;
800         fixed20_12 a, b, c;
801         fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
802         fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
803
804         if (!crtc->base.enabled) {
805                 /* FIXME: wouldn't it be better to set the priority mark to maximum? */
806                 wm->lb_request_fifo_depth = 4;
807                 return;
808         }
809
810         if (crtc->vsc.full > rfixed_const(2))
811                 wm->num_line_pair.full = rfixed_const(2);
812         else
813                 wm->num_line_pair.full = rfixed_const(1);
814
815         b.full = rfixed_const(mode->crtc_hdisplay);
816         c.full = rfixed_const(256);
817         a.full = rfixed_mul(wm->num_line_pair, b);
818         request_fifo_depth.full = rfixed_div(a, c);
819         if (a.full < rfixed_const(4)) {
820                 wm->lb_request_fifo_depth = 4;
821         } else {
822                 wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
823         }
824
825         /* Determine consumption rate
826          *  pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
827          *  vtaps = number of vertical taps,
828          *  vsc = vertical scaling ratio, defined as source/destination
829          *  hsc = horizontal scaling ratio, defined as source/destination
830          */
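        /*
         * Worked example with illustrative numbers only: a 148.5 MHz pixel
         * clock means mode->clock is 148500 (kHz), so a = 148500 / 1000 =
         * 148.5 and pclk = 1000 / 148.5 ~ 6.73 ns per pixel; with no scaling
         * (RMX_OFF) the consumption rate is simply 1 / pclk.
         */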
831         a.full = rfixed_const(mode->clock);
832         b.full = rfixed_const(1000);
833         a.full = rfixed_div(a, b);
834         pclk.full = rfixed_div(b, a);
835         if (crtc->rmx_type != RMX_OFF) {
836                 b.full = rfixed_const(2);
837                 if (crtc->vsc.full > b.full)
838                         b.full = crtc->vsc.full;
839                 b.full = rfixed_mul(b, crtc->hsc);
840                 c.full = rfixed_const(2);
841                 b.full = rfixed_div(b, c);
842                 consumption_time.full = rfixed_div(pclk, b);
843         } else {
844                 consumption_time.full = pclk.full;
845         }
846         a.full = rfixed_const(1);
847         wm->consumption_rate.full = rfixed_div(a, consumption_time);
848
849
850         /* Determine line time
851          *  LineTime = total time for one line of display
852          *  htotal = total number of horizontal pixels
853          *  pclk = pixel clock period(ns)
854          */
855         a.full = rfixed_const(crtc->base.mode.crtc_htotal);
856         line_time.full = rfixed_mul(a, pclk);
857
858         /* Determine active time
859          *  ActiveTime = time of active region of display within one line,
860          *  hactive = total number of horizontal active pixels
861          *  htotal = total number of horizontal pixels
862          */
863         a.full = rfixed_const(crtc->base.mode.crtc_htotal);
864         b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
865         wm->active_time.full = rfixed_mul(line_time, b);
866         wm->active_time.full = rfixed_div(wm->active_time, a);
867
868         /* Determine chunk time
869          * ChunkTime = the time it takes the DCP to send one chunk of data
870          * to the LB which consists of pipeline delay and inter chunk gap
871          * sclk = system clock (MHz)
872          */
873         a.full = rfixed_const(600 * 1000);
874         chunk_time.full = rfixed_div(a, rdev->pm.sclk);
875         read_delay_latency.full = rfixed_const(1000);
876
877         /* Determine the worst case latency
878          * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
879          * WorstCaseLatency = worst case time from urgent to when the MC starts
880          *                    to return data
881          * READ_DELAY_IDLE_MAX = constant of 1us
882          * ChunkTime = time it takes the DCP to send one chunk of data to the LB
883          *             which consists of pipeline delay and inter chunk gap
884          */
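        /*
         * Rough illustration (made-up sclk, same arithmetic as the code):
         * with sclk = 400 the chunk time computed above is 600000 / 400 =
         * 1500, so two line pairs give a worst case of 3 * 1500 + 1000 =
         * 5500, while a single line pair gives 1500 + 1000 = 2500.
         */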
885         if (rfixed_trunc(wm->num_line_pair) > 1) {
886                 a.full = rfixed_const(3);
887                 wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
888                 wm->worst_case_latency.full += read_delay_latency.full;
889         } else {
890                 wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
891         }
892
893         /* Determine the tolerable latency
894          * TolerableLatency = Any given request has only 1 line time
895          *                    for the data to be returned
896          * LBRequestFifoDepth = Number of chunk requests the LB can
897          *                      put into the request FIFO for a display
898          *  LineTime = total time for one line of display
899          *  ChunkTime = the time it takes the DCP to send one chunk
900          *              of data to the LB which consists of
901          *  pipeline delay and inter chunk gap
902          */
903         if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
904                 tolerable_latency.full = line_time.full;
905         } else {
906                 tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
907                 tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
908                 tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
909                 tolerable_latency.full = line_time.full - tolerable_latency.full;
910         }
911         /* We assume the worst case of 32 bits (4 bytes) per pixel */
912         wm->dbpp.full = rfixed_const(2 * 16);
913
914         /* Determine the maximum priority mark
915          *  width = viewport width in pixels
916          */
917         a.full = rfixed_const(16);
918         wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
919         wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
920
921         /* Determine estimated width */
922         estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
923         estimated_width.full = rfixed_div(estimated_width, consumption_time);
924         if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
925                 wm->priority_mark.full = rfixed_const(10);
926         } else {
927                 a.full = rfixed_const(16);
928                 wm->priority_mark.full = rfixed_div(estimated_width, a);
929                 wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
930         }
931 }
932
933 void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
934 {
935         struct drm_display_mode *mode0 = NULL;
936         struct drm_display_mode *mode1 = NULL;
937         struct rv515_watermark wm0;
938         struct rv515_watermark wm1;
939         u32 tmp;
940         fixed20_12 priority_mark02, priority_mark12, fill_rate;
941         fixed20_12 a, b;
942
943         if (rdev->mode_info.crtcs[0]->base.enabled)
944                 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
945         if (rdev->mode_info.crtcs[1]->base.enabled)
946                 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
947         rs690_line_buffer_adjust(rdev, mode0, mode1);
948
949         rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
950         rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
951
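        /*
         * The two per-CRTC request FIFO depths are packed into one register:
         * CRTC0 in the low 16 bits, CRTC1 in the high 16 bits.
         */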
952         tmp = wm0.lb_request_fifo_depth;
953         tmp |= wm1.lb_request_fifo_depth << 16;
954         WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
955
956         if (mode0 && mode1) {
957                 if (rfixed_trunc(wm0.dbpp) > 64)
958                         a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
959                 else
960                         a.full = wm0.num_line_pair.full;
961                 if (rfixed_trunc(wm1.dbpp) > 64)
962                         b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
963                 else
964                         b.full = wm1.num_line_pair.full;
965                 a.full += b.full;
966                 fill_rate.full = rfixed_div(wm0.sclk, a);
967                 if (wm0.consumption_rate.full > fill_rate.full) {
968                         b.full = wm0.consumption_rate.full - fill_rate.full;
969                         b.full = rfixed_mul(b, wm0.active_time);
970                         a.full = rfixed_const(16);
971                         b.full = rfixed_div(b, a);
972                         a.full = rfixed_mul(wm0.worst_case_latency,
973                                                 wm0.consumption_rate);
974                         priority_mark02.full = a.full + b.full;
975                 } else {
976                         a.full = rfixed_mul(wm0.worst_case_latency,
977                                                 wm0.consumption_rate);
978                         b.full = rfixed_const(16 * 1000);
979                         priority_mark02.full = rfixed_div(a, b);
980                 }
981                 if (wm1.consumption_rate.full > fill_rate.full) {
982                         b.full = wm1.consumption_rate.full - fill_rate.full;
983                         b.full = rfixed_mul(b, wm1.active_time);
984                         a.full = rfixed_const(16);
985                         b.full = rfixed_div(b, a);
986                         a.full = rfixed_mul(wm1.worst_case_latency,
987                                                 wm1.consumption_rate);
988                         priority_mark12.full = a.full + b.full;
989                 } else {
990                         a.full = rfixed_mul(wm1.worst_case_latency,
991                                                 wm1.consumption_rate);
992                         b.full = rfixed_const(16 * 1000);
993                         priority_mark12.full = rfixed_div(a, b);
994                 }
995                 if (wm0.priority_mark.full > priority_mark02.full)
996                         priority_mark02.full = wm0.priority_mark.full;
997                 if (rfixed_trunc(priority_mark02) < 0)
998                         priority_mark02.full = 0;
999                 if (wm0.priority_mark_max.full > priority_mark02.full)
1000                         priority_mark02.full = wm0.priority_mark_max.full;
1001                 if (wm1.priority_mark.full > priority_mark12.full)
1002                         priority_mark12.full = wm1.priority_mark.full;
1003                 if (rfixed_trunc(priority_mark12) < 0)
1004                         priority_mark12.full = 0;
1005                 if (wm1.priority_mark_max.full > priority_mark12.full)
1006                         priority_mark12.full = wm1.priority_mark_max.full;
1007                 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
1008                 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
1009                 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
1010                 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
1011         } else if (mode0) {
1012                 if (rfixed_trunc(wm0.dbpp) > 64)
1013                         a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
1014                 else
1015                         a.full = wm0.num_line_pair.full;
1016                 fill_rate.full = rfixed_div(wm0.sclk, a);
1017                 if (wm0.consumption_rate.full > fill_rate.full) {
1018                         b.full = wm0.consumption_rate.full - fill_rate.full;
1019                         b.full = rfixed_mul(b, wm0.active_time);
1020                         a.full = rfixed_const(16);
1021                         b.full = rfixed_div(b, a);
1022                         a.full = rfixed_mul(wm0.worst_case_latency,
1023                                                 wm0.consumption_rate);
1024                         priority_mark02.full = a.full + b.full;
1025                 } else {
1026                         a.full = rfixed_mul(wm0.worst_case_latency,
1027                                                 wm0.consumption_rate);
1028                         b.full = rfixed_const(16);
1029                         priority_mark02.full = rfixed_div(a, b);
1030                 }
1031                 if (wm0.priority_mark.full > priority_mark02.full)
1032                         priority_mark02.full = wm0.priority_mark.full;
1033                 if (rfixed_trunc(priority_mark02) < 0)
1034                         priority_mark02.full = 0;
1035                 if (wm0.priority_mark_max.full > priority_mark02.full)
1036                         priority_mark02.full = wm0.priority_mark_max.full;
1037                 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
1038                 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
1039                 WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
1040                 WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
1041         } else {
1042                 if (rfixed_trunc(wm1.dbpp) > 64)
1043                         a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
1044                 else
1045                         a.full = wm1.num_line_pair.full;
1046                 fill_rate.full = rfixed_div(wm1.sclk, a);
1047                 if (wm1.consumption_rate.full > fill_rate.full) {
1048                         b.full = wm1.consumption_rate.full - fill_rate.full;
1049                         b.full = rfixed_mul(b, wm1.active_time);
1050                         a.full = rfixed_const(16);
1051                         b.full = rfixed_div(b, a);
1052                         a.full = rfixed_mul(wm1.worst_case_latency,
1053                                                 wm1.consumption_rate);
1054                         priority_mark12.full = a.full + b.full;
1055                 } else {
1056                         a.full = rfixed_mul(wm1.worst_case_latency,
1057                                                 wm1.consumption_rate);
1058                         b.full = rfixed_const(16 * 1000);
1059                         priority_mark12.full = rfixed_div(a, b);
1060                 }
1061                 if (wm1.priority_mark.full > priority_mark12.full)
1062                         priority_mark12.full = wm1.priority_mark.full;
1063                 if (rfixed_trunc(priority_mark12) < 0)
1064                         priority_mark12.full = 0;
1065                 if (wm1.priority_mark_max.full > priority_mark12.full)
1066                         priority_mark12.full = wm1.priority_mark_max.full;
1067                 WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
1068                 WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
1069                 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
1070                 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
1071         }
1072 }
1073
1074 void rv515_bandwidth_update(struct radeon_device *rdev)
1075 {
1076         uint32_t tmp;
1077         struct drm_display_mode *mode0 = NULL;
1078         struct drm_display_mode *mode1 = NULL;
1079
1080         if (rdev->mode_info.crtcs[0]->base.enabled)
1081                 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
1082         if (rdev->mode_info.crtcs[1]->base.enabled)
1083                 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
1084         /*
1085          * Raise the priority of display0/1 requests in the memory
1086          * controller when the user sets the displaypriority option
1087          * to HIGH.
1088          */
1089         if (rdev->disp_priority == 2) {
1090                 tmp = RREG32_MC(MC_MISC_LAT_TIMER);
1091                 tmp &= ~MC_DISP1R_INIT_LAT_MASK;
1092                 tmp &= ~MC_DISP0R_INIT_LAT_MASK;
1093                 if (mode1)
1094                         tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
1095                 if (mode0)
1096                         tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
1097                 WREG32_MC(MC_MISC_LAT_TIMER, tmp);
1098         }
1099         rv515_bandwidth_avivo_update(rdev);
1100 }