/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "r100d.h"

#include <linux/firmware.h>
#include <linux/platform_device.h>

#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"

/* Firmware Names */
#define FIRMWARE_R100		"radeon/R100_cp.bin"
#define FIRMWARE_R200		"radeon/R200_cp.bin"
#define FIRMWARE_R300		"radeon/R300_cp.bin"
#define FIRMWARE_R420		"radeon/R420_cp.bin"
#define FIRMWARE_RS690		"radeon/RS690_cp.bin"
#define FIRMWARE_RS600		"radeon/RS600_cp.bin"
#define FIRMWARE_R520		"radeon/R520_cp.bin"

MODULE_FIRMWARE(FIRMWARE_R100);
MODULE_FIRMWARE(FIRMWARE_R200);
MODULE_FIRMWARE(FIRMWARE_R300);
MODULE_FIRMWARE(FIRMWARE_R420);
MODULE_FIRMWARE(FIRMWARE_RS690);
MODULE_FIRMWARE(FIRMWARE_RS600);
MODULE_FIRMWARE(FIRMWARE_R520);

#include "r100_track.h"
/* This file gathers functions specific to:
 * r100, rv100, rs100, rv200, rs200, r200, rv250, rs300, rv280
 */
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	switch (rdev->pm.dynpm_planned_action) {
	case DYNPM_ACTION_MINIMUM:
		rdev->pm.requested_power_state_index = 0;
		rdev->pm.dynpm_can_downclock = false;
		break;
	case DYNPM_ACTION_DOWNCLOCK:
		if (rdev->pm.current_power_state_index == 0) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_downclock = false;
		} else {
			if (rdev->pm.active_crtc_count > 1) {
				for (i = 0; i < rdev->pm.num_power_states; i++) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
						continue;
					else if (i >= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						break;
					} else {
						rdev->pm.requested_power_state_index = i;
						break;
					}
				}
			} else
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index - 1;
		}
		/* don't use the power state if crtcs are active and no display flag is set */
		if ((rdev->pm.active_crtc_count > 0) &&
		    (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
		     RADEON_PM_MODE_NO_DISPLAY)) {
			rdev->pm.requested_power_state_index++;
		}
		break;
	case DYNPM_ACTION_UPCLOCK:
		if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_upclock = false;
		} else {
			if (rdev->pm.active_crtc_count > 1) {
				for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
						continue;
					else if (i <= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						break;
					} else {
						rdev->pm.requested_power_state_index = i;
						break;
					}
				}
			} else
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index + 1;
		}
		break;
	case DYNPM_ACTION_DEFAULT:
		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
		rdev->pm.dynpm_can_upclock = false;
		break;
	case DYNPM_ACTION_NONE:
	default:
		DRM_ERROR("Requested mode for undefined action\n");
		return;
	}
	/* only one clock mode per power state */
	rdev->pm.requested_clock_mode_index = 0;

	DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  pcie_lanes);
}
void r100_pm_init_profile(struct radeon_device *rdev)
{
	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
	/* low sh */
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
	/* mid sh */
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
	/* high sh */
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
	/* low mh */
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
	/* mid mh */
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
	/* high mh */
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}
void r100_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;

	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		} else {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		}
	}

	sclk_cntl = RREG32_PLL(SCLK_CNTL);
	sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
	sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
	sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
	sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
		if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
		else
			sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
		else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
	} else
		sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;

	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			sclk_more_cntl |= VOLTAGE_DROP_SYNC;
			switch (voltage->delay) {
			case 33:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
				break;
			case 66:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
				break;
			case 99:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
				break;
			case 132:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
				break;
			}
		} else
			sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;

	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		sclk_cntl &= ~FORCE_HDP;
	else
		sclk_cntl |= FORCE_HDP;

	WREG32_PLL(SCLK_CNTL, sclk_cntl);
	WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
	WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
				      ps->pcie_lanes);
		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
	}
}
void r100_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
			} else {
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp |= RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
			}
		}
	}
}
void r100_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
			} else {
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
			}
		}
	}
}
bool r100_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
			connected = true;
		break;
	default:
		break;
	}
	return connected;
}
void r100_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r100_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(RADEON_FP_GEN_CNTL);
		if (connected)
			tmp &= ~RADEON_FP_DETECT_INT_POL;
		else
			tmp |= RADEON_FP_DETECT_INT_POL;
		WREG32(RADEON_FP_GEN_CNTL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(RADEON_FP2_GEN_CNTL);
		if (connected)
			tmp &= ~RADEON_FP2_DETECT_INT_POL;
		else
			tmp |= RADEON_FP2_DETECT_INT_POL;
		WREG32(RADEON_FP2_GEN_CNTL, tmp);
		break;
	default:
		break;
	}
}
void r100_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			rdev->irq.hpd[1] = true;
			break;
		default:
			break;
		}
	}
	if (rdev->irq.installed)
		r100_irq_set(rdev);
}

void r100_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			rdev->irq.hpd[1] = false;
			break;
		default:
			break;
		}
	}
}
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
	/* TODO: can we do something here ? */
	/* It seems the hw only caches one entry, so we should discard this
	 * entry; otherwise, if the first GPU GART read hits this entry it
	 * could end up at the wrong address. */
}
int r100_pci_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.ram.ptr) {
		WARN(1, "R100 PCI GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
	rdev->asic->gart_set_page = &r100_pci_gart_set_page;
	return radeon_gart_table_ram_alloc(rdev);
}
/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
void r100_enable_bm(struct radeon_device *rdev)
{
	uint32_t tmp;
	/* Enable bus mastering */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
}
int r100_pci_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;

	radeon_gart_restore(rdev);
	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp);
	/* set address range for PCI address translate */
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
	WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
	/* set PCI GART page-table base address */
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
	WREG32(RADEON_AIC_CNTL, tmp);
	r100_pci_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
void r100_pci_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	WREG32(RADEON_AIC_LO_ADDR, 0);
	WREG32(RADEON_AIC_HI_ADDR, 0);
}
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	/* >= rather than > : index num_gpu_pages would write one entry
	 * past the end of the table */
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr));
	return 0;
}
void r100_pci_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r100_pci_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
}
int r100_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	if (rdev->irq.sw_int) {
		tmp |= RADEON_SW_INT_ENABLE;
	}
	if (rdev->irq.gui_idle) {
		tmp |= RADEON_GUI_IDLE_MASK;
	}
	if (rdev->irq.crtc_vblank_int[0]) {
		tmp |= RADEON_CRTC_VBLANK_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1]) {
		tmp |= RADEON_CRTC2_VBLANK_MASK;
	}
	if (rdev->irq.hpd[0]) {
		tmp |= RADEON_FP_DETECT_MASK;
	}
	if (rdev->irq.hpd[1]) {
		tmp |= RADEON_FP2_DETECT_MASK;
	}
	WREG32(RADEON_GEN_INT_CNTL, tmp);
	return 0;
}
void r100_irq_disable(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(R_000040_GEN_INT_CNTL, 0);
	/* Wait and acknowledge irq */
	mdelay(1);
	tmp = RREG32(R_000044_GEN_INT_STATUS);
	WREG32(R_000044_GEN_INT_STATUS, tmp);
}
static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
	uint32_t irq_mask = RADEON_SW_INT_TEST |
		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;

	/* the interrupt works, but the status bit is permanently asserted */
	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
		if (!rdev->irq.gui_idle_acked)
			irq_mask |= RADEON_GUI_IDLE_STAT;
	}

	if (irqs) {
		WREG32(RADEON_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}
int r100_irq_process(struct radeon_device *rdev)
{
	uint32_t status, msi_rearm;
	bool queue_hotplug = false;

	/* reset gui idle ack.  the status bit is broken */
	rdev->irq.gui_idle_acked = false;

	status = r100_irq_ack(rdev);
	if (!status) {
		return IRQ_NONE;
	}
	if (rdev->shutdown) {
		return IRQ_NONE;
	}
	while (status) {
		/* SW interrupt */
		if (status & RADEON_SW_INT_TEST) {
			radeon_fence_process(rdev);
		}
		/* gui idle interrupt */
		if (status & RADEON_GUI_IDLE_STAT) {
			rdev->irq.gui_idle_acked = true;
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
		}
		/* Vertical blank interrupts */
		if (status & RADEON_CRTC_VBLANK_STAT) {
			drm_handle_vblank(rdev->ddev, 0);
			rdev->pm.vblank_sync = true;
			wake_up(&rdev->irq.vblank_queue);
		}
		if (status & RADEON_CRTC2_VBLANK_STAT) {
			drm_handle_vblank(rdev->ddev, 1);
			rdev->pm.vblank_sync = true;
			wake_up(&rdev->irq.vblank_queue);
		}
		if (status & RADEON_FP_DETECT_STAT) {
			queue_hotplug = true;
			DRM_DEBUG("HPD1\n");
		}
		if (status & RADEON_FP2_DETECT_STAT) {
			queue_hotplug = true;
			DRM_DEBUG("HPD2\n");
		}
		status = r100_irq_ack(rdev);
	}
	/* reset gui idle ack.  the status bit is broken */
	rdev->irq.gui_idle_acked = false;
	if (queue_hotplug)
		queue_work(rdev->wq, &rdev->hotplug_work);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS400:
		case CHIP_RS480:
			msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
			WREG32(RADEON_AIC_CNTL, msi_rearm);
			WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
			break;
		default:
			msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
			WREG32(RADEON_MSI_REARM_EN, msi_rearm);
			WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(RADEON_CRTC_CRNT_FRAME);
	else
		return RREG32(RADEON_CRTC2_CRNT_FRAME);
}
/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today callers are ib schedule and buffer move) */
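/* For illustration only (a sketch of the expected calling pattern, not
 * code copied from a particular caller): users of radeon_fence_emit()
 * bracket it with the ring lock, along the lines of
 *
 *	r = radeon_ring_lock(rdev, ndw);        reserve ndw dwords
 *	if (r)
 *		return r;
 *	...                                     write packets on the ring
 *	r = radeon_fence_emit(rdev, fence);     fence the queued work
 *	radeon_ring_unlock_commit(rdev);
 *
 * r100_copy_blit() below is a concrete instance of this pattern.
 */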
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* We have to make sure that caches are flushed before
	 * CPU might read something from VRAM. */
	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
				RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
int r100_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
			radeon_bo_unreserve(rdev->wb.wb_obj);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
			return r;
		}
	}
	WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr);
	WREG32(R_00070C_CP_RB_RPTR_ADDR,
	       S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2));
	WREG32(R_000770_SCRATCH_UMSK, 0xff);
	return 0;
}
void r100_wb_disable(struct radeon_device *rdev)
{
	WREG32(R_000770_SCRATCH_UMSK, 0);
}
void r100_wb_fini(struct radeon_device *rdev)
{
	int r;

	r100_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			dev_err(rdev->dev, "(%d) can't finish WB\n", r);
			return;
		}
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
int r100_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_pages,
		   struct radeon_fence *fence)
{
	uint32_t cur_pages;
	uint32_t stride_bytes = PAGE_SIZE;
	uint32_t pitch;
	uint32_t stride_pixels;
	unsigned ndw;
	int num_loops;
	int r = 0;

	/* radeon limited to 16k stride */
	stride_bytes &= 0x3fff;
	/* radeon pitch is /64 */
	pitch = stride_bytes / 64;
	stride_pixels = stride_bytes / 4;
	num_loops = DIV_ROUND_UP(num_pages, 8191);

	/* Ask for enough room for blit + flush + fence */
	ndw = 64 + (10 * num_loops);
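	/* Worked example (illustrative numbers only): moving 16384 pages
	 * gives num_loops = DIV_ROUND_UP(16384, 8191) = 3, so this asks
	 * the ring for ndw = 64 + 10 * 3 = 94 dwords. */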
	r = radeon_ring_lock(rdev, ndw);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
		return -EINVAL;
	}
	while (num_pages > 0) {
		cur_pages = num_pages;
		if (cur_pages > 8191) {
			cur_pages = 8191;
		}
		num_pages -= cur_pages;

		/* pages are in Y direction - height
		   page width in X direction - width */
		radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
		radeon_ring_write(rdev,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
				  RADEON_GMC_DST_CLIPPING |
				  RADEON_GMC_BRUSH_NONE |
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
				  RADEON_GMC_SRC_DATATYPE_COLOR |
				  RADEON_ROP3_S |
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
		radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
		radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(rdev, 0);
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
	}
	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence);
	}
	radeon_ring_unlock_commit(rdev);
	return r;
}
static int r100_cp_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(R_000E40_RBBM_STATUS);
		if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
			return 0;
		}
		udelay(1);
	}
	return -1;
}
void r100_ring_start(struct radeon_device *rdev)
{
	int r;

	r = radeon_ring_lock(rdev, 2);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_unlock_commit(rdev);
}
/* Load the microcode for the CP */
static int r100_cp_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *fw_name = NULL;
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}
	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		DRM_INFO("Loading R100 Microcode\n");
		fw_name = FIRMWARE_R100;
	} else if ((rdev->family == CHIP_R200) ||
		   (rdev->family == CHIP_RV250) ||
		   (rdev->family == CHIP_RV280) ||
		   (rdev->family == CHIP_RS300)) {
		DRM_INFO("Loading R200 Microcode\n");
		fw_name = FIRMWARE_R200;
	} else if ((rdev->family == CHIP_R300) ||
		   (rdev->family == CHIP_R350) ||
		   (rdev->family == CHIP_RV350) ||
		   (rdev->family == CHIP_RV380) ||
		   (rdev->family == CHIP_RS400) ||
		   (rdev->family == CHIP_RS480)) {
		DRM_INFO("Loading R300 Microcode\n");
		fw_name = FIRMWARE_R300;
	} else if ((rdev->family == CHIP_R420) ||
		   (rdev->family == CHIP_R423) ||
		   (rdev->family == CHIP_RV410)) {
		DRM_INFO("Loading R400 Microcode\n");
		fw_name = FIRMWARE_R420;
	} else if ((rdev->family == CHIP_RS690) ||
		   (rdev->family == CHIP_RS740)) {
		DRM_INFO("Loading RS690/RS740 Microcode\n");
		fw_name = FIRMWARE_RS690;
	} else if (rdev->family == CHIP_RS600) {
		DRM_INFO("Loading RS600 Microcode\n");
		fw_name = FIRMWARE_RS600;
	} else if ((rdev->family == CHIP_RV515) ||
		   (rdev->family == CHIP_R520) ||
		   (rdev->family == CHIP_RV530) ||
		   (rdev->family == CHIP_R580) ||
		   (rdev->family == CHIP_RV560) ||
		   (rdev->family == CHIP_RV570)) {
		DRM_INFO("Loading R500 Microcode\n");
		fw_name = FIRMWARE_R520;
	}

	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	platform_device_unregister(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
		       fw_name);
	} else if (rdev->me_fw->size % 8) {
		printk(KERN_ERR
		       "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
	}
	return err;
}
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i, size;

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	if (rdev->me_fw) {
		size = rdev->me_fw->size / 4;
		fw_data = (const __be32 *)&rdev->me_fw->data[0];
		WREG32(RADEON_CP_ME_RAM_ADDR, 0);
		for (i = 0; i < size; i += 2) {
			WREG32(RADEON_CP_ME_RAM_DATAH,
			       be32_to_cpup(&fw_data[i]));
			WREG32(RADEON_CP_ME_RAM_DATAL,
			       be32_to_cpup(&fw_data[i + 1]));
		}
	}
}
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
	unsigned rb_bufsz;
	unsigned rb_blksz;
	unsigned max_fetch;
	unsigned pre_write_timer;
	unsigned pre_write_limit;
	unsigned indirect2_start;
	unsigned indirect1_start;
	uint32_t tmp;
	int r;

	if (r100_debugfs_cp_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for CP !\n");
	}
	if (!rdev->me_fw) {
		r = r100_cp_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
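	/* Worked example (illustrative numbers only): a requested ring_size
	 * of 1 MiB yields rb_bufsz = drm_order(1048576 / 8) = 17, and the
	 * aligned size is (1 << (17 + 1)) * 4 = 1 MiB again; sizes that are
	 * not a power of two are rounded up. */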
	r100_cp_load_microcode(rdev);
	r = radeon_ring_init(rdev, ring_size);
	if (r) {
		return r;
	}
	/* Each time the cp read 1024 bytes (16 dword/quadword) update
	 * the rptr copy in system ram */
	rb_blksz = 9;
	/* cp will read 128bytes at a time (4 dwords) */
	max_fetch = 1;
	rdev->cp.align_mask = 16 - 1;
	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
	pre_write_timer = 64;
	/* Force CP_RB_WPTR write if written more than one time before the
	 * delay expire
	 */
	pre_write_limit = 0;
	/* Setup the cp cache like this (cache size is 96 dwords) :
	 *	RING		0  to 15
	 *	INDIRECT1	16 to 79
	 *	INDIRECT2	80 to 95
	 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
	 * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * Idea being that most of the gpu cmd will be through indirect1 buffer
	 * so it gets the bigger cache.
	 */
	indirect2_start = 80;
	indirect1_start = 16;
	/* cp setup */
	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
	tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
	       REG_SET(RADEON_MAX_FETCH, max_fetch) |
	       RADEON_RB_NO_UPDATE);
#ifdef __BIG_ENDIAN
	tmp |= RADEON_BUF_SWAP_32BIT;
#endif
	WREG32(RADEON_CP_RB_CNTL, tmp);

	/* Set ring address */
	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
	WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
	/* Force read & write ptr to 0 */
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	udelay(10);
	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
	/* protect against crazy HW on resume */
	rdev->cp.wptr &= rdev->cp.ptr_mask;
	/* Set cp mode to bus mastering & enable cp*/
	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
	WREG32(0x744, 0x00004D4D);
	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
	radeon_ring_start(rdev);
	r = radeon_ring_test(rdev);
	if (r) {
		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
		return r;
	}
	rdev->cp.ready = true;
	return 0;
}
void r100_cp_fini(struct radeon_device *rdev)
{
	if (r100_cp_wait_for_idle(rdev)) {
		DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
	}
	/* Disable ring */
	r100_cp_disable(rdev);
	radeon_ring_fini(rdev);
	DRM_INFO("radeon: cp finalized\n");
}
void r100_cp_disable(struct radeon_device *rdev)
{
	/* Disable ring */
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}
void r100_cp_commit(struct radeon_device *rdev)
{
	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(RADEON_CP_RB_WPTR);
}
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check)
{
	unsigned reg;
	unsigned i, j, m;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	/* Check that the register falls into the register range
	 * determined by the number of entries (n) in the
	 * safe register bitmap.
	 */
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
			return -EINVAL;
		}
	} else {
		if (((reg + (pkt->count << 2)) >> 7) > n) {
			return -EINVAL;
		}
	}
	for (i = 0; i <= pkt->count; i++, idx++) {
		j = (reg >> 7);
		m = 1 << ((reg >> 2) & 31);
		if (auth[j] & m) {
			r = check(p, pkt, idx, reg);
			if (r) {
				return r;
			}
		}
		if (pkt->one_reg_wr) {
			if (!(auth[j] & m)) {
				break;
			}
		} else {
			reg += 4;
		}
	}
	return 0;
}
void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib->ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
	}
}
/**
 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 *
 * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the
 * packet is bigger than the remaining ib size, or if the packet is unknown.
 **/
int r100_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
/**
 * r100_cs_packet_next_vline() - parse userspace VLINE packet
 * @parser:	parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT UNTIL packets to the correct crtc.
 * It also detects a switched-off crtc and nulls out the
 * wait in that case.
 */
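/* For illustration (a sketch of the dword layout this parser expects,
 * derived from the h_idx accesses below rather than from any userspace
 * source):
 *
 *	ib[h_idx + 0]	PACKET0 header for VLINE_START_END
 *	ib[h_idx + 1]	start/end scanline value
 *	ib[h_idx + 2]	PACKET0 header for WAIT_UNTIL
 *	ib[h_idx + 3]	RADEON_WAIT_CRTC_VLINE
 *	ib[h_idx + 4]	PACKET3 NOP header (the relocation)
 *	ib[h_idx + 5]	crtc_id
 */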
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, waitreloc;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg;
	volatile uint32_t *ib;

	ib = p->ib->ptr;

	/* parse the wait until */
	r = r100_cs_packet_parse(p, &waitreloc, p->idx);
	if (r)
		return r;

	/* check it's a wait until and only 1 count */
	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
	    waitreloc.count != 0) {
		DRM_ERROR("vline wait had illegal wait until segment\n");
		r = -EINVAL;
		return r;
	}

	if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
		DRM_ERROR("vline wait had illegal wait until\n");
		r = -EINVAL;
		return r;
	}

	/* jump over the NOP */
	r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += waitreloc.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 5);
	reg = CP_PACKET0_GET_REG(header);
	mutex_lock(&p->rdev->ddev->mode_config.mutex);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		r = -EINVAL;
		goto out;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the wait until */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		case RADEON_CRTC_GUI_TRIG_VLINE:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			r = -EINVAL;
			goto out;
		}
		ib[h_idx] = header;
		ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
	}
out:
	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
	return r;
}
/**
 * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
 * @parser:		parser structure holding parsing context.
 * @data:		pointer to relocation data
 * @offset_start:	starting offset
 * @offset_mask:	offset mask (to align start offset on)
 * @reloc:		reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation and
 * compute the GPU offset using the provided start.
 **/
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
			      struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}
static int r100_get_vtx_size(uint32_t vtx_fmt)
{
	int vtx_size;
	vtx_size = 2;
	/* ordered according to bits in spec */
	if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
		vtx_size++;
	/* blend weight */
	if (vtx_fmt & (0x7 << 15))
		vtx_size += (vtx_fmt >> 15) & 0x7;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
		vtx_size++;
	return vtx_size;
}
static int r100_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp;
	int r;
	int i, face;
	u32 tile_flags = 0;
	u32 idx_value;

	ib = p->ib->ptr;
	track = (struct r100_cs_track *)p->track;

	idx_value = radeon_get_ib_value(p, idx);

	switch (reg) {
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		break;
		/* FIXME: only allow PACKET3 blit? easier to check for out of
		 * range access */
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case RADEON_RB3D_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_RB3D_COLOROFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[0].robj = reloc->robj;
		track->cb[0].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_PP_TXOFFSET_0:
	case RADEON_PP_TXOFFSET_1:
	case RADEON_PP_TXOFFSET_2:
		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[i].robj = reloc->robj;
		break;
	case RADEON_PP_CUBIC_OFFSET_T0_0:
	case RADEON_PP_CUBIC_OFFSET_T0_1:
	case RADEON_PP_CUBIC_OFFSET_T0_2:
	case RADEON_PP_CUBIC_OFFSET_T0_3:
	case RADEON_PP_CUBIC_OFFSET_T0_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[0].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[0].cube_info[i].robj = reloc->robj;
		break;
	case RADEON_PP_CUBIC_OFFSET_T1_0:
	case RADEON_PP_CUBIC_OFFSET_T1_1:
	case RADEON_PP_CUBIC_OFFSET_T1_2:
	case RADEON_PP_CUBIC_OFFSET_T1_3:
	case RADEON_PP_CUBIC_OFFSET_T1_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[1].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[1].cube_info[i].robj = reloc->robj;
		break;
	case RADEON_PP_CUBIC_OFFSET_T2_0:
	case RADEON_PP_CUBIC_OFFSET_T2_1:
	case RADEON_PP_CUBIC_OFFSET_T2_2:
	case RADEON_PP_CUBIC_OFFSET_T2_3:
	case RADEON_PP_CUBIC_OFFSET_T2_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[2].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[2].cube_info[i].robj = reloc->robj;
		break;
	case RADEON_RE_WIDTH_HEIGHT:
		track->maxy = ((idx_value >> 16) & 0x7FF);
		break;
	case RADEON_RB3D_COLORPITCH:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= RADEON_COLOR_TILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

		tmp = idx_value & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
		break;
	case RADEON_RB3D_DEPTHPITCH:
		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
		break;
	case RADEON_RB3D_CNTL:
		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
		case 7:
		case 8:
		case 9:
		case 11:
		case 12:
			track->cb[0].cpp = 1;
			break;
		case 3:
		case 4:
		case 15:
			track->cb[0].cpp = 2;
			break;
		case 6:
			track->cb[0].cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
			return -EINVAL;
		}
		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
		break;
	case RADEON_RB3D_ZSTENCILCNTL:
		switch (idx_value & 0xf) {
		case 0:
			track->zb.cpp = 2;
			break;
		case 2:
		case 3:
		case 4:
		case 5:
		case 9:
		case 11:
			track->zb.cpp = 4;
			break;
		default:
			break;
		}
		break;
	case RADEON_RB3D_ZPASS_ADDR:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_PP_CNTL:
		{
			uint32_t temp = idx_value >> 4;
			for (i = 0; i < track->num_texture; i++)
				track->textures[i].enabled = !!(temp & (1 << i));
		}
		break;
	case RADEON_SE_VF_CNTL:
		track->vap_vf_cntl = idx_value;
		break;
	case RADEON_SE_VTX_FMT:
		track->vtx_size = r100_get_vtx_size(idx_value);
		break;
	case RADEON_PP_TEX_SIZE_0:
	case RADEON_PP_TEX_SIZE_1:
	case RADEON_PP_TEX_SIZE_2:
		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
		break;
	case RADEON_PP_TEX_PITCH_0:
	case RADEON_PP_TEX_PITCH_1:
	case RADEON_PP_TEX_PITCH_2:
		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
		track->textures[i].pitch = idx_value + 32;
		break;
	case RADEON_PP_TXFILTER_0:
	case RADEON_PP_TXFILTER_1:
	case RADEON_PP_TXFILTER_2:
		i = (reg - RADEON_PP_TXFILTER_0) / 24;
		track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
						 >> RADEON_MAX_MIP_LEVEL_SHIFT);
		tmp = (idx_value >> 23) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_w = false;
		tmp = (idx_value >> 27) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_h = false;
		break;
	case RADEON_PP_TXFORMAT_0:
	case RADEON_PP_TXFORMAT_1:
	case RADEON_PP_TXFORMAT_2:
		i = (reg - RADEON_PP_TXFORMAT_0) / 24;
		if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
			track->textures[i].use_pitch = 1;
		} else {
			track->textures[i].use_pitch = 0;
			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
		}
		if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
			track->textures[i].tex_coord_type = 2;
		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
		case RADEON_TXFORMAT_I8:
		case RADEON_TXFORMAT_RGB332:
		case RADEON_TXFORMAT_Y8:
			track->textures[i].cpp = 1;
			break;
		case RADEON_TXFORMAT_AI88:
		case RADEON_TXFORMAT_ARGB1555:
		case RADEON_TXFORMAT_RGB565:
		case RADEON_TXFORMAT_ARGB4444:
		case RADEON_TXFORMAT_VYUY422:
		case RADEON_TXFORMAT_YVYU422:
		case RADEON_TXFORMAT_SHADOW16:
		case RADEON_TXFORMAT_LDUDV655:
		case RADEON_TXFORMAT_DUDV88:
			track->textures[i].cpp = 2;
			break;
		case RADEON_TXFORMAT_ARGB8888:
		case RADEON_TXFORMAT_RGBA8888:
		case RADEON_TXFORMAT_SHADOW32:
		case RADEON_TXFORMAT_LDUDUV8888:
			track->textures[i].cpp = 4;
			break;
		case RADEON_TXFORMAT_DXT1:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
			break;
		case RADEON_TXFORMAT_DXT23:
		case RADEON_TXFORMAT_DXT45:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
			break;
		}
		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
		break;
	case RADEON_PP_CUBIC_FACES_0:
	case RADEON_PP_CUBIC_FACES_1:
	case RADEON_PP_CUBIC_FACES_2:
		tmp = idx_value;
		i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
		for (face = 0; face < 4; face++) {
			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
					 struct radeon_cs_packet *pkt,
					 struct radeon_bo *robj)
{
	unsigned idx;
	u32 value;
	idx = pkt->idx + 1;
	value = radeon_get_ib_value(p, idx + 2);
	if ((value + 1) > radeon_bo_size(robj)) {
		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
			  "(need %u have %lu) !\n",
			  value + 1,
			  radeon_bo_size(robj));
		return -EINVAL;
	}
	return 0;
}
static int r100_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	unsigned idx;
	volatile uint32_t *ib;
	int r;

	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	track = (struct r100_cs_track *)p->track;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		r = r100_packet3_load_vbpntr(p, pkt, idx);
		if (r)
			return r;
		break;
	case PACKET3_INDX_BUFFER:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	case 0x23:
		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
		track->num_arrays = 1;
		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));

		track->arrays[0].robj = reloc->robj;
		track->arrays[0].esize = track->vtx_size;

		track->max_indx = radeon_get_ib_value(p, idx+1);

		track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
	case PACKET3_3D_DRAW_IMMD:
		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_IMMD_2:
		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using indices to vertex buffer */
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
		if (r)
			return r;
		break;
		/* triggers drawing using indices to vertex buffer */
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
int r100_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r100_cs_track *track;
	int r;

	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r100_cs_track_clear(p->rdev, track);
	p->track = track;
	do {
		r = r100_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			if (p->rdev->family >= CHIP_R200)
				r = r100_cs_parse_packet0(p, &pkt,
							  p->rdev->config.r100.reg_safe_bm,
							  p->rdev->config.r100.reg_safe_bm_size,
							  &r200_packet0_check);
			else
				r = r100_cs_parse_packet0(p, &pkt,
							  p->rdev->config.r100.reg_safe_bm,
							  p->rdev->config.r100.reg_safe_bm_size,
							  &r100_packet0_check);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r100_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n",
				  pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}
/*
 * Global GPU functions
 */
void r100_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
	}

	if (rdev->family == CHIP_RV100 ||
	    rdev->family == CHIP_RS100 ||
	    rdev->family == CHIP_RS200) {
		rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
	}
}
/* Wait for vertical sync on primary CRTC */
void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
{
	uint32_t crtc_gen_cntl, tmp;
	int i;

	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
	if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
	    !(crtc_gen_cntl & RADEON_CRTC_EN)) {
		return;
	}
	/* Clear the CRTC_VBLANK_SAVE bit */
	WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_CRTC_STATUS);
		if (tmp & RADEON_CRTC_VBLANK_SAVE) {
			return;
		}
		DRM_UDELAY(1);
	}
}
1901 void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
1903 uint32_t crtc2_gen_cntl, tmp;
1906 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
1907 if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
1908 !(crtc2_gen_cntl & RADEON_CRTC2_EN))
1911 /* Clear the CRTC_VBLANK_SAVE bit */
1912 WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
1913 for (i = 0; i < rdev->usec_timeout; i++) {
1914 tmp = RREG32(RADEON_CRTC2_STATUS);
1915 if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
		if (tmp >= n) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

int r100_gui_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
		printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
		       " Bad things might happen.\n");
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & RADEON_RBBM_ACTIVE)) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

int r100_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(RADEON_MC_STATUS);
		if (tmp & RADEON_MC_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}
void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
{
	lockup->last_cp_rptr = cp->rptr;
	lockup->last_jiffies = jiffies;
}
/**
 * r100_gpu_cp_is_lockup() - check if the CP is locked up by recording information
 * @rdev:	radeon device structure
 * @lockup:	r100_gpu_lockup structure holding CP lockup tracking information
 * @cp:		radeon_cp structure holding CP information
 *
 * We don't need to initialize the lockup tracking information, as we will
 * either see the CP rptr at a different value, or a jiffies wrap around,
 * either of which forces initialization of the lockup tracking information.
 *
 * A possible false positive is if we get called after a while with
 * last_cp_rptr == the current CP rptr; even if it's unlikely, it might happen.
 * To avoid this, if the elapsed time since the last call is bigger than
 * 2 seconds we return false and update the tracking information. Because of
 * this, the caller must call r100_gpu_cp_is_lockup several times in less than
 * 2 seconds for a lockup to be reported; the fencing code should be cautious
 * about that.
 *
 * The caller should write to the ring to force the CP to do something, so we
 * don't get a false positive when the CP simply has nothing to do.
 **/
bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
{
	unsigned long cjiffies, elapsed;

	cjiffies = jiffies;
	if (!time_after(cjiffies, lockup->last_jiffies)) {
		/* likely a wrap around */
		lockup->last_cp_rptr = cp->rptr;
		lockup->last_jiffies = jiffies;
		return false;
	}
	if (cp->rptr != lockup->last_cp_rptr) {
		/* CP is still working no lockup */
		lockup->last_cp_rptr = cp->rptr;
		lockup->last_jiffies = jiffies;
		return false;
	}
	elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
	if (elapsed >= 3000) {
		/* very likely the improbable case where the current rptr
		 * equals the rptr recorded a while ago; this is more likely
		 * a false positive, so update the tracking information,
		 * which forces us to be called again at a later point */
		lockup->last_cp_rptr = cp->rptr;
		lockup->last_jiffies = jiffies;
		return false;
	}
	if (elapsed >= 1000) {
		dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
		return true;
	}
	/* give a chance to the GPU ... */
	return false;
}
bool r100_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 rbbm_status;
	int r;

	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
		r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
}
void r100_bm_disable(struct radeon_device *rdev)
{
	u32 tmp;

	/* disable bus mastering */
	tmp = RREG32(R_000030_BUS_CNTL);
	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
	mdelay(1);
	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
	mdelay(1);
	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
	tmp = RREG32(RADEON_BUS_CNTL);
	mdelay(1);
	pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
	mdelay(1);
}
int r100_asic_reset(struct radeon_device *rdev)
{
	struct r100_mc_save save;
	u32 status, tmp;

	r100_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* stop CP */
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	/* save PCI state */
	pci_save_state(rdev->pdev);
	/* disable bus mastering */
	r100_bm_disable(rdev);
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
					 S_0000F0_SOFT_RESET_RE(1) |
					 S_0000F0_SOFT_RESET_PP(1) |
					 S_0000F0_SOFT_RESET_RB(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset CP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(rdev->pdev);
	r100_enable_bm(rdev);
	/* Check if GPU is idle */
	if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
	    G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		rdev->gpu_lockup = true;
		return -1;
	}
	r100_mc_resume(rdev, &save);
	dev_info(rdev->dev, "GPU reset succeeded\n");
	return 0;
}
void r100_set_common_regs(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	bool force_dac2 = false;
	u32 tmp;

	/* set these so they don't interfere with anything */
	WREG32(RADEON_OV0_SCALE_CNTL, 0);
	WREG32(RADEON_SUBPIC_CNTL, 0);
	WREG32(RADEON_VIPH_CONTROL, 0);
	WREG32(RADEON_I2C_CNTL_1, 0);
	WREG32(RADEON_DVI_I2C_CNTL_1, 0);
	WREG32(RADEON_CAP0_TRIG_CNTL, 0);
	WREG32(RADEON_CAP1_TRIG_CNTL, 0);

	/* always set up dac2 on rn50 and some rv100 as lots
	 * of servers seem to wire it up to a VGA port but
	 * don't report it in the bios connector
	 * table.
	 */
	switch (dev->pdev->device) {
		/* RN50 */
	case 0x515e:
	case 0x5969:
		force_dac2 = true;
		break;
		/* RV100 */
	case 0x5159:
	case 0x515a:
		/* DELL triple head servers */
		if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
		    ((dev->pdev->subsystem_device == 0x016c) ||
		     (dev->pdev->subsystem_device == 0x016d) ||
		     (dev->pdev->subsystem_device == 0x016e) ||
		     (dev->pdev->subsystem_device == 0x016f) ||
		     (dev->pdev->subsystem_device == 0x0170) ||
		     (dev->pdev->subsystem_device == 0x017d) ||
		     (dev->pdev->subsystem_device == 0x017e) ||
		     (dev->pdev->subsystem_device == 0x0183) ||
		     (dev->pdev->subsystem_device == 0x018a) ||
		     (dev->pdev->subsystem_device == 0x019a)))
			force_dac2 = true;
		break;
	}

	if (force_dac2) {
		u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
		u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
		u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);

		/* For CRT on DAC2, don't turn it on if the BIOS didn't
		   enable it, even if it's detected.
		*/

		/* force it to crtc0 */
		dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
		dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
		disp_hw_debug |= RADEON_CRT2_DISP1_SEL;

		/* set up the TV DAC */
		tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
				 RADEON_TV_DAC_STD_MASK |
				 RADEON_TV_DAC_RDACPD |
				 RADEON_TV_DAC_GDACPD |
				 RADEON_TV_DAC_BDACPD |
				 RADEON_TV_DAC_BGADJ_MASK |
				 RADEON_TV_DAC_DACADJ_MASK);
		tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
				RADEON_TV_DAC_NHOLD |
				RADEON_TV_DAC_STD_PS2 |
				(0x58 << 16));

		WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
		WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
		WREG32(RADEON_DAC_CNTL2, dac2_cntl);
	}

	/* switch PM block to ACPI mode */
	tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
	tmp &= ~RADEON_PM_MODE_SEL;
	WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
}
2219 static void r100_vram_get_type(struct radeon_device *rdev)
2223 rdev->mc.vram_is_ddr = false;
2224 if (rdev->flags & RADEON_IS_IGP)
2225 rdev->mc.vram_is_ddr = true;
2226 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
2227 rdev->mc.vram_is_ddr = true;
2228 if ((rdev->family == CHIP_RV100) ||
2229 (rdev->family == CHIP_RS100) ||
2230 (rdev->family == CHIP_RS200)) {
2231 tmp = RREG32(RADEON_MEM_CNTL);
2232 if (tmp & RV100_HALF_MODE) {
2233 rdev->mc.vram_width = 32;
2235 rdev->mc.vram_width = 64;
2237 if (rdev->flags & RADEON_SINGLE_CRTC) {
2238 rdev->mc.vram_width /= 4;
2239 rdev->mc.vram_is_ddr = true;
2241 } else if (rdev->family <= CHIP_RV280) {
2242 tmp = RREG32(RADEON_MEM_CNTL);
2243 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
2244 rdev->mc.vram_width = 128;
2246 rdev->mc.vram_width = 64;
2250 rdev->mc.vram_width = 128;
2254 static u32 r100_get_accessible_vram(struct radeon_device *rdev)
2259 aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2261 /* Set HDP_APER_CNTL only on cards that are known not to be broken,
2262 * that is, have the 2nd generation multifunction PCI interface
2264 if (rdev->family == CHIP_RV280 ||
2265 rdev->family >= CHIP_RV350) {
2266 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
2267 ~RADEON_HDP_APER_CNTL);
2268 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
2269 return aper_size * 2;
2272 /* Older cards have all sorts of funny issues to deal with. First
2273 * check if it's a multifunction card by reading the PCI config
2274 * header type... Limit those to one aperture size
2276 pci_read_config_byte(rdev->pdev, 0xe, &byte);
2278 DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
2279 DRM_INFO("Limiting VRAM to one aperture\n");
2283 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS
2284 * has set it up. We don't write this as it's broken on some ASICs but
2285 * we expect the BIOS to have done the right thing (might be too optimistic...)
2287 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
2288 return aper_size * 2;
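/* Worked example with hypothetical values: CONFIG_APER_SIZE reading
 * 0x08000000 (128 MB) with HDP_APER_CNTL already set by the BIOS
 * yields 256 MB of accessible VRAM; a generation 1 multifunction
 * card would instead be limited to the single 128 MB aperture.
 */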
2292 void r100_vram_init_sizes(struct radeon_device *rdev)
2294 u64 config_aper_size;
2296 /* work out accessible VRAM */
2297 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
2298 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
2299 rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
2300 /* FIXME we don't use the second aperture yet when we could use it */
2301 if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
2302 rdev->mc.visible_vram_size = rdev->mc.aper_size;
2303 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2304 if (rdev->flags & RADEON_IS_IGP) {
2306 /* read NB_TOM to get the amount of ram stolen for the GPU */
2307 tom = RREG32(RADEON_NB_TOM);
2308 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
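/* Example decode of the line above, with a hypothetical NB_TOM value:
 * 0x3fff3800 gives top = 0x3fff and bottom = 0x3800, so
 * (0x3fff - 0x3800 + 1) << 16 = 0x08000000, i.e. 128 MB stolen for
 * the GPU.
 */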
2309 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2310 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2312 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
2313 /* Some production boards of m6 will report 0 if it's 8 MB
2316 if (rdev->mc.real_vram_size == 0) {
2317 rdev->mc.real_vram_size = 8192 * 1024;
2318 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2320 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
2321 * Novell bug 204882 along with lots of Ubuntu ones
2323 if (config_aper_size > rdev->mc.real_vram_size)
2324 rdev->mc.mc_vram_size = config_aper_size;
2326 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2330 void r100_vga_set_state(struct radeon_device *rdev, bool state)
2334 temp = RREG32(RADEON_CONFIG_CNTL);
2335 if (state == false) {
2341 WREG32(RADEON_CONFIG_CNTL, temp);
2344 void r100_mc_init(struct radeon_device *rdev)
2348 r100_vram_get_type(rdev);
2349 r100_vram_init_sizes(rdev);
2350 base = rdev->mc.aper_base;
2351 if (rdev->flags & RADEON_IS_IGP)
2352 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
2353 radeon_vram_location(rdev, &rdev->mc, base);
2354 if (!(rdev->flags & RADEON_IS_AGP))
2355 radeon_gtt_location(rdev, &rdev->mc);
2356 radeon_update_bandwidth_info(rdev);
2361 * Indirect registers accessor
2363 void r100_pll_errata_after_index(struct radeon_device *rdev)
2365 if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
2368 (void)RREG32(RADEON_CLOCK_CNTL_DATA);
2369 (void)RREG32(RADEON_CRTC_GEN_CNTL);
2372 static void r100_pll_errata_after_data(struct radeon_device *rdev)
2374 /* This workaround is necessary on RV100, RS100 and RS200 chips
2375 * or the chip could hang on a subsequent access
2377 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
2381 /* This function is required to work around a hardware bug in some (all?)
2382 * revisions of the R300. This workaround should be called after every
2383 * CLOCK_CNTL_INDEX register access. If not, register reads afterward
2384 * may not be correct.
2386 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
2389 save = RREG32(RADEON_CLOCK_CNTL_INDEX);
2390 tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
2391 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
2392 tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
2393 WREG32(RADEON_CLOCK_CNTL_INDEX, save);
2397 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
2401 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
2402 r100_pll_errata_after_index(rdev);
2403 data = RREG32(RADEON_CLOCK_CNTL_DATA);
2404 r100_pll_errata_after_data(rdev);
2408 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
2410 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
2411 r100_pll_errata_after_index(rdev);
2412 WREG32(RADEON_CLOCK_CNTL_DATA, v);
2413 r100_pll_errata_after_data(rdev);
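/* Minimal usage sketch of the accessors above, mirroring the PM-block
 * code earlier in this file (read-modify-write through the indirect
 * index/data pair, with the errata hooks applied on each access):
 *
 *	tmp = r100_pll_rreg(rdev, RADEON_PLL_PWRMGT_CNTL);
 *	tmp &= ~RADEON_PM_MODE_SEL;
 *	r100_pll_wreg(rdev, RADEON_PLL_PWRMGT_CNTL, tmp);
 *
 * The RREG32_PLL/WREG32_PLL macros reach these functions through the
 * ASIC callback table.
 */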
2416 void r100_set_safe_registers(struct radeon_device *rdev)
2418 if (ASIC_IS_RN50(rdev)) {
2419 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
2420 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
2421 } else if (rdev->family < CHIP_R200) {
2422 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
2423 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
2425 r200_set_safe_registers(rdev);
2432 #if defined(CONFIG_DEBUG_FS)
2433 static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
2435 struct drm_info_node *node = (struct drm_info_node *) m->private;
2436 struct drm_device *dev = node->minor->dev;
2437 struct radeon_device *rdev = dev->dev_private;
2438 uint32_t reg, value;
2441 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
2442 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
2443 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2444 for (i = 0; i < 64; i++) {
2445 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
2446 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
2447 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
2448 value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
2449 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
2454 static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
2456 struct drm_info_node *node = (struct drm_info_node *) m->private;
2457 struct drm_device *dev = node->minor->dev;
2458 struct radeon_device *rdev = dev->dev_private;
2460 unsigned count, i, j;
2462 radeon_ring_free_size(rdev);
2463 rdp = RREG32(RADEON_CP_RB_RPTR);
2464 wdp = RREG32(RADEON_CP_RB_WPTR);
2465 count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
2466 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2467 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
2468 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
2469 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
2470 seq_printf(m, "%u dwords in ring\n", count);
2471 for (j = 0; j <= count; j++) {
2472 i = (rdp + j) & rdev->cp.ptr_mask;
2473 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
2479 static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
2481 struct drm_info_node *node = (struct drm_info_node *) m->private;
2482 struct drm_device *dev = node->minor->dev;
2483 struct radeon_device *rdev = dev->dev_private;
2484 uint32_t csq_stat, csq2_stat, tmp;
2485 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
2488 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2489 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
2490 csq_stat = RREG32(RADEON_CP_CSQ_STAT);
2491 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
2492 r_rptr = (csq_stat >> 0) & 0x3ff;
2493 r_wptr = (csq_stat >> 10) & 0x3ff;
2494 ib1_rptr = (csq_stat >> 20) & 0x3ff;
2495 ib1_wptr = (csq2_stat >> 0) & 0x3ff;
2496 ib2_rptr = (csq2_stat >> 10) & 0x3ff;
2497 ib2_wptr = (csq2_stat >> 20) & 0x3ff;
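/* Field layout used above: CSQ_STAT packs the ring rptr, ring wptr
 * and indirect1 rptr into 10-bit fields at bit offsets 0, 10 and 20;
 * CSQ2_STAT packs indirect1 wptr, indirect2 rptr and indirect2 wptr
 * the same way.
 */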
2498 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
2499 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
2500 seq_printf(m, "Ring rptr %u\n", r_rptr);
2501 seq_printf(m, "Ring wptr %u\n", r_wptr);
2502 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
2503 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
2504 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
2505 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
2506 /* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms
2507 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
2508 seq_printf(m, "Ring fifo:\n");
2509 for (i = 0; i < 256; i++) {
2510 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2511 tmp = RREG32(RADEON_CP_CSQ_DATA);
2512 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
2514 seq_printf(m, "Indirect1 fifo:\n");
2515 for (i = 256; i <= 512; i++) {
2516 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2517 tmp = RREG32(RADEON_CP_CSQ_DATA);
2518 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
2520 seq_printf(m, "Indirect2 fifo:\n");
2521 for (i = 640; i < ib1_wptr; i++) {
2522 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2523 tmp = RREG32(RADEON_CP_CSQ_DATA);
2524 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
2529 static int r100_debugfs_mc_info(struct seq_file *m, void *data)
2531 struct drm_info_node *node = (struct drm_info_node *) m->private;
2532 struct drm_device *dev = node->minor->dev;
2533 struct radeon_device *rdev = dev->dev_private;
2536 tmp = RREG32(RADEON_CONFIG_MEMSIZE);
2537 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
2538 tmp = RREG32(RADEON_MC_FB_LOCATION);
2539 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
2540 tmp = RREG32(RADEON_BUS_CNTL);
2541 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
2542 tmp = RREG32(RADEON_MC_AGP_LOCATION);
2543 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
2544 tmp = RREG32(RADEON_AGP_BASE);
2545 seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
2546 tmp = RREG32(RADEON_HOST_PATH_CNTL);
2547 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
2548 tmp = RREG32(0x01D0);
2549 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
2550 tmp = RREG32(RADEON_AIC_LO_ADDR);
2551 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
2552 tmp = RREG32(RADEON_AIC_HI_ADDR);
2553 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
2554 tmp = RREG32(0x01E4);
2555 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
2559 static struct drm_info_list r100_debugfs_rbbm_list[] = {
2560 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
2563 static struct drm_info_list r100_debugfs_cp_list[] = {
2564 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
2565 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
2568 static struct drm_info_list r100_debugfs_mc_info_list[] = {
2569 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
2573 int r100_debugfs_rbbm_init(struct radeon_device *rdev)
2575 #if defined(CONFIG_DEBUG_FS)
2576 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
2582 int r100_debugfs_cp_init(struct radeon_device *rdev)
2584 #if defined(CONFIG_DEBUG_FS)
2585 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
2591 int r100_debugfs_mc_info_init(struct radeon_device *rdev)
2593 #if defined(CONFIG_DEBUG_FS)
2594 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
2600 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
2601 uint32_t tiling_flags, uint32_t pitch,
2602 uint32_t offset, uint32_t obj_size)
2604 int surf_index = reg * 16;
2607 /* r100/r200 divide by 16 */
2608 if (rdev->family < CHIP_R300)
2613 if (rdev->family <= CHIP_RS200) {
2614 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
2615 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
2616 flags |= RADEON_SURF_TILE_COLOR_BOTH;
2617 if (tiling_flags & RADEON_TILING_MACRO)
2618 flags |= RADEON_SURF_TILE_COLOR_MACRO;
2619 } else if (rdev->family <= CHIP_RV280) {
2620 if (tiling_flags & (RADEON_TILING_MACRO))
2621 flags |= R200_SURF_TILE_COLOR_MACRO;
2622 if (tiling_flags & RADEON_TILING_MICRO)
2623 flags |= R200_SURF_TILE_COLOR_MICRO;
2625 if (tiling_flags & RADEON_TILING_MACRO)
2626 flags |= R300_SURF_TILE_MACRO;
2627 if (tiling_flags & RADEON_TILING_MICRO)
2628 flags |= R300_SURF_TILE_MICRO;
2631 if (tiling_flags & RADEON_TILING_SWAP_16BIT)
2632 flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
2633 if (tiling_flags & RADEON_TILING_SWAP_32BIT)
2634 flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
2636 DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
2637 WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
2638 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
2639 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
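/* Each surface owns a 16-byte bank of registers (surf_index = reg * 16
 * above), so e.g. reg = 2 lands on RADEON_SURFACE0_INFO + 32, i.e. the
 * SURFACE2_* registers.
 */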
2643 void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
2645 int surf_index = reg * 16;
2646 WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
2649 void r100_bandwidth_update(struct radeon_device *rdev)
2651 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
2652 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
2653 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
2654 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
2655 fixed20_12 memtcas_ff[8] = {
2660 dfixed_init_half(1),
2661 dfixed_init_half(2),
2664 fixed20_12 memtcas_rs480_ff[8] = {
2670 dfixed_init_half(1),
2671 dfixed_init_half(2),
2672 dfixed_init_half(3),
2674 fixed20_12 memtcas2_ff[8] = {
2684 fixed20_12 memtrbs[8] = {
2686 dfixed_init_half(1),
2688 dfixed_init_half(2),
2690 dfixed_init_half(3),
2694 fixed20_12 memtrbs_r4xx[8] = {
2704 fixed20_12 min_mem_eff;
2705 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
2706 fixed20_12 cur_latency_mclk, cur_latency_sclk;
2707 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
2708 disp_drain_rate2, read_return_rate;
2709 fixed20_12 time_disp1_drop_priority;
2711 int cur_size = 16; /* in octawords */
2712 int critical_point = 0, critical_point2;
2713 /* uint32_t read_return_rate, time_disp1_drop_priority; */
2714 int stop_req, max_stop_req;
2715 struct drm_display_mode *mode1 = NULL;
2716 struct drm_display_mode *mode2 = NULL;
2717 uint32_t pixel_bytes1 = 0;
2718 uint32_t pixel_bytes2 = 0;
2720 radeon_update_display_priority(rdev);
2722 if (rdev->mode_info.crtcs[0]->base.enabled) {
2723 mode1 = &rdev->mode_info.crtcs[0]->base.mode;
2724 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
2726 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
2727 if (rdev->mode_info.crtcs[1]->base.enabled) {
2728 mode2 = &rdev->mode_info.crtcs[1]->base.mode;
2729 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
2733 min_mem_eff.full = dfixed_const_8(0);
2735 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
2736 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
2737 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
2738 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
2739 /* check crtc enables */
2741 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
2743 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
2744 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
2748 * determine if there is enough bw for the current mode
2750 sclk_ff = rdev->pm.sclk;
2751 mclk_ff = rdev->pm.mclk;
2753 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
2754 temp_ff.full = dfixed_const(temp);
2755 mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
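/* Worked example with hypothetical clocks: a 128-bit DDR interface
 * gives temp = (128 / 8) * 2 = 32 bytes per memory clock, so with
 * mclk = 200 MHz the raw bandwidth is 32 * 200M = 6.4 GB/s, before
 * the efficiency factor is applied below.
 */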
2759 peak_disp_bw.full = 0;
2761 temp_ff.full = dfixed_const(1000);
2762 pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
2763 pix_clk.full = dfixed_div(pix_clk, temp_ff);
2764 temp_ff.full = dfixed_const(pixel_bytes1);
2765 peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
2768 temp_ff.full = dfixed_const(1000);
2769 pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
2770 pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
2771 temp_ff.full = dfixed_const(pixel_bytes2);
2772 peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
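/* Worked example with a hypothetical mode: 1024x768@60 has a pixel
 * clock of about 65000 kHz, so at 32 bpp one head contributes roughly
 * (65000 / 1000) * 4 = 260 MB/s to peak_disp_bw.
 */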
2775 mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
2776 if (peak_disp_bw.full >= mem_bw.full) {
2777 DRM_ERROR("You may not have enough display bandwidth for current mode\n"
2778 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
2781 /* Get values from the EXT_MEM_CNTL register...converting its contents. */
2782 temp = RREG32(RADEON_MEM_TIMING_CNTL);
2783 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
2784 mem_trcd = ((temp >> 2) & 0x3) + 1;
2785 mem_trp = ((temp & 0x3)) + 1;
2786 mem_tras = ((temp & 0x70) >> 4) + 1;
2787 } else if (rdev->family == CHIP_R300 ||
2788 rdev->family == CHIP_R350) { /* r300, r350 */
2789 mem_trcd = (temp & 0x7) + 1;
2790 mem_trp = ((temp >> 8) & 0x7) + 1;
2791 mem_tras = ((temp >> 11) & 0xf) + 4;
2792 } else if (rdev->family == CHIP_RV350 ||
2793 rdev->family <= CHIP_RV380) {
2795 mem_trcd = (temp & 0x7) + 3;
2796 mem_trp = ((temp >> 8) & 0x7) + 3;
2797 mem_tras = ((temp >> 11) & 0xf) + 6;
2798 } else if (rdev->family == CHIP_R420 ||
2799 rdev->family == CHIP_R423 ||
2800 rdev->family == CHIP_RV410) {
2802 mem_trcd = (temp & 0xf) + 3;
2805 mem_trp = ((temp >> 8) & 0xf) + 3;
2808 mem_tras = ((temp >> 12) & 0x1f) + 6;
2811 } else { /* RV200, R200 */
2812 mem_trcd = (temp & 0x7) + 1;
2813 mem_trp = ((temp >> 8) & 0x7) + 1;
2814 mem_tras = ((temp >> 12) & 0xf) + 4;
2817 trcd_ff.full = dfixed_const(mem_trcd);
2818 trp_ff.full = dfixed_const(mem_trp);
2819 tras_ff.full = dfixed_const(mem_tras);
2821 /* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
2822 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
2823 data = (temp & (7 << 20)) >> 20;
2824 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
2825 if (rdev->family == CHIP_RS480) /* don't think rs400 */
2826 tcas_ff = memtcas_rs480_ff[data];
2828 tcas_ff = memtcas_ff[data];
2830 tcas_ff = memtcas2_ff[data];
2832 if (rdev->family == CHIP_RS400 ||
2833 rdev->family == CHIP_RS480) {
2834 /* extra cas latency stored in bits 23-25 0-4 clocks */
2835 data = (temp >> 23) & 0x7;
2837 tcas_ff.full += dfixed_const(data);
2840 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
2841 /* on the R300, Tcas is included in Trbs.
2843 temp = RREG32(RADEON_MEM_CNTL);
2844 data = (R300_MEM_NUM_CHANNELS_MASK & temp);
2846 if (R300_MEM_USE_CD_CH_ONLY & temp) {
2847 temp = RREG32(R300_MC_IND_INDEX);
2848 temp &= ~R300_MC_IND_ADDR_MASK;
2849 temp |= R300_MC_READ_CNTL_CD_mcind;
2850 WREG32(R300_MC_IND_INDEX, temp);
2851 temp = RREG32(R300_MC_IND_DATA);
2852 data = (R300_MEM_RBS_POSITION_C_MASK & temp);
2854 temp = RREG32(R300_MC_READ_CNTL_AB);
2855 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
2858 temp = RREG32(R300_MC_READ_CNTL_AB);
2859 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
2861 if (rdev->family == CHIP_RV410 ||
2862 rdev->family == CHIP_R420 ||
2863 rdev->family == CHIP_R423)
2864 trbs_ff = memtrbs_r4xx[data];
2866 trbs_ff = memtrbs[data];
2867 tcas_ff.full += trbs_ff.full;
2870 sclk_eff_ff.full = sclk_ff.full;
2872 if (rdev->flags & RADEON_IS_AGP) {
2873 fixed20_12 agpmode_ff;
2874 agpmode_ff.full = dfixed_const(radeon_agpmode);
2875 temp_ff.full = dfixed_const_666(16);
2876 sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
2878 /* TODO PCIE lanes may affect this - agpmode == 16?? */
2880 if (ASIC_IS_R300(rdev)) {
2881 sclk_delay_ff.full = dfixed_const(250);
2883 if ((rdev->family == CHIP_RV100) ||
2884 rdev->flags & RADEON_IS_IGP) {
2885 if (rdev->mc.vram_is_ddr)
2886 sclk_delay_ff.full = dfixed_const(41);
2888 sclk_delay_ff.full = dfixed_const(33);
2890 if (rdev->mc.vram_width == 128)
2891 sclk_delay_ff.full = dfixed_const(57);
2893 sclk_delay_ff.full = dfixed_const(41);
2897 mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
2899 if (rdev->mc.vram_is_ddr) {
2900 if (rdev->mc.vram_width == 32) {
2901 k1.full = dfixed_const(40);
2904 k1.full = dfixed_const(20);
2908 k1.full = dfixed_const(40);
2912 temp_ff.full = dfixed_const(2);
2913 mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
2914 temp_ff.full = dfixed_const(c);
2915 mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
2916 temp_ff.full = dfixed_const(4);
2917 mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
2918 mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
2919 mc_latency_mclk.full += k1.full;
2921 mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
2922 mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
2925 HW cursor time assuming worst case of full size colour cursor.
2927 temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
2928 temp_ff.full += trcd_ff.full;
2929 if (temp_ff.full < tras_ff.full)
2930 temp_ff.full = tras_ff.full;
2931 cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
2933 temp_ff.full = dfixed_const(cur_size);
2934 cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
2936 Find the total latency for the display data.
2938 disp_latency_overhead.full = dfixed_const(8);
2939 disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
2940 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
2941 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
2943 if (mc_latency_mclk.full > mc_latency_sclk.full)
2944 disp_latency.full = mc_latency_mclk.full;
2946 disp_latency.full = mc_latency_sclk.full;
2948 /* setup Max GRPH_STOP_REQ default value */
2949 if (ASIC_IS_RV100(rdev))
2950 max_stop_req = 0x5c;
2952 max_stop_req = 0x7c;
2956 Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
2957 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
2959 stop_req = mode1->hdisplay * pixel_bytes1 / 16;
2961 if (stop_req > max_stop_req)
2962 stop_req = max_stop_req;
2965 Find the drain rate of the display buffer.
2967 temp_ff.full = dfixed_const((16/pixel_bytes1));
2968 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
2971 Find the critical point of the display buffer.
2973 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
2974 crit_point_ff.full += dfixed_const_half(0);
2976 critical_point = dfixed_trunc(crit_point_ff);
2978 if (rdev->disp_priority == 2) {
2983 The critical point should never be above max_stop_req-4. Setting
2984 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
2986 if (max_stop_req - critical_point < 4)
2989 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
2990 /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/
2991 critical_point = 0x10;
2994 temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
2995 temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
2996 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
2997 temp &= ~(RADEON_GRPH_START_REQ_MASK);
2998 if ((rdev->family == CHIP_R350) &&
2999 (stop_req > 0x15)) {
3002 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3003 temp |= RADEON_GRPH_BUFFER_SIZE;
3004 temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
3005 RADEON_GRPH_CRITICAL_AT_SOF |
3006 RADEON_GRPH_STOP_CNTL);
3008 Write the result into the register.
3010 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3011 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3014 if ((rdev->family == CHIP_RS400) ||
3015 (rdev->family == CHIP_RS480)) {
3016 /* attempt to program RS400 disp regs correctly ??? */
3017 temp = RREG32(RS400_DISP1_REG_CNTL);
3018 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
3019 RS400_DISP1_STOP_REQ_LEVEL_MASK);
3020 WREG32(RS400_DISP1_REQ_CNTL1, (temp |
3021 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3022 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3023 temp = RREG32(RS400_DMIF_MEM_CNTL1);
3024 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
3025 RS400_DISP1_CRITICAL_POINT_STOP_MASK);
3026 WREG32(RS400_DMIF_MEM_CNTL1, (temp |
3027 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
3028 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
3032 DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n",
3033 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
3034 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
3039 stop_req = mode2->hdisplay * pixel_bytes2 / 16;
3041 if (stop_req > max_stop_req)
3042 stop_req = max_stop_req;
3045 Find the drain rate of the display buffer.
3047 temp_ff.full = dfixed_const((16/pixel_bytes2));
3048 disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
3050 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
3051 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
3052 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3053 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
3054 if ((rdev->family == CHIP_R350) &&
3055 (stop_req > 0x15)) {
3058 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3059 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
3060 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
3061 RADEON_GRPH_CRITICAL_AT_SOF |
3062 RADEON_GRPH_STOP_CNTL);
3064 if ((rdev->family == CHIP_RS100) ||
3065 (rdev->family == CHIP_RS200))
3066 critical_point2 = 0;
3068 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
3069 temp_ff.full = dfixed_const(temp);
3070 temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
3071 if (sclk_ff.full < temp_ff.full)
3072 temp_ff.full = sclk_ff.full;
3074 read_return_rate.full = temp_ff.full;
3077 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
3078 time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
3080 time_disp1_drop_priority.full = 0;
3082 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
3083 crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
3084 crit_point_ff.full += dfixed_const_half(0);
3086 critical_point2 = dfixed_trunc(crit_point_ff);
3088 if (rdev->disp_priority == 2) {
3089 critical_point2 = 0;
3092 if (max_stop_req - critical_point2 < 4)
3093 critical_point2 = 0;
3097 if (critical_point2 == 0 && rdev->family == CHIP_R300) {
3098 /* some R300 cards have problem with this set to 0 */
3099 critical_point2 = 0x10;
3102 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3103 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3105 if ((rdev->family == CHIP_RS400) ||
3106 (rdev->family == CHIP_RS480)) {
3108 /* attempt to program RS400 disp2 regs correctly ??? */
3109 temp = RREG32(RS400_DISP2_REQ_CNTL1);
3110 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
3111 RS400_DISP2_STOP_REQ_LEVEL_MASK);
3112 WREG32(RS400_DISP2_REQ_CNTL1, (temp |
3113 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3114 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3115 temp = RREG32(RS400_DISP2_REQ_CNTL2);
3116 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
3117 RS400_DISP2_CRITICAL_POINT_STOP_MASK);
3118 WREG32(RS400_DISP2_REQ_CNTL2, (temp |
3119 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
3120 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
3122 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
3123 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
3124 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
3125 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
3128 DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n",
3129 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
3133 static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
3135 DRM_ERROR("pitch %d\n", t->pitch);
3136 DRM_ERROR("use_pitch %d\n", t->use_pitch);
3137 DRM_ERROR("width %d\n", t->width);
3138 DRM_ERROR("width_11 %d\n", t->width_11);
3139 DRM_ERROR("height %d\n", t->height);
3140 DRM_ERROR("height_11 %d\n", t->height_11);
3141 DRM_ERROR("num levels %d\n", t->num_levels);
3142 DRM_ERROR("depth %d\n", t->txdepth);
3143 DRM_ERROR("bpp %d\n", t->cpp);
3144 DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
3145 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
3146 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
3147 DRM_ERROR("compress format %d\n", t->compress_format);
3150 static int r100_cs_track_cube(struct radeon_device *rdev,
3151 struct r100_cs_track *track, unsigned idx)
3153 unsigned face, w, h;
3154 struct radeon_bo *cube_robj;
3157 for (face = 0; face < 5; face++) {
3158 cube_robj = track->textures[idx].cube_info[face].robj;
3159 w = track->textures[idx].cube_info[face].width;
3160 h = track->textures[idx].cube_info[face].height;
3163 size *= track->textures[idx].cpp;
3165 size += track->textures[idx].cube_info[face].offset;
3167 if (size > radeon_bo_size(cube_robj)) {
3168 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
3169 size, radeon_bo_size(cube_robj));
3170 r100_cs_track_texture_print(&track->textures[idx]);
3177 static int r100_track_compress_size(int compress_format, int w, int h)
3179 int block_width, block_height, block_bytes;
3180 int wblocks, hblocks;
3187 switch (compress_format) {
3188 case R100_TRACK_COMP_DXT1:
3193 case R100_TRACK_COMP_DXT35:
3199 hblocks = (h + block_height - 1) / block_height;
3200 wblocks = (w + block_width - 1) / block_width;
3201 if (wblocks < min_wblocks)
3202 wblocks = min_wblocks;
3203 sz = wblocks * hblocks * block_bytes;
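/* Worked example: DXT1 stores 4x4 texel blocks in 8 bytes and
 * DXT3/DXT5 in 16 bytes, so a 256x256 DXT1 level comes to
 * (256 / 4) * (256 / 4) * 8 = 32768 bytes.
 */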
3207 static int r100_cs_track_texture_check(struct radeon_device *rdev,
3208 struct r100_cs_track *track)
3210 struct radeon_bo *robj;
3212 unsigned u, i, w, h, d;
3215 for (u = 0; u < track->num_texture; u++) {
3216 if (!track->textures[u].enabled)
3218 robj = track->textures[u].robj;
3220 DRM_ERROR("No texture bound to unit %u\n", u);
3224 for (i = 0; i <= track->textures[u].num_levels; i++) {
3225 if (track->textures[u].use_pitch) {
3226 if (rdev->family < CHIP_R300)
3227 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
3229 w = track->textures[u].pitch / (1 << i);
3231 w = track->textures[u].width;
3232 if (rdev->family >= CHIP_RV515)
3233 w |= track->textures[u].width_11;
3235 if (track->textures[u].roundup_w)
3236 w = roundup_pow_of_two(w);
3238 h = track->textures[u].height;
3239 if (rdev->family >= CHIP_RV515)
3240 h |= track->textures[u].height_11;
3242 if (track->textures[u].roundup_h)
3243 h = roundup_pow_of_two(h);
3244 if (track->textures[u].tex_coord_type == 1) {
3245 d = (1 << track->textures[u].txdepth) / (1 << i);
3251 if (track->textures[u].compress_format) {
3253 size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
3254 /* compressed textures are block based */
3258 size *= track->textures[u].cpp;
3260 switch (track->textures[u].tex_coord_type) {
3265 if (track->separate_cube) {
3266 ret = r100_cs_track_cube(rdev, track, u);
3273 DRM_ERROR("Invalid texture coordinate type %u for unit "
3274 "%u\n", track->textures[u].tex_coord_type, u);
3277 if (size > radeon_bo_size(robj)) {
3278 DRM_ERROR("Texture of unit %u needs %lu bytes but is "
3279 "%lu\n", u, size, radeon_bo_size(robj));
3280 r100_cs_track_texture_print(&track->textures[u]);
3287 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3294 for (i = 0; i < track->num_cb; i++) {
3295 if (track->cb[i].robj == NULL) {
3296 if (!(track->zb_cb_clear || track->color_channel_mask ||
3297 track->blend_read_enable)) {
3300 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
3303 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
3304 size += track->cb[i].offset;
3305 if (size > radeon_bo_size(track->cb[i].robj)) {
3306 DRM_ERROR("[drm] Buffer too small for color buffer %d "
3307 "(need %lu have %lu) !\n", i, size,
3308 radeon_bo_size(track->cb[i].robj));
3309 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
3310 i, track->cb[i].pitch, track->cb[i].cpp,
3311 track->cb[i].offset, track->maxy);
3315 if (track->z_enabled) {
3316 if (track->zb.robj == NULL) {
3317 DRM_ERROR("[drm] No buffer for z buffer !\n");
3320 size = track->zb.pitch * track->zb.cpp * track->maxy;
3321 size += track->zb.offset;
3322 if (size > radeon_bo_size(track->zb.robj)) {
3323 DRM_ERROR("[drm] Buffer too small for z buffer "
3324 "(need %lu have %lu) !\n", size,
3325 radeon_bo_size(track->zb.robj));
3326 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
3327 track->zb.pitch, track->zb.cpp,
3328 track->zb.offset, track->maxy);
3332 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
3333 if (track->vap_vf_cntl & (1 << 14)) {
3334 nverts = track->vap_alt_nverts;
3336 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
3338 switch (prim_walk) {
3340 for (i = 0; i < track->num_arrays; i++) {
3341 size = track->arrays[i].esize * track->max_indx * 4;
3342 if (track->arrays[i].robj == NULL) {
3343 DRM_ERROR("(PW %u) Vertex array %u no buffer "
3344 "bound\n", prim_walk, i);
3347 if (size > radeon_bo_size(track->arrays[i].robj)) {
3348 dev_err(rdev->dev, "(PW %u) Vertex array %u "
3349 "need %lu dwords have %lu dwords\n",
3350 prim_walk, i, size >> 2,
3351 radeon_bo_size(track->arrays[i].robj)
3353 DRM_ERROR("Max indices %u\n", track->max_indx);
3359 for (i = 0; i < track->num_arrays; i++) {
3360 size = track->arrays[i].esize * (nverts - 1) * 4;
3361 if (track->arrays[i].robj == NULL) {
3362 DRM_ERROR("(PW %u) Vertex array %u no buffer "
3363 "bound\n", prim_walk, i);
3366 if (size > radeon_bo_size(track->arrays[i].robj)) {
3367 dev_err(rdev->dev, "(PW %u) Vertex array %u "
3368 "need %lu dwords have %lu dwords\n",
3369 prim_walk, i, size >> 2,
3370 radeon_bo_size(track->arrays[i].robj)
3377 size = track->vtx_size * nverts;
3378 if (size != track->immd_dwords) {
3379 DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
3380 track->immd_dwords, size);
3381 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
3382 nverts, track->vtx_size);
3387 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
3391 return r100_cs_track_texture_check(rdev, track);
3394 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
3398 if (rdev->family < CHIP_R300) {
3400 if (rdev->family <= CHIP_RS200)
3401 track->num_texture = 3;
3403 track->num_texture = 6;
3405 track->separate_cube = 1;
3408 track->num_texture = 16;
3410 track->separate_cube = 0;
3413 for (i = 0; i < track->num_cb; i++) {
3414 track->cb[i].robj = NULL;
3415 track->cb[i].pitch = 8192;
3416 track->cb[i].cpp = 16;
3417 track->cb[i].offset = 0;
3419 track->z_enabled = true;
3420 track->zb.robj = NULL;
3421 track->zb.pitch = 8192;
3423 track->zb.offset = 0;
3424 track->vtx_size = 0x7F;
3425 track->immd_dwords = 0xFFFFFFFFUL;
3426 track->num_arrays = 11;
3427 track->max_indx = 0x00FFFFFFUL;
3428 for (i = 0; i < track->num_arrays; i++) {
3429 track->arrays[i].robj = NULL;
3430 track->arrays[i].esize = 0x7F;
3432 for (i = 0; i < track->num_texture; i++) {
3433 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
3434 track->textures[i].pitch = 16536;
3435 track->textures[i].width = 16536;
3436 track->textures[i].height = 16536;
3437 track->textures[i].width_11 = 1 << 11;
3438 track->textures[i].height_11 = 1 << 11;
3439 track->textures[i].num_levels = 12;
3440 if (rdev->family <= CHIP_RS200) {
3441 track->textures[i].tex_coord_type = 0;
3442 track->textures[i].txdepth = 0;
3444 track->textures[i].txdepth = 16;
3445 track->textures[i].tex_coord_type = 1;
3447 track->textures[i].cpp = 64;
3448 track->textures[i].robj = NULL;
3449 /* CS IB emission code makes sure texture units are disabled */
3450 track->textures[i].enabled = false;
3451 track->textures[i].roundup_w = true;
3452 track->textures[i].roundup_h = true;
3453 if (track->separate_cube)
3454 for (face = 0; face < 5; face++) {
3455 track->textures[i].cube_info[face].robj = NULL;
3456 track->textures[i].cube_info[face].width = 16536;
3457 track->textures[i].cube_info[face].height = 16536;
3458 track->textures[i].cube_info[face].offset = 0;
3463 int r100_ring_test(struct radeon_device *rdev)
3470 r = radeon_scratch_get(rdev, &scratch);
3472 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
3475 WREG32(scratch, 0xCAFEDEAD);
3476 r = radeon_ring_lock(rdev, 2);
3478 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3479 radeon_scratch_free(rdev, scratch);
3482 radeon_ring_write(rdev, PACKET0(scratch, 0));
3483 radeon_ring_write(rdev, 0xDEADBEEF);
3484 radeon_ring_unlock_commit(rdev);
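/* The packet above asks the CP to write 0xDEADBEEF into the scratch
 * register; the loop below polls that register until the write lands
 * or the usec timeout expires.
 */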
3485 for (i = 0; i < rdev->usec_timeout; i++) {
3486 tmp = RREG32(scratch);
3487 if (tmp == 0xDEADBEEF) {
3492 if (i < rdev->usec_timeout) {
3493 DRM_INFO("ring test succeeded in %d usecs\n", i);
3495 DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n",
3499 radeon_scratch_free(rdev, scratch);
3503 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3505 radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
3506 radeon_ring_write(rdev, ib->gpu_addr);
3507 radeon_ring_write(rdev, ib->length_dw);
3510 int r100_ib_test(struct radeon_device *rdev)
3512 struct radeon_ib *ib;
3518 r = radeon_scratch_get(rdev, &scratch);
3520 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3523 WREG32(scratch, 0xCAFEDEAD);
3524 r = radeon_ib_get(rdev, &ib);
3528 ib->ptr[0] = PACKET0(scratch, 0);
3529 ib->ptr[1] = 0xDEADBEEF;
3530 ib->ptr[2] = PACKET2(0);
3531 ib->ptr[3] = PACKET2(0);
3532 ib->ptr[4] = PACKET2(0);
3533 ib->ptr[5] = PACKET2(0);
3534 ib->ptr[6] = PACKET2(0);
3535 ib->ptr[7] = PACKET2(0);
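/* ptr[2]..ptr[7] are PACKET2 type-2 NOPs, padding the two useful
 * dwords (the scratch register write) out to an 8-dword IB.
 */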
3537 r = radeon_ib_schedule(rdev, ib);
3539 radeon_scratch_free(rdev, scratch);
3540 radeon_ib_free(rdev, &ib);
3543 r = radeon_fence_wait(ib->fence, false);
3547 for (i = 0; i < rdev->usec_timeout; i++) {
3548 tmp = RREG32(scratch);
3549 if (tmp == 0xDEADBEEF) {
3554 if (i < rdev->usec_timeout) {
3555 DRM_INFO("ib test succeeded in %u usecs\n", i);
3557 DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
3561 radeon_scratch_free(rdev, scratch);
3562 radeon_ib_free(rdev, &ib);
3566 void r100_ib_fini(struct radeon_device *rdev)
3568 radeon_ib_pool_fini(rdev);
3571 int r100_ib_init(struct radeon_device *rdev)
3575 r = radeon_ib_pool_init(rdev);
3577 dev_err(rdev->dev, "failled initializing IB pool (%d).\n", r);
3581 r = r100_ib_test(rdev);
3583 dev_err(rdev->dev, "failled testing IB (%d).\n", r);
3590 void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3592 /* Shutdown CP, we shouldn't need to do that but better be safe than sorry
3595 rdev->cp.ready = false;
3596 WREG32(R_000740_CP_CSQ_CNTL, 0);
3598 /* Save a few CRTC registers */
3599 save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
3600 save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
3601 save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
3602 save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
3603 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3604 save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
3605 save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
3608 /* Disable VGA aperture access */
3609 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
3610 /* Disable cursor, overlay, crtc */
3611 WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
3612 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
3613 S_000054_CRTC_DISPLAY_DIS(1));
3614 WREG32(R_000050_CRTC_GEN_CNTL,
3615 (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
3616 S_000050_CRTC_DISP_REQ_EN_B(1));
3617 WREG32(R_000420_OV0_SCALE_CNTL,
3618 C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
3619 WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
3620 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3621 WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
3622 S_000360_CUR2_LOCK(1));
3623 WREG32(R_0003F8_CRTC2_GEN_CNTL,
3624 (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
3625 S_0003F8_CRTC2_DISPLAY_DIS(1) |
3626 S_0003F8_CRTC2_DISP_REQ_EN_B(1));
3627 WREG32(R_000360_CUR2_OFFSET,
3628 C_000360_CUR2_LOCK & save->CUR2_OFFSET);
3632 void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
3634 /* Update base address for crtc */
3635 WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3636 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3637 WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3639 /* Restore CRTC registers */
3640 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
3641 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
3642 WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
3643 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3644 WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
3648 void r100_vga_render_disable(struct radeon_device *rdev)
3652 tmp = RREG8(R_0003C2_GENMO_WT);
3653 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
3656 static void r100_debugfs(struct radeon_device *rdev)
3660 r = r100_debugfs_mc_info_init(rdev);
3662 dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
3665 static void r100_mc_program(struct radeon_device *rdev)
3667 struct r100_mc_save save;
3669 /* Stops all mc clients */
3670 r100_mc_stop(rdev, &save);
3671 if (rdev->flags & RADEON_IS_AGP) {
3672 WREG32(R_00014C_MC_AGP_LOCATION,
3673 S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
3674 S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
3675 WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
3676 if (rdev->family > CHIP_RV200)
3677 WREG32(R_00015C_AGP_BASE_2,
3678 upper_32_bits(rdev->mc.agp_base) & 0xff);
3680 WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
3681 WREG32(R_000170_AGP_BASE, 0);
3682 if (rdev->family > CHIP_RV200)
3683 WREG32(R_00015C_AGP_BASE_2, 0);
3685 /* Wait for mc idle */
3686 if (r100_mc_wait_for_idle(rdev))
3687 dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
3688 /* Program MC, should be a 32-bit limited address space */
3689 WREG32(R_000148_MC_FB_LOCATION,
3690 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
3691 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
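/* Example encoding with a hypothetical layout: vram_start = 0 and
 * vram_end = 0x07ffffff (128 MB) give MC_FB_START = 0x0000 and
 * MC_FB_TOP = 0x07ff, i.e. MC_FB_LOCATION = 0x07ff0000 with the top
 * in the high half-word.
 */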
3692 r100_mc_resume(rdev, &save);
3695 void r100_clock_startup(struct radeon_device *rdev)
3699 if (radeon_dynclks != -1 && radeon_dynclks)
3700 radeon_legacy_set_clock_gating(rdev, 1);
3701 /* We need to force on some of the blocks */
3702 tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
3703 tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
3704 if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
3705 tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
3706 WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
3709 static int r100_startup(struct radeon_device *rdev)
3713 /* set common regs */
3714 r100_set_common_regs(rdev);
3716 r100_mc_program(rdev);
3718 r100_clock_startup(rdev);
3719 /* Initialize GPU configuration (# pipes, ...) */
3720 // r100_gpu_init(rdev);
3721 /* Initialize GART (initialize after TTM so we can allocate
3722 * memory through TTM but finalize after TTM) */
3723 r100_enable_bm(rdev);
3724 if (rdev->flags & RADEON_IS_PCI) {
3725 r = r100_pci_gart_enable(rdev);
3731 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
3732 /* 1M ring buffer */
3733 r = r100_cp_init(rdev, 1024 * 1024);
3735 dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
3738 r = r100_wb_init(rdev);
3740 dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
3741 r = r100_ib_init(rdev);
3743 dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
3749 int r100_resume(struct radeon_device *rdev)
3751 /* Make sure GART is not working */
3752 if (rdev->flags & RADEON_IS_PCI)
3753 r100_pci_gart_disable(rdev);
3754 /* Resume clock before doing reset */
3755 r100_clock_startup(rdev);
3756 /* Reset gpu before posting otherwise ATOM will enter an infinite loop */
3757 if (radeon_asic_reset(rdev)) {
3758 dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3759 RREG32(R_000E40_RBBM_STATUS),
3760 RREG32(R_0007C0_CP_STAT));
3763 radeon_combios_asic_init(rdev->ddev);
3764 /* Resume clock after posting */
3765 r100_clock_startup(rdev);
3766 /* Initialize surface registers */
3767 radeon_surface_init(rdev);
3768 return r100_startup(rdev);
3771 int r100_suspend(struct radeon_device *rdev)
3773 r100_cp_disable(rdev);
3774 r100_wb_disable(rdev);
3775 r100_irq_disable(rdev);
3776 if (rdev->flags & RADEON_IS_PCI)
3777 r100_pci_gart_disable(rdev);
3781 void r100_fini(struct radeon_device *rdev)
3786 radeon_gem_fini(rdev);
3787 if (rdev->flags & RADEON_IS_PCI)
3788 r100_pci_gart_fini(rdev);
3789 radeon_agp_fini(rdev);
3790 radeon_irq_kms_fini(rdev);
3791 radeon_fence_driver_fini(rdev);
3792 radeon_bo_fini(rdev);
3793 radeon_atombios_fini(rdev);
3798 int r100_init(struct radeon_device *rdev)
3802 /* Register debugfs file specific to this group of asics */
3805 r100_vga_render_disable(rdev);
3806 /* Initialize scratch registers */
3807 radeon_scratch_init(rdev);
3808 /* Initialize surface registers */
3809 radeon_surface_init(rdev);
3810 /* TODO: disable VGA, need to use VGA request */
3812 if (!radeon_get_bios(rdev)) {
3813 if (ASIC_IS_AVIVO(rdev))
3816 if (rdev->is_atom_bios) {
3817 dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
3820 r = radeon_combios_init(rdev);
3824 /* Reset gpu before posting otherwise ATOM will enter an infinite loop */
3825 if (radeon_asic_reset(rdev)) {
3827 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3828 RREG32(R_000E40_RBBM_STATUS),
3829 RREG32(R_0007C0_CP_STAT));
3831 /* check if cards are posted or not */
3832 if (radeon_boot_test_post_card(rdev) == false)
3834 /* Set asic errata */
3836 /* Initialize clocks */
3837 radeon_get_clock_info(rdev->ddev);
3838 /* initialize AGP */
3839 if (rdev->flags & RADEON_IS_AGP) {
3840 r = radeon_agp_init(rdev);
3842 radeon_agp_disable(rdev);
3845 /* initialize VRAM */
3848 r = radeon_fence_driver_init(rdev);
3851 r = radeon_irq_kms_init(rdev);
3854 /* Memory manager */
3855 r = radeon_bo_init(rdev);
3858 if (rdev->flags & RADEON_IS_PCI) {
3859 r = r100_pci_gart_init(rdev);
3863 r100_set_safe_registers(rdev);
3864 rdev->accel_working = true;
3865 r = r100_startup(rdev);
3867 /* Something went wrong with the accel init, stop accel */
3868 dev_err(rdev->dev, "Disabling GPU acceleration\n");
3872 radeon_irq_kms_fini(rdev);
3873 if (rdev->flags & RADEON_IS_PCI)
3874 r100_pci_gart_fini(rdev);
3875 rdev->accel_working = false;