From 43fb778754ffd1ef8b4f579a5c94f118292acfb5 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Fri, 4 Jan 2013 09:24:18 -0500
Subject: [PATCH] drm/radeon: split r6xx and r7xx copy_dma functions

- r6xx actually uses a slightly different packet format,
  although both formats seem to work ok.
- r7xx doesn't have the count multiple of 2 limitation.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/radeon/r600.c        | 14 +++++++-------
 drivers/gpu/drm/radeon/radeon_asic.c |  4 +-
 drivers/gpu/drm/radeon/radeon_asic.h |  4 ++
 drivers/gpu/drm/radeon/rv770.c       | 74 ++++++++++++++++++++++++++++++++++
 4 files changed, 87 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 923f93647042..537e259b3837 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2646,7 +2646,7 @@ int r600_copy_blit(struct radeon_device *rdev,
  * @num_gpu_pages: number of GPU pages to xfer
  * @fence: radeon fence object
  *
- * Copy GPU paging using the DMA engine (r6xx-r7xx).
+ * Copy GPU paging using the DMA engine (r6xx).
  * Used by the radeon ttm implementation to move pages if
  * registered as the asic copy callback.
  */
@@ -2669,8 +2669,8 @@ int r600_copy_dma(struct radeon_device *rdev,
 	}
 
 	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
-	num_loops = DIV_ROUND_UP(size_in_dw, 0xffff);
-	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
+	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
 	if (r) {
 		DRM_ERROR("radeon: moving bo (%d).\n", r);
 		radeon_semaphore_free(rdev, &sem, NULL);
@@ -2687,14 +2687,14 @@ int r600_copy_dma(struct radeon_device *rdev,
 
 	for (i = 0; i < num_loops; i++) {
 		cur_size_in_dw = size_in_dw;
-		if (cur_size_in_dw > 0xFFFF)
-			cur_size_in_dw = 0xFFFF;
+		if (cur_size_in_dw > 0xFFFE)
+			cur_size_in_dw = 0xFFFE;
 		size_in_dw -= cur_size_in_dw;
 		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
 		radeon_ring_write(ring, dst_offset & 0xfffffffc);
 		radeon_ring_write(ring, src_offset & 0xfffffffc);
-		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
-		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
+					 (upper_32_bits(src_offset) & 0xff)));
 		src_offset += cur_size_in_dw * 4;
 		dst_offset += cur_size_in_dw * 4;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 596bcbe80ed0..9056fafb00ea 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1140,9 +1140,9 @@ static struct radeon_asic rv770_asic = {
 	.copy = {
 		.blit = &r600_copy_blit,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.dma = &r600_copy_dma,
+		.dma = &rv770_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.copy = &r600_copy_dma,
+		.copy = &rv770_copy_dma,
 		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
 	},
 	.surface = {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 5f4882cc2152..15d70e613076 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -403,6 +403,10 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
 void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 void r700_cp_stop(struct radeon_device *rdev);
 void r700_cp_fini(struct radeon_device *rdev);
+int rv770_copy_dma(struct radeon_device *rdev,
+		   uint64_t src_offset, uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence **fence);
 
 /*
  * evergreen
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 87c979c4f721..1b2444f4d8f4 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -887,6 +887,80 @@ static int rv770_mc_init(struct radeon_device *rdev)
 	return 0;
 }
 
+/**
+ * rv770_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the DMA engine (r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int rv770_copy_dma(struct radeon_device *rdev,
+		  uint64_t src_offset, uint64_t dst_offset,
+		  unsigned num_gpu_pages,
+		  struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_dw, cur_size_in_dw;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
+	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
+	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_dw = size_in_dw;
+		if (cur_size_in_dw > 0xFFFF)
+			cur_size_in_dw = 0xFFFF;
+		size_in_dw -= cur_size_in_dw;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, src_offset & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		src_offset += cur_size_in_dw * 4;
+		dst_offset += cur_size_in_dw * 4;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
+
 static int rv770_startup(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring;
-- 
2.39.5
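
P.S. For anyone comparing the two packet formats the commit message mentions, below is a small stand-alone sketch (not part of the patch) that packs one COPY packet each way. It is a minimal illustration, assuming the DMA_PACKET() field layout and the 0x3 DMA_PACKET_COPY opcode as I read them in the driver's r600d.h; upper_32_bits() is re-implemented locally so the example builds in userspace. The dword counts it prints are also why the r6xx ring reservation above is num_loops * 4 + 8 while rv770 needs num_loops * 5 + 8.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the DMA_PACKET() helper in r600d.h; the 0x3 opcode for
 * DMA_PACKET_COPY is my reading of that header, so treat it as an
 * assumption, not a reference. */
#define DMA_PACKET_COPY 0x3
#define DMA_PACKET(cmd, t, s, n) (((uint32_t)((cmd) & 0xF) << 28) | \
				  ((uint32_t)((t) & 0x1) << 23) |   \
				  ((uint32_t)((s) & 0x1) << 22) |   \
				  ((uint32_t)(n) & 0xFFFF))

/* Local stand-in for the kernel's upper_32_bits(). */
#define upper_32_bits(x) ((uint32_t)((x) >> 32))

/* r6xx layout: 4 dwords per COPY packet; both 8-bit address extensions
 * share dword 3 (dst in bits 23:16, src in bits 7:0).  The dword count
 * must stay a multiple of 2, hence the 0xFFFE clamp in r600_copy_dma(). */
static int r6xx_pack_copy(uint32_t *out, uint64_t src, uint64_t dst, uint32_t ndw)
{
	out[0] = DMA_PACKET(DMA_PACKET_COPY, 0, 0, ndw);
	out[1] = (uint32_t)dst & 0xfffffffc;
	out[2] = (uint32_t)src & 0xfffffffc;
	out[3] = ((upper_32_bits(dst) & 0xff) << 16) |
		  (upper_32_bits(src) & 0xff);
	return 4;
}

/* r7xx layout: 5 dwords per COPY packet; the address extensions get a
 * dword each, and odd dword counts up to 0xFFFF are allowed. */
static int r7xx_pack_copy(uint32_t *out, uint64_t src, uint64_t dst, uint32_t ndw)
{
	out[0] = DMA_PACKET(DMA_PACKET_COPY, 0, 0, ndw);
	out[1] = (uint32_t)dst & 0xfffffffc;
	out[2] = (uint32_t)src & 0xfffffffc;
	out[3] = upper_32_bits(dst) & 0xff;
	out[4] = upper_32_bits(src) & 0xff;
	return 5;
}

int main(void)
{
	/* Arbitrary >4GB GPU addresses so the high bits are visible. */
	uint64_t src = 0x1200001000ULL, dst = 0x3400002000ULL;
	uint32_t pkt[5];
	int i, n;

	n = r6xx_pack_copy(pkt, src, dst, 0xFFFE);
	printf("r6xx, %d dwords:", n);
	for (i = 0; i < n; i++)
		printf(" %08" PRIx32, pkt[i]);
	printf("\n");

	n = r7xx_pack_copy(pkt, src, dst, 0xFFFF);
	printf("r7xx, %d dwords:", n);
	for (i = 0; i < n; i++)
		printf(" %08" PRIx32, pkt[i]);
	printf("\n");
	return 0;
}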