drm/amdgpu: fix amdgpu_fill_buffer (v2)
author    Christian König <christian.koenig@amd.com>
          Thu, 17 Nov 2016 11:06:38 +0000 (12:06 +0100)
committer Alex Deucher <alexander.deucher@amd.com>
          Wed, 23 Nov 2016 20:08:44 +0000 (15:08 -0500)
The mem start is relative to the start of the memory domain in the GPU
address space, not an absolute address, so this worked only when VRAM
was mapped at offset 0.

It also didn't handle multiple drm_mm_nodes for split VRAM.

v2: rebase on dma_fence renaming

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
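
For context, a minimal sketch of the addressing rule the fix depends on:
a drm_mm_node's start is a page offset within its memory domain, so a
usable GPU address needs the domain base added on top. The gpu_offset
field below matches the TTM layout of this era, but take the helper as
an illustration of the idea (node_gpu_addr is a hypothetical name), not
as the driver's actual amdgpu_mm_node_addr():

/* Illustrative only; mirrors, but is not, the driver helper. */
#include <drm/drm_mm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

static uint64_t node_gpu_addr(struct ttm_buffer_object *tbo,
			      struct drm_mm_node *node,
			      struct ttm_mem_reg *mem)
{
	/* node->start is in pages, relative to the domain start... */
	uint64_t addr = node->start << PAGE_SHIFT;

	/* ...so add the domain's base in the GPU address space.  With
	 * VRAM mapped at offset 0 this term vanishes, which is why the
	 * old bo->tbo.mem.start math appeared to work. */
	addr += tbo->bdev->man[mem->mem_type].gpu_offset;
	return addr;
}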

index 1821c05484d0ad5a244dae76c8d6c5f192046fd0..8f18b8ed2b3a22991e6ab1adee3f30d545a78fd9 100644
@@ -1382,28 +1382,40 @@ error_free:
 }
 
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
-               uint32_t src_data,
-               struct reservation_object *resv,
-               struct dma_fence **fence)
+                      uint32_t src_data,
+                      struct reservation_object *resv,
+                      struct dma_fence **fence)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-       struct amdgpu_job *job;
+       uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 
-       uint32_t max_bytes, byte_count;
-       uint64_t dst_offset;
+       struct drm_mm_node *mm_node;
+       unsigned long num_pages;
        unsigned int num_loops, num_dw;
-       unsigned int i;
+
+       struct amdgpu_job *job;
        int r;
 
-       byte_count = bo->tbo.num_pages << PAGE_SHIFT;
-       max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
-       num_loops = DIV_ROUND_UP(byte_count, max_bytes);
+       if (!ring->ready) {
+               DRM_ERROR("Trying to clear memory with ring turned off.\n");
+               return -EINVAL;
+       }
+
+       num_pages = bo->tbo.num_pages;
+       mm_node = bo->tbo.mem.mm_node;
+       num_loops = 0;
+       while (num_pages) {
+               uint32_t byte_count = mm_node->size << PAGE_SHIFT;
+
+               num_loops += DIV_ROUND_UP(byte_count, max_bytes);
+               num_pages -= mm_node->size;
+               ++mm_node;
+       }
        num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
 
        /* for IB padding */
-       while (num_dw & 0x7)
-               num_dw++;
+       num_dw += 64;
 
        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
        if (r)
@@ -1411,28 +1423,43 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 
        if (resv) {
                r = amdgpu_sync_resv(adev, &job->sync, resv,
-                               AMDGPU_FENCE_OWNER_UNDEFINED);
+                                    AMDGPU_FENCE_OWNER_UNDEFINED);
                if (r) {
                        DRM_ERROR("sync failed (%d).\n", r);
                        goto error_free;
                }
        }
 
-       dst_offset = bo->tbo.mem.start << PAGE_SHIFT;
-       for (i = 0; i < num_loops; i++) {
-               uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
+       num_pages = bo->tbo.num_pages;
+       mm_node = bo->tbo.mem.mm_node;
 
-               amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
-                               dst_offset, cur_size_in_bytes);
+       while (num_pages) {
+               uint32_t byte_count = mm_node->size << PAGE_SHIFT;
+               uint64_t dst_addr;
 
-               dst_offset += cur_size_in_bytes;
-               byte_count -= cur_size_in_bytes;
+               r = amdgpu_mm_node_addr(&bo->tbo, mm_node,
+                                       &bo->tbo.mem, &dst_addr);
+               if (r)
+                       return r;
+
+               while (byte_count) {
+                       uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
+
+                       amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
+                                               dst_addr, cur_size_in_bytes);
+
+                       dst_addr += cur_size_in_bytes;
+                       byte_count -= cur_size_in_bytes;
+               }
+
+               num_pages -= mm_node->size;
+               ++mm_node;
        }
 
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        WARN_ON(job->ibs[0].length_dw > num_dw);
        r = amdgpu_job_submit(job, ring, &adev->mman.entity,
-                       AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+                             AMDGPU_FENCE_OWNER_UNDEFINED, fence);
        if (r)
                goto error_free;
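
The fix also sizes the IB up front with a first pass over the nodes,
since each node restarts the fill at a new address and rounds up on its
own. A toy, userspace-C sketch of that sizing pass (the hypothetical
struct node stands in for struct drm_mm_node):

#include <stddef.h>
#include <stdint.h>

#define PAGE_SHIFT 12			/* assume 4 KiB pages */

struct node { uint64_t size; };		/* size in pages */

/* One fill command covers at most max_bytes, so each node needs
 * DIV_ROUND_UP(node_bytes, max_bytes) commands of its own. */
static unsigned int count_fill_loops(const struct node *nodes,
				     size_t count, uint32_t max_bytes)
{
	unsigned int loops = 0;
	size_t i;

	for (i = 0; i < count; i++) {
		uint64_t bytes = nodes[i].size << PAGE_SHIFT;

		loops += (bytes + max_bytes - 1) / max_bytes;
	}
	return loops;
}

For example, nodes of 3 MiB and 5 MiB with a 2 MiB fill_max_bytes give
2 + 3 = 5 commands, while rounding once over the combined 8 MiB would
reserve space for only 4 and undersize the IB.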