git.karo-electronics.de Git - linux-beck.git/commitdiff
drm/radeon: remove radeon_fence_create
authorChristian König <deathsimple@vodafone.de>
Tue, 8 May 2012 12:24:01 +0000 (14:24 +0200)
committerChristian König <deathsimple@vodafone.de>
Thu, 21 Jun 2012 07:38:35 +0000 (09:38 +0200)
It is completely unnecessary to create fences
before they are emitted, so remove it and a bunch
of checks if fences are emitted or not.

Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
15 files changed:
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r200.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_blit_kms.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_benchmark.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_sa.c
drivers/gpu/drm/radeon/radeon_test.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/si.c

index 7fb3d2e0434c71d52725633573bc3f17a34a010d..8e40ba4a9b79db12c38096aa7d690ba4d8283dc7 100644 (file)
@@ -1371,7 +1371,7 @@ void evergreen_mc_program(struct radeon_device *rdev)
  */
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-       struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+       struct radeon_ring *ring = &rdev->ring[ib->ring];
 
        /* set to DX10/11 mode */
        radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
index b7bf18e40215c3afe45766457fa9fe802f217fae..2366be3df0743863ad4a76f75bd7169a36da7917 100644 (file)
@@ -850,7 +850,7 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
 
 void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-       struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+       struct radeon_ring *ring = &rdev->ring[ib->ring];
 
        /* set to DX10/11 mode */
        radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
index fb44e7e49083ac71702fd79bd402c451aa75e29d..415b7d8fbba2f5c0de6a67b7ede27bb4f2b25a30 100644 (file)
@@ -883,7 +883,7 @@ int r100_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
                   unsigned num_gpu_pages,
-                  struct radeon_fence *fence)
+                  struct radeon_fence **fence)
 {
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        uint32_t cur_pages;
@@ -947,7 +947,7 @@ int r100_copy_blit(struct radeon_device *rdev,
                          RADEON_WAIT_HOST_IDLECLEAN |
                          RADEON_WAIT_DMA_GUI_IDLE);
        if (fence) {
-               r = radeon_fence_emit(rdev, fence);
+               r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
        }
        radeon_ring_unlock_commit(rdev, ring);
        return r;
index a26144d012074bc302d15607b0660c2609347334..f0889259eb0826296af5e744aeb971a7ebdf6c2f 100644 (file)
@@ -85,7 +85,7 @@ int r200_copy_dma(struct radeon_device *rdev,
                  uint64_t src_offset,
                  uint64_t dst_offset,
                  unsigned num_gpu_pages,
-                 struct radeon_fence *fence)
+                 struct radeon_fence **fence)
 {
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        uint32_t size;
@@ -120,7 +120,7 @@ int r200_copy_dma(struct radeon_device *rdev,
        radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
        if (fence) {
-               r = radeon_fence_emit(rdev, fence);
+               r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
        }
        radeon_ring_unlock_commit(rdev, ring);
        return r;
index bff62729381215996778ee493515bb9b6a6469f9..a80e61e138dba867eb85fca27f56471181bc078f 100644 (file)
@@ -2309,7 +2309,7 @@ int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
                   unsigned num_gpu_pages,
-                  struct radeon_fence *fence)
+                  struct radeon_fence **fence)
 {
        struct radeon_sa_bo *vb = NULL;
        int r;
@@ -2607,7 +2607,7 @@ void r600_fini(struct radeon_device *rdev)
  */
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-       struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+       struct radeon_ring *ring = &rdev->ring[ib->ring];
 
        /* FIXME: implement */
        radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
index 03b6e0d3d503e58bf3c49f87d004741552fd1df9..02f4eebf805f2bc69f0ee3a1703d7ac84f60d9d9 100644 (file)
@@ -703,20 +703,20 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
        return 0;
 }
 
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
                         struct radeon_sa_bo *vb)
 {
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;
 
-       r = radeon_fence_emit(rdev, fence);
+       r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
                return;
        }
 
        radeon_ring_unlock_commit(rdev, ring);
-       radeon_sa_bo_free(rdev, &vb, fence);
+       radeon_sa_bo_free(rdev, &vb, *fence);
 }
 
 void r600_kms_blit_copy(struct radeon_device *rdev,
index fefcca55c1eb8bb76c7611cc0f38e5f04fe47765..e2feddd91df5afcacbcafa8759cec3e513cb0b67 100644 (file)
@@ -113,7 +113,6 @@ extern int radeon_lockup_timeout;
 
 /* fence seq are set to this number when signaled */
 #define RADEON_FENCE_SIGNALED_SEQ              0LL
-#define RADEON_FENCE_NOTEMITED_SEQ             (~0LL)
 
 /* internal ring indices */
 /* r1xx+ has gfx CP ring */
@@ -277,8 +276,7 @@ struct radeon_fence {
 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
 int radeon_fence_driver_init(struct radeon_device *rdev);
 void radeon_fence_driver_fini(struct radeon_device *rdev);
-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
-int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
+int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
 void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
@@ -630,6 +628,7 @@ struct radeon_ib {
        uint32_t                        length_dw;
        uint64_t                        gpu_addr;
        uint32_t                        *ptr;
+       int                             ring;
        struct radeon_fence             *fence;
        unsigned                        vm_id;
        bool                            is_const_ib;
@@ -1192,20 +1191,20 @@ struct radeon_asic {
                            uint64_t src_offset,
                            uint64_t dst_offset,
                            unsigned num_gpu_pages,
-                           struct radeon_fence *fence);
+                           struct radeon_fence **fence);
                u32 blit_ring_index;
                int (*dma)(struct radeon_device *rdev,
                           uint64_t src_offset,
                           uint64_t dst_offset,
                           unsigned num_gpu_pages,
-                          struct radeon_fence *fence);
+                          struct radeon_fence **fence);
                u32 dma_ring_index;
                /* method used for bo copy */
                int (*copy)(struct radeon_device *rdev,
                            uint64_t src_offset,
                            uint64_t dst_offset,
                            unsigned num_gpu_pages,
-                           struct radeon_fence *fence);
+                           struct radeon_fence **fence);
                /* ring used for bo copies */
                u32 copy_ring_index;
        } copy;
index e76a941ef14eaf8ebabe44635b1cf84403d737d0..8cdf075aacfd0678b079dfa08711c02667168fd2 100644 (file)
@@ -79,7 +79,7 @@ int r100_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
                   unsigned num_gpu_pages,
-                  struct radeon_fence *fence);
+                  struct radeon_fence **fence);
 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t tiling_flags, uint32_t pitch,
                         uint32_t offset, uint32_t obj_size);
@@ -144,7 +144,7 @@ extern int r200_copy_dma(struct radeon_device *rdev,
                         uint64_t src_offset,
                         uint64_t dst_offset,
                         unsigned num_gpu_pages,
-                        struct radeon_fence *fence);
+                        struct radeon_fence **fence);
 void r200_set_safe_registers(struct radeon_device *rdev);
 
 /*
@@ -318,7 +318,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
 int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset, uint64_t dst_offset,
-                  unsigned num_gpu_pages, struct radeon_fence *fence);
+                  unsigned num_gpu_pages, struct radeon_fence **fence);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -364,7 +364,7 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
 /* r600 blit */
 int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
                           struct radeon_sa_bo **vb);
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
                         struct radeon_sa_bo *vb);
 void r600_kms_blit_copy(struct radeon_device *rdev,
                        u64 src_gpu_addr, u64 dst_gpu_addr,
index 364f5b1a04b90ff933988303aa868c916eff306d..bedda9caadd9fe7d43bca2539bf51fd5df669f1f 100644 (file)
@@ -45,20 +45,14 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
        for (i = 0; i < n; i++) {
                switch (flag) {
                case RADEON_BENCHMARK_COPY_DMA:
-                       r = radeon_fence_create(rdev, &fence, radeon_copy_dma_ring_index(rdev));
-                       if (r)
-                               return r;
                        r = radeon_copy_dma(rdev, saddr, daddr,
                                            size / RADEON_GPU_PAGE_SIZE,
-                                           fence);
+                                           &fence);
                        break;
                case RADEON_BENCHMARK_COPY_BLIT:
-                       r = radeon_fence_create(rdev, &fence, radeon_copy_blit_ring_index(rdev));
-                       if (r)
-                               return r;
                        r = radeon_copy_blit(rdev, saddr, daddr,
                                             size / RADEON_GPU_PAGE_SIZE,
-                                            fence);
+                                            &fence);
                        break;
                default:
                        DRM_ERROR("Unknown copy method\n");
index 11f5f402d22cb331deed24dcb3b671e92a54892e..401d346a05c07f824a53d747a3b055d2414f04cc 100644 (file)
@@ -61,15 +61,21 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
        return seq;
 }
 
-int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
+int radeon_fence_emit(struct radeon_device *rdev,
+                     struct radeon_fence **fence,
+                     int ring)
 {
        /* we are protected by the ring emission mutex */
-       if (fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
-               return 0;
+       *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
+       if ((*fence) == NULL) {
+               return -ENOMEM;
        }
-       fence->seq = ++rdev->fence_drv[fence->ring].seq;
-       radeon_fence_ring_emit(rdev, fence->ring, fence);
-       trace_radeon_fence_emit(rdev->ddev, fence->seq);
+       kref_init(&((*fence)->kref));
+       (*fence)->rdev = rdev;
+       (*fence)->seq = ++rdev->fence_drv[ring].seq;
+       (*fence)->ring = ring;
+       radeon_fence_ring_emit(rdev, ring, *fence);
+       trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
        return 0;
 }
 
@@ -138,25 +144,9 @@ static void radeon_fence_destroy(struct kref *kref)
        struct radeon_fence *fence;
 
        fence = container_of(kref, struct radeon_fence, kref);
-       fence->seq = RADEON_FENCE_NOTEMITED_SEQ;
        kfree(fence);
 }
 
-int radeon_fence_create(struct radeon_device *rdev,
-                       struct radeon_fence **fence,
-                       int ring)
-{
-       *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
-       if ((*fence) == NULL) {
-               return -ENOMEM;
-       }
-       kref_init(&((*fence)->kref));
-       (*fence)->rdev = rdev;
-       (*fence)->seq = RADEON_FENCE_NOTEMITED_SEQ;
-       (*fence)->ring = ring;
-       return 0;
-}
-
 static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
                                      u64 seq, unsigned ring)
 {
@@ -176,10 +166,6 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
        if (!fence) {
                return true;
        }
-       if (fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
-               WARN(1, "Querying an unemitted fence : %p !\n", fence);
-               return true;
-       }
        if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
                return true;
        }
@@ -444,9 +430,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
                        return 0;
                }
 
-               if (fences[i]->seq < RADEON_FENCE_NOTEMITED_SEQ) {
-                       seq[i] = fences[i]->seq;
-               }
+               seq[i] = fences[i]->seq;
        }
 
        r = radeon_fence_wait_any_seq(rdev, seq, intr);
index 983658c91358939e0123a4545519d2d6c3b24f05..dd506c216d8f0ade1f55266591ddce617f9debf3 100644 (file)
@@ -49,13 +49,9 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
                dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
                return r;
        }
-       r = radeon_fence_create(rdev, &ib->fence, ring);
-       if (r) {
-               dev_err(rdev->dev, "failed to create fence for new IB (%d)\n", r);
-               radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
-               return r;
-       }
 
+       ib->ring = ring;
+       ib->fence = NULL;
        ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
        ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
        ib->vm_id = 0;
@@ -74,7 +70,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
 
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-       struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+       struct radeon_ring *ring = &rdev->ring[ib->ring];
        int r = 0;
 
        if (!ib->length_dw || !ring->ready) {
@@ -89,8 +85,13 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
                dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
                return r;
        }
-       radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
-       radeon_fence_emit(rdev, ib->fence);
+       radeon_ring_ib_execute(rdev, ib->ring, ib);
+       r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
+       if (r) {
+               dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
+               radeon_ring_unlock_undo(rdev, ring);
+               return r;
+       }
        radeon_ring_unlock_commit(rdev, ring);
        return 0;
 }
index 32059b745728945dc931f94978cfc7992d2c30ad..81dbb5b946ef427f49ad6379a18a5348f4e5847a 100644 (file)
@@ -349,7 +349,7 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
 
        sa_manager = (*sa_bo)->manager;
        spin_lock(&sa_manager->lock);
-       if (fence && fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
+       if (fence && !radeon_fence_signaled(fence)) {
                (*sa_bo)->fence = radeon_fence_ref(fence);
                list_add_tail(&(*sa_bo)->flist,
                              &sa_manager->flist[fence->ring]);
index efff929ea49dfb7f9eb29b5715e5c146b174c715..47e1535f2706e6fd5d8f086ffbe1390580bb4b03 100644 (file)
@@ -106,13 +106,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 
                radeon_bo_kunmap(gtt_obj[i]);
 
-               r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
-               if (r) {
-                       DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i);
-                       goto out_cleanup;
-               }
-
-               r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, fence);
+               r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
                if (r) {
                        DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
                        goto out_cleanup;
@@ -155,13 +149,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 
                radeon_bo_kunmap(vram_obj);
 
-               r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
-               if (r) {
-                       DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i);
-                       goto out_cleanup;
-               }
-
-               r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, fence);
+               r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
                if (r) {
                        DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
                        goto out_cleanup;
@@ -245,17 +233,6 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
        int ridxB = radeon_ring_index(rdev, ringB);
        int r;
 
-       r = radeon_fence_create(rdev, &fence1, ridxA);
-       if (r) {
-               DRM_ERROR("Failed to create sync fence 1\n");
-               goto out_cleanup;
-       }
-       r = radeon_fence_create(rdev, &fence2, ridxA);
-       if (r) {
-               DRM_ERROR("Failed to create sync fence 2\n");
-               goto out_cleanup;
-       }
-
        r = radeon_semaphore_create(rdev, &semaphore);
        if (r) {
                DRM_ERROR("Failed to create semaphore\n");
@@ -268,9 +245,19 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
-       radeon_fence_emit(rdev, fence1);
+       r = radeon_fence_emit(rdev, &fence1, ridxA);
+       if (r) {
+               DRM_ERROR("Failed to emit fence 1\n");
+               radeon_ring_unlock_undo(rdev, ringA);
+               goto out_cleanup;
+       }
        radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
-       radeon_fence_emit(rdev, fence2);
+       r = radeon_fence_emit(rdev, &fence2, ridxA);
+       if (r) {
+               DRM_ERROR("Failed to emit fence 2\n");
+               radeon_ring_unlock_undo(rdev, ringA);
+               goto out_cleanup;
+       }
        radeon_ring_unlock_commit(rdev, ringA);
 
        mdelay(1000);
@@ -342,17 +329,6 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
        bool sigA, sigB;
        int i, r;
 
-       r = radeon_fence_create(rdev, &fenceA, ridxA);
-       if (r) {
-               DRM_ERROR("Failed to create sync fence 1\n");
-               goto out_cleanup;
-       }
-       r = radeon_fence_create(rdev, &fenceB, ridxB);
-       if (r) {
-               DRM_ERROR("Failed to create sync fence 2\n");
-               goto out_cleanup;
-       }
-
        r = radeon_semaphore_create(rdev, &semaphore);
        if (r) {
                DRM_ERROR("Failed to create semaphore\n");
@@ -365,7 +341,12 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
-       radeon_fence_emit(rdev, fenceA);
+       r = radeon_fence_emit(rdev, &fenceA, ridxA);
+       if (r) {
+               DRM_ERROR("Failed to emit sync fence 1\n");
+               radeon_ring_unlock_undo(rdev, ringA);
+               goto out_cleanup;
+       }
        radeon_ring_unlock_commit(rdev, ringA);
 
        r = radeon_ring_lock(rdev, ringB, 64);
@@ -374,7 +355,12 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ridxB, semaphore);
-       radeon_fence_emit(rdev, fenceB);
+       r = radeon_fence_emit(rdev, &fenceB, ridxB);
+       if (r) {
+               DRM_ERROR("Failed to create sync fence 2\n");
+               radeon_ring_unlock_undo(rdev, ringB);
+               goto out_cleanup;
+       }
        radeon_ring_unlock_commit(rdev, ringB);
 
        mdelay(1000);
index c94a2257761f1cafc6cfeacbb2633aaa1c082a42..2d36bdda9327aa191f319161310cbdd75e74b3f5 100644 (file)
@@ -222,15 +222,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 {
        struct radeon_device *rdev;
        uint64_t old_start, new_start;
-       struct radeon_fence *fence, *old_fence;
+       struct radeon_fence *fence;
        struct radeon_semaphore *sem = NULL;
-       int r;
+       int r, ridx;
 
        rdev = radeon_get_rdev(bo->bdev);
-       r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev));
-       if (unlikely(r)) {
-               return r;
-       }
+       ridx = radeon_copy_ring_index(rdev);
        old_start = old_mem->start << PAGE_SHIFT;
        new_start = new_mem->start << PAGE_SHIFT;
 
@@ -243,7 +240,6 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
-               radeon_fence_unref(&fence);
                return -EINVAL;
        }
        switch (new_mem->mem_type) {
@@ -255,42 +251,38 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
-               radeon_fence_unref(&fence);
                return -EINVAL;
        }
-       if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) {
+       if (!rdev->ring[ridx].ready) {
                DRM_ERROR("Trying to move memory with ring turned off.\n");
-               radeon_fence_unref(&fence);
                return -EINVAL;
        }
 
        BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
 
        /* sync other rings */
-       old_fence = bo->sync_obj;
-       if (old_fence && old_fence->ring != fence->ring
-           && !radeon_fence_signaled(old_fence)) {
+       fence = bo->sync_obj;
+       if (fence && fence->ring != ridx
+           && !radeon_fence_signaled(fence)) {
                bool sync_to_ring[RADEON_NUM_RINGS] = { };
-               sync_to_ring[old_fence->ring] = true;
+               sync_to_ring[fence->ring] = true;
 
                r = radeon_semaphore_create(rdev, &sem);
                if (r) {
-                       radeon_fence_unref(&fence);
                        return r;
                }
 
-               r = radeon_semaphore_sync_rings(rdev, sem,
-                                               sync_to_ring, fence->ring);
+               r = radeon_semaphore_sync_rings(rdev, sem, sync_to_ring, ridx);
                if (r) {
                        radeon_semaphore_free(rdev, sem, NULL);
-                       radeon_fence_unref(&fence);
                        return r;
                }
        }
 
+       fence = NULL;
        r = radeon_copy(rdev, old_start, new_start,
                        new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
-                       fence);
+                       &fence);
        /* FIXME: handle copy error */
        r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
                                      evict, no_wait_reserve, no_wait_gpu, new_mem);
index c7b61f16ecfd79855315b5ad938499510a2d8b41..8868a1fa20e5f6579d2868d8f6f2c51856f78cd6 100644 (file)
@@ -1762,7 +1762,7 @@ void si_fence_ring_emit(struct radeon_device *rdev,
  */
 void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-       struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+       struct radeon_ring *ring = &rdev->ring[ib->ring];
        u32 header;
 
        if (ib->is_const_ib)
@@ -2702,7 +2702,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
                        if (ib->is_const_ib)
                                ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
                        else {
-                               switch (ib->fence->ring) {
+                               switch (ib->ring) {
                                case RADEON_RING_TYPE_GFX_INDEX:
                                        ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
                                        break;
@@ -2711,7 +2711,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
                                        ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
                                        break;
                                default:
-                                       dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->fence->ring);
+                                       dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring);
                                        ret = -EINVAL;
                                        break;
                                }