drm/amdgpu: deal with foreign fences in amdgpu_sync
author    Christian König <christian.koenig@amd.com>
          Mon, 6 Jul 2015 20:06:40 +0000 (22:06 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
          Mon, 17 Aug 2015 20:50:13 +0000 (16:50 -0400)
amdgpu_sync_fence() now accepts any struct fence and simply waits on fences that don't belong to this device instead of tracking them in the sync object. This also requires some error handling from the callers of that function.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
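
In short, amdgpu_sync_fence() now takes the device plus a generic struct fence instead of an amdgpu_fence, waits inline on fences it cannot track per ring, and returns an error code. A minimal sketch of the new calling convention, assuming kernel context; example_sync_to_fence is a hypothetical call site, the real ones are in the hunks below:

static int example_sync_to_fence(struct amdgpu_device *adev,
                                 struct amdgpu_sync *sync,
                                 struct fence *f)
{
        int r;

        /* A NULL fence is a no-op; a fence from another device is waited
         * on inline via fence_wait(), which can fail or be interrupted,
         * so every call site now has to check the return value. */
        r = amdgpu_sync_fence(adev, sync, f);
        if (r)
                return r;

        return 0;
}

Since that wait is interruptible, the callers in amdgpu_cs.c, amdgpu_ib.c and amdgpu_vm.c below now propagate the error instead of ignoring it.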

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 069cc28941babb1fe5eedcb7445718a6b94abc87..70e783a849ed627111e7b318c1ff57a0dfb11fec 100644
@@ -699,8 +699,8 @@ struct amdgpu_sync {
 };
 
 void amdgpu_sync_create(struct amdgpu_sync *sync);
-void amdgpu_sync_fence(struct amdgpu_sync *sync,
-                      struct amdgpu_fence *fence);
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+                     struct fence *f);
 int amdgpu_sync_resv(struct amdgpu_device *adev,
                     struct amdgpu_sync *sync,
                     struct reservation_object *resv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 1f040d85ac47fe336f609a0cf9d39ac59b94f665..53e6a10fe9f920ccdf863d17904024fdfbdab8bd 100644
@@ -482,6 +482,8 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 
        if (p->bo_list) {
                for (i = 0; i < p->bo_list->num_entries; i++) {
+                       struct fence *f;
+
                        /* ignore duplicates */
                        bo = p->bo_list->array[i].robj;
                        if (!bo)
@@ -495,7 +497,10 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
                        if (r)
                                return r;
 
-                       amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update);
+                       f = &bo_va->last_pt_update->base;
+                       r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
+                       if (r)
+                               return r;
                }
        }
 
@@ -715,9 +720,12 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
                                return r;
                        }
 
-                       amdgpu_sync_fence(&ib->sync, fence);
+                       r = amdgpu_sync_fence(adev, &ib->sync, &fence->base);
                        amdgpu_fence_unref(&fence);
                        amdgpu_ctx_put(ctx);
+
+                       if (r)
+                               return r;
                }
        }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index bc0fac618a3f01121edb740207b9866b6818f71f..2722815eddbbb72dfd88c6ed453758c681f5bc4b 100644
@@ -167,7 +167,11 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
                /* grab a vm id if necessary */
                struct amdgpu_fence *vm_id_fence = NULL;
                vm_id_fence = amdgpu_vm_grab_id(ibs->ring, ibs->vm);
-               amdgpu_sync_fence(&ibs->sync, vm_id_fence);
+               r = amdgpu_sync_fence(adev, &ibs->sync, &vm_id_fence->base);
+               if (r) {
+                       amdgpu_ring_unlock_undo(ring);
+                       return r;
+               }
        }
 
        r = amdgpu_sync_rings(&ibs->sync, ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 21accbdd0a1afc0bc494dcafd4db39d44fdbe0a5..9c292cf770f43bc39b0d88dee930e55e845238c4 100644
@@ -53,20 +53,24 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
 }
 
 /**
- * amdgpu_sync_fence - use the semaphore to sync to a fence
+ * amdgpu_sync_fence - remember to sync to this fence
  *
  * @sync: sync object to add fence to
  * @fence: fence to sync to
  *
- * Sync to the fence using the semaphore objects
  */
-void amdgpu_sync_fence(struct amdgpu_sync *sync,
-                      struct amdgpu_fence *fence)
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+                     struct fence *f)
 {
+       struct amdgpu_fence *fence;
        struct amdgpu_fence *other;
 
-       if (!fence)
-               return;
+       if (!f)
+               return 0;
+
+       fence = to_amdgpu_fence(f);
+       if (!fence || fence->ring->adev != adev)
+               return fence_wait(f, true);
 
        other = sync->sync_to[fence->ring->idx];
        sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
@@ -79,6 +83,8 @@ void amdgpu_sync_fence(struct amdgpu_sync *sync,
                        amdgpu_fence_later(fence, other));
                amdgpu_fence_unref(&other);
        }
+
+       return 0;
 }
 
 /**
@@ -106,11 +112,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 
        /* always sync to the exclusive fence */
        f = reservation_object_get_excl(resv);
-       fence = f ? to_amdgpu_fence(f) : NULL;
-       if (fence && fence->ring->adev == adev)
-               amdgpu_sync_fence(sync, fence);
-       else if (f)
-               r = fence_wait(f, true);
+       r = amdgpu_sync_fence(adev, sync, f);
 
        flist = reservation_object_get_list(resv);
        if (!flist || r)
@@ -120,15 +122,14 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
                f = rcu_dereference_protected(flist->shared[i],
                                              reservation_object_held(resv));
                fence = f ? to_amdgpu_fence(f) : NULL;
-               if (fence && fence->ring->adev == adev) {
-                       if (fence->owner != owner ||
-                           fence->owner == AMDGPU_FENCE_OWNER_UNDEFINED)
-                               amdgpu_sync_fence(sync, fence);
-               } else if (f) {
-                       r = fence_wait(f, true);
-                       if (r)
-                               break;
-               }
+               if (fence && fence->ring->adev == adev &&
+                   fence->owner == owner &&
+                   fence->owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+                               continue;
+
+               r = amdgpu_sync_fence(adev, sync, f);
+               if (r)
+                       break;
        }
        return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9a4e3b63f1cb4bf7ca9c73e813a0568f320c6574..0c8c9904d880ff5b259b09ba59e511f279091fb0 100644
@@ -732,7 +732,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        struct amdgpu_fence *f = vm->ids[i].last_id_use;
-                       amdgpu_sync_fence(&ib.sync, f);
+                       r = amdgpu_sync_fence(adev, &ib.sync, &f->base);
+                       if (r)
+                               return r;
                }
        }
 
@@ -861,7 +863,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
                             struct amdgpu_vm *vm, struct amdgpu_sync *sync)
 {
        struct amdgpu_bo_va *bo_va = NULL;
-       int r;
+       int r = 0;
 
        spin_lock(&vm->status_lock);
        while (!list_empty(&vm->invalidated)) {
@@ -878,8 +880,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
        spin_unlock(&vm->status_lock);
 
        if (bo_va)
-               amdgpu_sync_fence(sync, bo_va->last_pt_update);
-       return 0;
+               r = amdgpu_sync_fence(adev, sync, &bo_va->last_pt_update->base);
+
+       return r;
 }
 
 /**