};
void amdgpu_sync_create(struct amdgpu_sync *sync);
-void amdgpu_sync_fence(struct amdgpu_sync *sync,
- struct amdgpu_fence *fence);
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct fence *f);
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
if (p->bo_list) {
for (i = 0; i < p->bo_list->num_entries; i++) {
+ struct fence *f;
+
/* ignore duplicates */
bo = p->bo_list->array[i].robj;
if (!bo)
if (r)
return r;
- amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update);
+ f = &bo_va->last_pt_update->base;
+ r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
+ if (r)
+ return r;
}
}
return r;
}
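
The `&bo_va->last_pt_update->base` conversion above works because the driver fence embeds a generic fence as its first member, and to_amdgpu_fence() recovers the wrapper again on the receiving side. A stand-alone sketch of that embedding (the demo_* types and the hand-rolled container_of are illustrative only, not driver code):

/*
 * Stand-alone illustration only: demo_base_fence / demo_amdgpu_fence model
 * "struct fence" embedded in "struct amdgpu_fence"; not driver code.
 */
#include <stddef.h>
#include <stdio.h>

struct demo_base_fence {		/* stands in for struct fence */
	unsigned seqno;
};

struct demo_amdgpu_fence {		/* stands in for struct amdgpu_fence */
	struct demo_base_fence base;	/* the generic part callers pass around */
	int ring;
};

/* container_of() by hand: recover the wrapping driver fence from its base. */
static struct demo_amdgpu_fence *demo_to_amdgpu_fence(struct demo_base_fence *f)
{
	return (struct demo_amdgpu_fence *)
		((char *)f - offsetof(struct demo_amdgpu_fence, base));
}

int main(void)
{
	struct demo_amdgpu_fence af = { .base = { .seqno = 7 }, .ring = 2 };
	struct demo_base_fence *f = &af.base;	/* what the callers now pass */

	printf("ring %d, seqno %u\n",
	       demo_to_amdgpu_fence(f)->ring, demo_to_amdgpu_fence(f)->seqno);
	return 0;
}

In the driver, to_amdgpu_fence() additionally returns NULL when the fence ops do not match an amdgpu fence, which is what the NULL check in the new amdgpu_sync_fence() below relies on.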
- amdgpu_sync_fence(&ib->sync, fence);
+ r = amdgpu_sync_fence(adev, &ib->sync, &fence->base);
amdgpu_fence_unref(&fence);
amdgpu_ctx_put(ctx);
+
+ if (r)
+ return r;
}
}
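
One subtlety in the dependency hunk above: the fence reference and the context are dropped before the new return code is examined, so the early return cannot leak them. A minimal stand-alone model of that release-then-propagate ordering (demo_* names are invented):

/* Illustration only: release temporary references before propagating the
 * error from the sync step, mirroring the unref/put ordering above. */
#include <errno.h>
#include <stdio.h>

struct demo_obj {
	int refcount;
};

static void demo_get(struct demo_obj *o) { o->refcount++; }
static void demo_put(struct demo_obj *o) { o->refcount--; }

static int demo_add_dependency(struct demo_obj *fence, int sync_result)
{
	int r;

	demo_get(fence);	/* reference held only for this call */
	r = sync_result;	/* stands in for amdgpu_sync_fence() */
	demo_put(fence);	/* dropped on success and failure alike */

	return r;		/* propagate only after the cleanup */
}

int main(void)
{
	struct demo_obj fence = { .refcount = 1 };

	printf("ok=%d refs=%d\n", demo_add_dependency(&fence, 0), fence.refcount);
	printf("err=%d refs=%d\n", demo_add_dependency(&fence, -EINTR), fence.refcount);
	return 0;
}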
/* grab a vm id if necessary */
struct amdgpu_fence *vm_id_fence = NULL;
vm_id_fence = amdgpu_vm_grab_id(ibs->ring, ibs->vm);
- amdgpu_sync_fence(&ibs->sync, vm_id_fence);
+ r = amdgpu_sync_fence(adev, &ibs->sync, &vm_id_fence->base);
+ if (r) {
+ amdgpu_ring_unlock_undo(ring);
+ return r;
+ }
}
r = amdgpu_sync_rings(&ibs->sync, ring);
}
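
Because amdgpu_sync_fence() may now sleep and fail (an interruptible fence_wait() on a foreign fence), the scheduling path has to roll back the ring writes it already queued before bailing out, hence the amdgpu_ring_unlock_undo() above. A hedged stand-alone sketch of that undo-on-error pattern (demo_ring and its helpers are invented stand-ins, not the amdgpu ring API):

/* Illustration only: "lock ring, emit, undo on failure". */
#include <errno.h>
#include <stdio.h>

struct demo_ring {
	unsigned wptr;		/* current write pointer */
	unsigned wptr_old;	/* write pointer saved when the ring was locked */
};

static void demo_ring_lock(struct demo_ring *ring)
{
	ring->wptr_old = ring->wptr;
}

static void demo_ring_unlock_undo(struct demo_ring *ring)
{
	ring->wptr = ring->wptr_old;	/* discard the partially emitted commands */
}

/* Stands in for amdgpu_sync_fence(), which may now fail with -EINTR. */
static int demo_sync_to_vm_id_fence(int interrupted)
{
	return interrupted ? -EINTR : 0;
}

static int demo_schedule_ib(struct demo_ring *ring, int interrupted)
{
	int r;

	demo_ring_lock(ring);
	ring->wptr += 16;			/* pretend we emitted packets */

	r = demo_sync_to_vm_id_fence(interrupted);
	if (r) {
		demo_ring_unlock_undo(ring);	/* roll the ring back */
		return r;
	}
	/* ...emit the IB and unlock normally... */
	return 0;
}

int main(void)
{
	struct demo_ring ring = { .wptr = 32 };

	printf("ok: %d, wptr=%u\n", demo_schedule_ib(&ring, 0), ring.wptr);
	printf("fail: %d, wptr=%u\n", demo_schedule_ib(&ring, 1), ring.wptr);
	return 0;
}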
/**
- * amdgpu_sync_fence - use the semaphore to sync to a fence
+ * amdgpu_sync_fence - remember to sync to this fence
*
+ * @adev: amdgpu device the sync object is used with
* @sync: sync object to add fence to
- * @fence: fence to sync to
+ * @f: fence to sync to
*
- * Sync to the fence using the semaphore objects
+ * Add the fence to the sync object; a fence from a foreign device is
+ * simply waited on instead of being tracked.
*/
-void amdgpu_sync_fence(struct amdgpu_sync *sync,
- struct amdgpu_fence *fence)
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct fence *f)
{
+ struct amdgpu_fence *fence;
struct amdgpu_fence *other;
- if (!fence)
- return;
+ if (!f)
+ return 0;
+
+ fence = to_amdgpu_fence(f);
+ if (!fence || fence->ring->adev != adev)
+ return fence_wait(f, true);
other = sync->sync_to[fence->ring->idx];
sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
amdgpu_fence_later(fence, other));
amdgpu_fence_unref(&other);
+
+ return 0;
}
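
Summing up the new behaviour: a NULL fence is a no-op, a fence that does not belong to this device is waited on synchronously, and a local fence is merely recorded, keeping only the latest fence per ring. A small user-space model of that decision, assuming a plain seqno comparison stands in for amdgpu_fence_later() (all demo_* names are invented):

/* Illustration only: the per-ring "remember the later fence" bookkeeping. */
#include <stdio.h>

#define DEMO_MAX_RINGS 4

struct demo_fence {
	int dev_id;		/* device the fence belongs to */
	int ring;		/* ring index on that device */
	unsigned seq;		/* monotonically increasing sequence number */
};

struct demo_sync {
	struct demo_fence *sync_to[DEMO_MAX_RINGS];
};

/* Stand-in for fence_wait(): a foreign fence is waited on right away. */
static int demo_wait(struct demo_fence *f)
{
	printf("waiting on foreign fence dev=%d seq=%u\n", f->dev_id, f->seq);
	return 0;
}

static int demo_sync_fence(int adev_id, struct demo_sync *sync,
			   struct demo_fence *f)
{
	struct demo_fence *other;

	if (!f)
		return 0;

	/* Not our device: we cannot make our rings wait on it, so block now. */
	if (f->dev_id != adev_id)
		return demo_wait(f);

	/* Our device: keep only the later of the two fences per ring. */
	other = sync->sync_to[f->ring];
	if (!other || f->seq > other->seq)
		sync->sync_to[f->ring] = f;
	return 0;
}

int main(void)
{
	struct demo_sync sync = { { NULL } };
	struct demo_fence a = { .dev_id = 0, .ring = 1, .seq = 10 };
	struct demo_fence b = { .dev_id = 0, .ring = 1, .seq = 12 };
	struct demo_fence c = { .dev_id = 1, .ring = 0, .seq = 3 };

	demo_sync_fence(0, &sync, &a);
	demo_sync_fence(0, &sync, &b);	/* same ring, later seq: replaces a */
	demo_sync_fence(0, &sync, &c);	/* foreign device: waited on now */
	printf("ring 1 syncs to seq %u\n", sync.sync_to[1]->seq);
	return 0;
}

Deferring local fences like this lets the later submission step resolve them through the ring synchronization path instead of blocking the submitting thread.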
/**
/* always sync to the exclusive fence */
f = reservation_object_get_excl(resv);
- fence = f ? to_amdgpu_fence(f) : NULL;
- if (fence && fence->ring->adev == adev)
- amdgpu_sync_fence(sync, fence);
- else if (f)
- r = fence_wait(f, true);
+ r = amdgpu_sync_fence(adev, sync, f);
flist = reservation_object_get_list(resv);
if (!flist || r)
f = rcu_dereference_protected(flist->shared[i],
reservation_object_held(resv));
fence = f ? to_amdgpu_fence(f) : NULL;
- if (fence && fence->ring->adev == adev) {
- if (fence->owner != owner ||
- fence->owner == AMDGPU_FENCE_OWNER_UNDEFINED)
- amdgpu_sync_fence(sync, fence);
- } else if (f) {
- r = fence_wait(f, true);
- if (r)
- break;
- }
+ if (fence && fence->ring->adev == adev &&
+ fence->owner == owner &&
+ fence->owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+ continue;
+
+ r = amdgpu_sync_fence(adev, sync, f);
+ if (r)
+ break;
}
return r;
}
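
The rewritten loop inverts the old condition: a shared fence is skipped only when it is local, its owner matches the caller and that owner is defined; everything else goes through amdgpu_sync_fence(), which records or waits on it as appropriate. A minimal model of just that skip predicate (demo_* names and the NULL stand-in for AMDGPU_FENCE_OWNER_UNDEFINED are assumptions for illustration):

/* Illustration only: the "skip fences we emitted ourselves" test applied to
 * the shared fences of a reservation object. */
#include <stdbool.h>
#include <stdio.h>

struct demo_shared_fence {
	int dev_id;		/* device that signalled the fence */
	void *owner;		/* VM / context that emitted it */
};

static bool demo_skip_fence(const struct demo_shared_fence *f,
			    int adev_id, void *owner)
{
	/* Only a local fence with a known, matching owner may be ignored;
	 * everything else has to be recorded or waited on. */
	return f->dev_id == adev_id &&
	       f->owner == owner &&
	       f->owner != NULL;
}

int main(void)
{
	int me = 0;
	struct demo_shared_fence mine = { .dev_id = 0, .owner = &me };
	struct demo_shared_fence foreign = { .dev_id = 1, .owner = &me };

	printf("skip own local fence: %d\n", demo_skip_fence(&mine, 0, &me));
	printf("skip foreign fence: %d\n", demo_skip_fence(&foreign, 0, &me));
	return 0;
}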
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_fence *f = vm->ids[i].last_id_use;
- amdgpu_sync_fence(&ib.sync, f);
+ r = amdgpu_sync_fence(adev, &ib.sync, &f->base);
+ if (r)
+ return r;
}
}
struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
struct amdgpu_bo_va *bo_va = NULL;
- int r;
+ int r = 0;
spin_lock(&vm->status_lock);
while (!list_empty(&vm->invalidated)) {
spin_unlock(&vm->status_lock);
if (bo_va)
- amdgpu_sync_fence(sync, bo_va->last_pt_update);
- return 0;
+ r = amdgpu_sync_fence(adev, sync, &bo_va->last_pt_update->base);
+
+ return r;
}
/**