int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
-signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
- struct fence **array,
+signed long amdgpu_fence_wait_any(struct fence **array,
uint32_t count,
bool intr,
signed long t);
static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
signed long t)
{
- struct amdgpu_fence *fence = to_amdgpu_fence(f);
- struct amdgpu_device *adev = fence->ring->adev;
-
- return amdgpu_fence_wait_any(adev, &f, 1, intr, t);
+ return amdgpu_fence_wait_any(&f, 1, intr, t);
}
/**
 * Wait on the fence array with a timeout
*
- * @adev: amdgpu device
 * @array: the array of amdgpu fence pointers
 * @count: the number of fences in the array
 * @intr: when sleeping, make the current task interruptible or not
 * @t: timeout, in jiffies
*
 * It will return when any fence is signaled or the timeout expires.
*/
-signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
- struct fence **array, uint32_t count,
+signed long amdgpu_fence_wait_any(struct fence **array, uint32_t count,
bool intr, signed long t)
{
struct amdgpu_wait_cb *cb;
} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
spin_unlock(&sa_manager->wq.lock);
- t = amdgpu_fence_wait_any(adev, fences, AMDGPU_MAX_RINGS,
+ t = amdgpu_fence_wait_any(fences, AMDGPU_MAX_RINGS,
false, MAX_SCHEDULE_TIMEOUT);
r = (t > 0) ? 0 : t;
spin_lock(&sa_manager->wq.lock);
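/*
 * Minimal usage sketch, not part of the patch: how a caller might drive the
 * updated amdgpu_fence_wait_any() signature (without the adev argument) and
 * translate its jiffies-based return value into an errno-style result. The
 * helper name and the one-second timeout are illustrative assumptions, and
 * the return convention (> 0 remaining jiffies, 0 on timeout, negative errno
 * on error) is assumed to follow the usual fence .wait contract, consistent
 * with the "r = (t > 0) ? 0 : t" call site above. Assumes the usual kernel
 * headers (<linux/jiffies.h>, <linux/errno.h>) and the declaration above are
 * in scope.
 */
static int example_wait_any(struct fence **fences, uint32_t count)
{
	signed long t;

	/* Interruptible wait, bounded to roughly one second of jiffies. */
	t = amdgpu_fence_wait_any(fences, count, true, msecs_to_jiffies(1000));
	if (t > 0)
		return 0;	/* a fence signaled; t jiffies remained */
	if (t == 0)
		return -ETIME;	/* assumed to mean the timeout expired */
	return t;		/* e.g. -ERESTARTSYS when interrupted */
}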