struct radeon_sa_manager *manager;
unsigned soffset;
unsigned eoffset;
+ struct radeon_fence *fence;
};
rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
list_del_init(&vm->list);
vm->id = -1;
- radeon_sa_bo_free(rdev, &vm->sa_bo);
+ radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
vm->pt = NULL;
list_for_each_entry(bo_va, &vm->va, vm_list) {
retry:
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
- RADEON_GPU_PAGE_SIZE);
+ RADEON_GPU_PAGE_SIZE, false);
if (r) {
if (list_empty(&rdev->vm_manager.lru_vm)) {
return r;
/* do hw bind */
r = rdev->vm_manager.funcs->bind(rdev, vm, id);
if (r) {
- radeon_sa_bo_free(rdev, &vm->sa_bo);
+ radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
return r;
}
rdev->vm_manager.use_bitmap |= 1 << id;
extern int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
struct radeon_sa_bo **sa_bo,
- unsigned size, unsigned align);
+ unsigned size, unsigned align, bool block);
extern void radeon_sa_bo_free(struct radeon_device *rdev,
- struct radeon_sa_bo **sa_bo);
+ struct radeon_sa_bo **sa_bo,
+ struct radeon_fence *fence);
#if defined(CONFIG_DEBUG_FS)
extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
struct seq_file *m);
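
For orientation, a minimal caller sketch of the updated interface follows. This is a hypothetical helper, not part of the patch; the size/alignment values are arbitrary and it assumes fence is the fence emitted for the last command stream that uses the buffer.

/* Hypothetical usage sketch of the new parameters, not part of this patch. */
static int example_suballoc(struct radeon_device *rdev,
			    struct radeon_sa_manager *sa_manager,
			    struct radeon_fence *fence)
{
	struct radeon_sa_bo *sa_bo;
	int r;

	/* block == true: if the manager is full but a hole is still protected
	 * by an unsignaled fence, wait on that fence and retry instead of
	 * returning -ENOMEM right away */
	r = radeon_sa_bo_new(rdev, sa_manager, &sa_bo, 4096, 256, true);
	if (r)
		return r;

	/* ... emit commands that use the buffer, protected by fence ... */

	/* passing the fence defers reclaiming the hole until it has signaled */
	radeon_sa_bo_free(rdev, &sa_bo, fence);
	return 0;
}
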
if (ib->fence && ib->fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
if (radeon_fence_signaled(ib->fence)) {
radeon_fence_unref(&ib->fence);
- radeon_sa_bo_free(rdev, &ib->sa_bo);
+ radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
done = true;
}
}
if (rdev->ib_pool.ibs[idx].fence == NULL) {
r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
&rdev->ib_pool.ibs[idx].sa_bo,
- size, 256);
+ size, 256, false);
if (!r) {
*ib = &rdev->ib_pool.ibs[idx];
(*ib)->ptr = radeon_sa_bo_cpu_addr((*ib)->sa_bo);
}
radeon_mutex_lock(&rdev->ib_pool.mutex);
if (tmp->fence && tmp->fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
- radeon_sa_bo_free(rdev, &tmp->sa_bo);
+ radeon_sa_bo_free(rdev, &tmp->sa_bo, NULL);
radeon_fence_unref(&tmp->fence);
}
radeon_mutex_unlock(&rdev->ib_pool.mutex);
radeon_mutex_lock(&rdev->ib_pool.mutex);
if (rdev->ib_pool.ready) {
for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
+ radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo, NULL);
radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
}
radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
*
* Alignment can't be bigger than page size
*/
+
+static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
+{
+ list_del(&sa_bo->list);
+ radeon_fence_unref(&sa_bo->fence);
+ kfree(sa_bo);
+}
+
int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
struct radeon_sa_bo **sa_bo,
- unsigned size, unsigned align)
+ unsigned size, unsigned align, bool block)
{
- struct radeon_sa_bo *tmp;
+ struct radeon_fence *fence = NULL;
+ struct radeon_sa_bo *tmp, *next;
struct list_head *head;
unsigned offset = 0, wasted = 0;
+ int r;
BUG_ON(align > RADEON_GPU_PAGE_SIZE);
BUG_ON(size > sa_manager->size);
*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
+retry:
+ fence = NULL;
+
spin_lock(&sa_manager->lock);
/* no one ? */
/* look for a hole big enough */
offset = 0;
- list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
+ list_for_each_entry_safe(tmp, next, &sa_manager->sa_bo, list) {
+ /* try to free this object */
+ if (tmp->fence) {
+ if (radeon_fence_signaled(tmp->fence)) {
+ radeon_sa_bo_remove_locked(tmp);
+ continue;
+ } else {
+ fence = tmp->fence;
+ }
+ }
+
/* room before this object ? */
if (offset < tmp->soffset && (tmp->soffset - offset) >= size) {
head = tmp->list.prev;
if ((sa_manager->size - offset) < size) {
/* failed to find somethings big enough */
spin_unlock(&sa_manager->lock);
+ if (block && fence) {
+ r = radeon_fence_wait(fence, false);
+ if (r) {
+ kfree(*sa_bo);
+ *sa_bo = NULL;
+ return r;
+ }
+
+ goto retry;
+ }
kfree(*sa_bo);
*sa_bo = NULL;
return -ENOMEM;
return 0;
}
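
With block == true, a failed scan waits on the fence protecting an in-flight hole and retries instead of returning -ENOMEM immediately. For illustration only (the call sites converted by this patch all keep block == false), a call-site fragment such as the VM path above could request that behaviour:

	/* hypothetical: sleep until a protected hole is reclaimed */
	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
			     RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
			     RADEON_GPU_PAGE_SIZE, true);
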
-void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo)
+void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
+ struct radeon_fence *fence)
{
+ struct radeon_sa_manager *sa_manager;
+
if (!sa_bo || !*sa_bo)
return;
- spin_lock(&(*sa_bo)->manager->lock);
- list_del_init(&(*sa_bo)->list);
- spin_unlock(&(*sa_bo)->manager->lock);
- kfree(*sa_bo);
+ sa_manager = (*sa_bo)->manager;
+ spin_lock(&sa_manager->lock);
+ if (fence && fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
+ (*sa_bo)->fence = radeon_fence_ref(fence);
+ } else {
+ radeon_sa_bo_remove_locked(*sa_bo);
+ }
+ spin_unlock(&sa_manager->lock);
*sa_bo = NULL;
}
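
The call sites converted in this patch still pass NULL, which removes the hole immediately. For illustration only (an assumption about how the new parameter is meant to be used, not something this patch does), a caller could hand over the protecting fence so the space is recycled lazily once the GPU is done with it:

	/* hypothetical: defer the release until the ib's fence has signaled */
	radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
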
spin_lock(&sa_manager->lock);
list_for_each_entry(i, &sa_manager->sa_bo, list) {
- seq_printf(m, "[%08x %08x] size %4d [%p]\n",
+ seq_printf(m, "[%08x %08x] size %4d (%p)",
i->soffset, i->eoffset, i->eoffset - i->soffset, i);
+ if (i->fence) {
+ seq_printf(m, " protected by %Ld (%p) on ring %d\n",
+ i->fence->seq, i->fence, i->fence->ring);
+ } else {
+ seq_printf(m, "\n");
+ }
}
spin_unlock(&sa_manager->lock);
}
static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
struct radeon_semaphore_bo *bo)
{
- radeon_sa_bo_free(rdev, &bo->ib->sa_bo);
+ radeon_sa_bo_free(rdev, &bo->ib->sa_bo, NULL);
radeon_fence_unref(&bo->ib->fence);
list_del(&bo->list);
kfree(bo);