struct amdgpu_fence *fence;
struct amdgpu_user_fence *user;
struct amdgpu_vm *vm;
- bool is_const_ib;
bool flush_hdp_writefifo;
struct amdgpu_sync sync;
- bool gds_needed;
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
+ uint32_t flags;
};
enum amdgpu_ring_type {
}
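With the two booleans folded into a single bitmask, per-feature checks become flag tests on ib->flags. Below is a minimal sketch of that pattern, not the kernel code: the struct and helper names (example_ib, ib_is_const, ib_needs_gds) are hypothetical, and the bit values chosen for AMDGPU_IB_FLAG_CE and AMDGPU_IB_FLAG_GDS are assumptions for illustration; the authoritative definitions live in the amdgpu uapi header (amdgpu_drm.h).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the real definitions come from amdgpu_drm.h. */
#define AMDGPU_IB_FLAG_CE   (1u << 0)
#define AMDGPU_IB_FLAG_GDS  (1u << 1)

/* Hypothetical stand-in for the relevant part of struct amdgpu_ib. */
struct example_ib {
	uint32_t flags;	/* replaces is_const_ib and gds_needed */
};

static bool ib_is_const(const struct example_ib *ib)
{
	return ib->flags & AMDGPU_IB_FLAG_CE;
}

static bool ib_needs_gds(const struct example_ib *ib)
{
	return ib->flags & AMDGPU_IB_FLAG_GDS;
}

int main(void)
{
	struct example_ib ib = { .flags = AMDGPU_IB_FLAG_CE | AMDGPU_IB_FLAG_GDS };

	printf("const IB: %d, GDS needed: %d\n",
	       ib_is_const(&ib), ib_needs_gds(&ib));
	return 0;
}

One consequence of keeping a single word is that new IB flags can be added in the uapi header without touching the in-kernel structure again.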
ib->length_dw = chunk_ib->ib_bytes / 4;
- if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
- ib->is_const_ib = true;
- if (chunk_ib->flags & AMDGPU_IB_FLAG_GDS)
- ib->gds_needed = true;
+ ib->flags = chunk_ib->flags;
+
if ((ib->ring->current_filp != parser->filp) ||
(ib->ring->current_ctx != parser->ctx_id)) {
ib->ring->need_ctx_switch = true;
ib->fence = NULL;
ib->user = NULL;
ib->vm = vm;
- ib->is_const_ib = false;
ib->gds_base = 0;
ib->gds_size = 0;
ib->gws_base = 0;
ib->gws_size = 0;
ib->oa_base = 0;
ib->oa_size = 0;
+ ib->flags = 0;
return 0;
}
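The flags word originates in the userspace IB chunk and is copied through unmodified, with the IB allocation path zeroing it first, as in the hunk above. The following sketch traces that data flow under stated assumptions: example_chunk_ib, example_ib and example_ib_fill are hypothetical stand-ins, only the flags and ib_bytes fields mirror what the hunks above actually use, and the AMDGPU_IB_FLAG_CE value is illustrative.

#include <stdint.h>
#include <string.h>

/* Illustrative flag value; the real definition comes from amdgpu_drm.h. */
#define AMDGPU_IB_FLAG_CE (1u << 0)

/* Hypothetical, simplified stand-ins for the uapi IB chunk and the
 * kernel-side IB; only flags and ib_bytes are taken from the hunks above. */
struct example_chunk_ib {
	uint32_t flags;
	uint32_t ib_bytes;
};

struct example_ib {
	uint32_t length_dw;
	uint32_t flags;
};

/* Mirrors the amdgpu_cs.c change: the whole flags word is copied
 * instead of decoding individual booleans one by one. */
static void example_ib_fill(struct example_ib *ib,
			    const struct example_chunk_ib *chunk_ib)
{
	ib->length_dw = chunk_ib->ib_bytes / 4;
	ib->flags = chunk_ib->flags;
}

int main(void)
{
	struct example_chunk_ib chunk = {
		.flags = AMDGPU_IB_FLAG_CE,	/* userspace asks for a CE IB */
		.ib_bytes = 256,
	};
	struct example_ib ib;

	memset(&ib, 0, sizeof(ib));	/* allocation starts from zeroed fields */
	example_ib_fill(&ib, &chunk);
	return 0;
}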
amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
}
- if (ring->funcs->emit_gds_switch && ib->vm && ib->gds_needed)
+ if (ring->funcs->emit_gds_switch && ib->vm && (ib->flags & AMDGPU_IB_FLAG_GDS))
amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
ib->gds_base, ib->gds_size,
ib->gws_base, ib->gws_size,
ib->oa_base, ib->oa_size);
ring->need_ctx_switch = false;
}
- if (ib->is_const_ib)
+ if (ib->flags & AMDGPU_IB_FLAG_CE)
header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
ring->need_ctx_switch = false;
}
- if (ib->is_const_ib)
+ if (ib->flags & AMDGPU_IB_FLAG_CE)
header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);