V3: directly use pd_addr.
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
if (!ring)
continue;
kthread_park(ring->sched.thread);
+ amd_sched_hw_job_reset(&ring->sched);
}
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(adev);
struct amdgpu_ring *ring = adev->rings[i];
if (!ring)
continue;
+ amd_sched_job_recovery(&ring->sched);
kthread_unpark(ring->sched.thread);
- amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]);
+ kfree(ring_data[i]);
ring_sizes[i] = 0;
ring_data[i] = NULL;
}
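
Putting the two loops above together, the reset path after this change looks roughly like the sketch below (illustrative only, not part of the diff; the loop bound and the elided ASIC reset step are assumptions):

for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
        struct amdgpu_ring *ring = adev->rings[i];

        if (!ring)
                continue;
        /* stop the scheduler thread so no new jobs reach the hardware */
        kthread_park(ring->sched.thread);
        /* detach the hw fences from the scheduler jobs */
        amd_sched_hw_job_reset(&ring->sched);
}
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(adev);

/* ... ASIC reset and IP block resume go here ... */

for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
        struct amdgpu_ring *ring = adev->rings[i];

        if (!ring)
                continue;
        /* resubmit the jobs that never signalled, then restart the scheduler */
        amd_sched_job_recovery(&ring->sched);
        kthread_unpark(ring->sched.thread);
}

With the scheduler replaying the unfinished jobs itself, the old ring restore path (amdgpu_ring_restore()) is no longer needed, so the saved ring contents are simply freed.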
amdgpu_vm_ring_has_compute_vm_bug(ring)))
amdgpu_ring_emit_pipeline_sync(ring);
- if (ring->funcs->emit_vm_flush && job->vm_needs_flush) {
+ if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
+ amdgpu_vm_is_gpu_reset(adev, id))) {
struct fence *fence;
trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
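
The extra amdgpu_vm_is_gpu_reset() condition forces a VM flush for ids whose hardware state was wiped by the reset, even when vm_needs_flush was not set when the job was queued. A minimal sketch of such a check, assuming the id caches the reset count it was last used under (the field names current_gpu_reset_count and gpu_reset_counter are assumptions, not taken from this diff):

static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
                                   struct amdgpu_vm_id *id)
{
        /* true if a GPU reset happened since this id was last flushed */
        return id->current_gpu_reset_count !=
                atomic_read(&adev->gpu_reset_counter);
}

When the check fires, emit_vm_flush reprograms the id with job->vm_pd_addr, which lines up with the v3 note about using pd_addr directly.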