From: Changbin Du
Date: Thu, 5 Jan 2017 08:49:03 +0000 (+0800)
Subject: drm/i915/gvt: dec vgpu->running_workload_num after the workload is really done
X-Git-Tag: v4.10-rc6~31^2~6^2~5^2~5
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=440a9b9fae37dfd7e4c7d76db34fada57f9afd92;p=karo-tx-linux.git

drm/i915/gvt: dec vgpu->running_workload_num after the workload is really done

The vgpu->running_workload_num is used to determine whether a vgpu has
any workload running, so we should make sure the workload is really
done before we decrement running_workload_num. The current place in
complete_current_workload() is too early, since that function is still
processing the workload. This patch moves the decrement to after
workload->complete().

v2: move dec op before wake_up(&scheduler->workload_complete_wq) (Min He)

Signed-off-by: Changbin Du
Reviewed-by: Min He
Signed-off-by: Zhenyu Wang
---

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index c694dd039f3b..e91885dffeff 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload;
+	struct intel_vgpu *vgpu;
 	int event;
 
 	mutex_lock(&gvt->lock);
 
 	workload = scheduler->current_workload[ring_id];
+	vgpu = workload->vgpu;
 
-	if (!workload->status && !workload->vgpu->resetting) {
+	if (!workload->status && !vgpu->resetting) {
 		wait_event(workload->shadow_ctx_status_wq,
 			   !atomic_read(&workload->shadow_ctx_active));
 
@@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 		for_each_set_bit(event, workload->pending_events,
 				 INTEL_GVT_EVENT_MAX)
-			intel_vgpu_trigger_virtual_event(workload->vgpu,
-					event);
+			intel_vgpu_trigger_virtual_event(vgpu, event);
 	}
 
 	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 	scheduler->current_workload[ring_id] = NULL;
 
-	atomic_dec(&workload->vgpu->running_workload_num);
-
 	list_del_init(&workload->list);
 	workload->complete(workload);
 
+	atomic_dec(&vgpu->running_workload_num);
 	wake_up(&scheduler->workload_complete_wq);
 	mutex_unlock(&gvt->lock);
 }
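
[Editor's note, not part of the commit] The ordering matters because anything
sleeping on workload_complete_wq uses running_workload_num as an "is this vgpu
idle?" test. A minimal sketch of such a waiter, assuming the gvt types from
gvt.h are in scope; vgpu_wait_idle() is a hypothetical name, not a function in
this patch:

	#include <linux/wait.h>
	#include <linux/atomic.h>
	#include "gvt.h"	/* struct intel_vgpu, struct intel_gvt_workload_scheduler */

	/* Illustration only: wait until no workload is running on this vgpu. */
	static void vgpu_wait_idle(struct intel_vgpu *vgpu,
				   struct intel_gvt_workload_scheduler *scheduler)
	{
		/*
		 * With the old ordering, running_workload_num could hit zero
		 * while workload->complete() was still running, so a waiter
		 * could wake and tear down state under the completion path.
		 * Decrementing after complete(), and just before wake_up(),
		 * makes "count == 0" imply "workload really done".
		 */
		wait_event(scheduler->workload_complete_wq,
			   !atomic_read(&vgpu->running_workload_num));
	}

This mirrors the idle-wait pattern that the scheduler's
wake_up(&scheduler->workload_complete_wq) exists to serve.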