From: Zhenyu Wang
Date: Thu, 20 Oct 2016 05:30:33 +0000 (+0800)
Subject: drm/i915/gvt: properly access enabled intel_engine_cs
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=0fac21e7e978f8556d3f9bb1b2fadfc722bfe992;p=linux-beck.git

drm/i915/gvt: properly access enabled intel_engine_cs

Switch to the new for_each_engine() helper to properly access enabled
intel_engine_cs, as the i915 core now manages the engine list dynamically.
GVT-g initialization still relies on the ring mask to determine the engine
list, since it runs earlier.

Signed-off-by: Zhenyu Wang
---

diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index d4bd29306d84..0e9b340897e3 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -817,10 +817,11 @@ void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
 
 int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
 {
-	int i;
+	enum intel_engine_id i;
+	struct intel_engine_cs *engine;
 
 	/* each ring has a virtual execlist engine */
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
+	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
 		init_vgpu_execlist(vgpu, i);
 		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
 	}
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index b21115fecf86..3e74fb3d4aa9 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -132,12 +132,13 @@ static int new_mmio_info(struct intel_gvt *gvt,
 
 static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
 {
-	int i;
+	enum intel_engine_id id;
+	struct intel_engine_cs *engine;
 
 	reg &= ~GENMASK(11, 0);
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		if (gvt->dev_priv->engine[i]->mmio_base == reg)
-			return i;
+	for_each_engine(engine, gvt->dev_priv, id) {
+		if (engine->mmio_base == reg)
+			return id;
 	}
 	return -1;
 }
@@ -1306,7 +1307,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	u32 data = *(u32 *)p_data;
 	int ret;
 
-	if (WARN_ON(ring_id < 0))
+	if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
 		return -EINVAL;
 
 	execlist = &vgpu->execlist[ring_id];
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 278db0c180fc..b605ac6137eb 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -37,9 +37,10 @@
 static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 {
 	struct intel_vgpu_execlist *execlist;
-	int i;
+	enum intel_engine_id i;
+	struct intel_engine_cs *engine;
 
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
+	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
 		execlist = &vgpu->execlist[i];
 		if (!list_empty(workload_q_head(vgpu, i)))
 			return true;
@@ -51,7 +52,8 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	int i;
+	enum intel_engine_id i;
+	struct intel_engine_cs *engine;
 
 	/* no target to schedule */
 	if (!scheduler->next_vgpu)
@@ -67,7 +69,7 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 	scheduler->need_reschedule = true;
 
 	/* still have uncompleted workload? */
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
+	for_each_engine(engine, gvt->dev_priv, i) {
 		if (scheduler->current_workload[i]) {
 			gvt_dbg_sched("still have running workload\n");
 			return;
@@ -84,7 +86,7 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 	scheduler->need_reschedule = false;
 
 	/* wake up workload dispatch thread */
-	for (i = 0; i < I915_NUM_ENGINES; i++)
+	for_each_engine(engine, gvt->dev_priv, i)
 		wake_up(&scheduler->waitq[i]);
 }
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 12f825512e9b..a6ba60141ff4 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -510,6 +510,10 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 	init_waitqueue_head(&scheduler->workload_complete_wq);
 
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		/* check ring mask at init time */
+		if (!HAS_ENGINE(gvt->dev_priv, i))
+			continue;
+
 		init_waitqueue_head(&scheduler->waitq[i]);
 
 		param = kzalloc(sizeof(*param), GFP_KERNEL);
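
For readers outside the i915 tree, below is a minimal, self-contained sketch
of the iteration pattern the patch switches to. The struct layout, the
NUM_ENGINES value and the macro body are simplified stand-ins assumed for
illustration only; the real for_each_engine(), enum intel_engine_id and
struct intel_engine_cs are defined in the i915 driver headers and differ in
detail.

/*
 * Standalone sketch (not kernel code): the engine array may contain NULL
 * slots for engines the hardware does not have, so a raw index loop that
 * dereferences every slot can fault, while a for_each_engine()-style
 * helper visits only the populated entries.
 */
#include <stdio.h>
#include <stddef.h>

#define NUM_ENGINES 5			/* stand-in for I915_NUM_ENGINES */

struct engine_cs {
	const char *name;
	unsigned int mmio_base;
};

struct drm_i915_private {
	struct engine_cs *engine[NUM_ENGINES];	/* NULL => engine not enabled */
};

/* assumed shape of the helper: skip NULL (disabled) engine slots */
#define for_each_engine(engine__, dev_priv__, id__)			\
	for ((id__) = 0; (id__) < NUM_ENGINES; (id__)++)		\
		if (((engine__) = (dev_priv__)->engine[(id__)]) != NULL)

int main(void)
{
	struct engine_cs rcs = { "rcs", 0x2000 };
	struct engine_cs bcs = { "bcs", 0x22000 };
	/* only two engines enabled; the remaining slots stay NULL */
	struct drm_i915_private i915 = {
		.engine = { &rcs, NULL, &bcs, NULL, NULL },
	};
	struct engine_cs *engine;
	unsigned int id;

	/* the old pattern, engine[i]->mmio_base for every i, would hit NULL here */
	for_each_engine(engine, &i915, id)
		printf("engine %u: %s, mmio_base 0x%x\n",
		       id, engine->name, engine->mmio_base);

	return 0;
}

Because the helper visits only the enabled engines, code that still indexes
per-ring state directly by ring_id must validate the index itself; that is
why the patch also tightens the WARN_ON() range check in elsp_mmio_write()
and adds the HAS_ENGINE() check before per-ring scheduler state is set up
at init time.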