struct drm_file *file_priv);
void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
					struct intel_engine_cs *ring);
-void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
-					 struct drm_file *file,
-					 struct intel_engine_cs *ring,
-					 struct drm_i915_gem_object *obj);
+void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
				   struct drm_i915_gem_execbuffer2 *args,
				   struct list_head *vmas);
}
void
-i915_gem_execbuffer_retire_commands(struct drm_device *dev,
-				    struct drm_file *file,
-				    struct intel_engine_cs *ring,
-				    struct drm_i915_gem_object *obj)
+i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
{
	/* Unconditionally force add_request to emit a full flush. */
-	ring->gpu_caches_dirty = true;
+	params->ring->gpu_caches_dirty = true;
	/* Add a breadcrumb for the completion of the batch buffer */
-	__i915_add_request(ring, file, obj);
+	__i915_add_request(params->ring, params->file, params->batch_obj);
}
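
For reference, a minimal sketch of what struct i915_execbuffer_params carries, reconstructed only from the fields this patch dereferences (dev, file, ring, batch_obj, dispatch_flags); the actual definition in i915_drv.h is not shown here and likely has further fields, and the field types below are assumptions.

/* Illustrative sketch only -- not the declaration from i915_drv.h. */
struct i915_execbuffer_params {
	struct drm_device		*dev;
	struct drm_file			*file;
	struct intel_engine_cs		*ring;
	struct drm_i915_gem_object	*batch_obj;
	u32				dispatch_flags;	/* type assumed */
};
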
static int
	trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), params->dispatch_flags);
	i915_gem_execbuffer_move_to_active(vmas, ring);
-	i915_gem_execbuffer_retire_commands(params->dev, params->file, ring,
-					    params->batch_obj);
+	i915_gem_execbuffer_retire_commands(params);
error:
	kfree(cliprects);
	trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), params->dispatch_flags);
	i915_gem_execbuffer_move_to_active(vmas, ring);
-	i915_gem_execbuffer_retire_commands(params->dev, params->file, ring, params->batch_obj);
+	i915_gem_execbuffer_retire_commands(params);
	return 0;
}
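
To show why the single-parameter form is convenient, here is a simplified, hypothetical caller: the execbuffer path fills one params block and every downstream helper, including retire_commands, reads from it. The wrapper name do_execbuffer_sketch and its argument list are illustrative only; i915_gem_ringbuffer_submission() and the params fields are taken from the hunks above.

/* Hypothetical caller sketch, assuming the params fields shown in this patch. */
static int do_execbuffer_sketch(struct drm_device *dev, struct drm_file *file,
				struct drm_i915_gem_execbuffer2 *args,
				struct intel_engine_cs *ring,
				struct drm_i915_gem_object *batch_obj,
				struct list_head *vmas, u32 dispatch_flags)
{
	struct i915_execbuffer_params params_master, *params = &params_master;

	/* Fill the block once... */
	params->dev            = dev;
	params->file           = file;
	params->ring           = ring;
	params->batch_obj      = batch_obj;
	params->dispatch_flags = dispatch_flags;

	/* ...and thread it through the submission path, which ends in
	 * i915_gem_execbuffer_retire_commands(params). */
	return i915_gem_ringbuffer_submission(params, args, vmas);
}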