struct intel_engine_cs *ring;
struct drm_i915_gem_object *batch_obj;
struct intel_context *ctx;
+ struct drm_i915_gem_request *request;
};
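Moving the request pointer into i915_execbuffer_params means the request travels with the rest of the execbuf state instead of living in a local variable inside i915_gem_do_execbuffer(). A minimal sketch of the lifecycle this enables, using the i915_gem_request_alloc()/i915_gem_request_cancel() interfaces from this series (execbuf_submit() is a hypothetical stand-in for the actual dispatch step, and ring/ctx are assumed to have been looked up already):

	struct i915_execbuffer_params params = { };
	int ret;

	/* Allocate the request up front and track it via the params block. */
	ret = i915_gem_request_alloc(ring, ctx, &params.request);
	if (ret)
		return ret;

	ret = execbuf_submit(&params);	/* hypothetical dispatch step */
	if (ret)
		/* Never submitted, so not on the active list: free it. */
		i915_gem_request_cancel(params.request);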
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm;
struct i915_execbuffer_params params_master; /* XXX: will be removed later */
struct i915_execbuffer_params *params = &params_master;
- struct drm_i915_gem_request *request;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 dispatch_flags;
int ret;
params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
/* Allocate a request for this batch buffer nice and early. */
- ret = i915_gem_request_alloc(ring, ctx, &request);
+ ret = i915_gem_request_alloc(ring, ctx, &params->request);
if (ret)
goto err_batch_unpin;
i915_gem_context_unreference(ctx);
eb_destroy(eb);
+ /*
+ * If the request was created but not successfully submitted then it
+ * must be freed again. If it was submitted then it is being tracked
+ * on the active request list and no clean up is required here.
+ */
+ if (ret && params->request) {
+ i915_gem_request_cancel(params->request);
+ ring->outstanding_lazy_request = NULL;
+ }
+
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
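Two details are worth noting in the error path above. First, the cancel is paired with clearing ring->outstanding_lazy_request: at this stage of the series, i915_gem_request_alloc() presumably still records the new request as the ring's outstanding lazy request, so a cancelled request must also be forgotten there rather than left dangling. Second, with the local variable gone, the remaining consumers in the submission path read the request out of params instead; sketched from the shape of the converted call sites elsewhere in this series (abridged, exact call sites may differ):

	trace_i915_gem_ring_dispatch(params->request, dispatch_flags);
	...
	i915_gem_execbuffer_move_to_active(&eb->vmas, params->request);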