list_move_tail(&request->link, &timeline->requests);
spin_unlock(&request->timeline->lock);
+ i915_sw_fence_commit(&request->execute);
+
spin_unlock_irqrestore(&timeline->lock, flags);
return NOTIFY_DONE;
}

+static int __i915_sw_fence_call
+execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ return NOTIFY_DONE;
+}
+
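
For reference, an i915_sw_fence notify callback is invoked with FENCE_COMPLETE when the fence signals and with FENCE_FREE when the fence is being released; the execute fence needs no work in either case, hence the empty execute_notify() above. A minimal sketch of the general shape such a callback takes (the function name and comments are illustrative, not part of this patch):

	static int __i915_sw_fence_call
	example_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
	{
		switch (state) {
		case FENCE_COMPLETE:
			/* The fence has signalled; completion-time work would go
			 * here, typically after recovering the embedding object
			 * with container_of(). The execute fence has none.
			 */
			break;

		case FENCE_FREE:
			/* The fence is being released. */
			break;
		}

		return NOTIFY_DONE;
	}
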
/**
* i915_gem_request_alloc - allocate a request structure
*
__timeline_get_seqno(req->timeline->common));
i915_sw_fence_init(&req->submit, submit_notify);
+ i915_sw_fence_init(&req->execute, execute_notify);
+ /* Ensure that the execute fence completes after the submit fence -
+ * as we complete the execute fence from within the submit fence
+ * callback, its completion would otherwise be visible first.
+ */
+ i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);
INIT_LIST_HEAD(&req->active_list);
req->i915 = dev_priv;
}
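
With this await in place, the i915_sw_fence_commit(&request->execute) call added to submit_notify() above only marks the execute fence as committed; per the comment, the await keeps it from signalling until the submit fence itself has signalled, so the two phases always become visible in order. A rough illustration of the invariant this buys (the snippet is illustrative, not part of the patch):

	/* Once a waiter observes the execute phase as done, the submit
	 * phase must already have completed.
	 */
	if (i915_sw_fence_done(&req->execute))
		GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
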
static long
-__i915_request_wait_for_submit(struct drm_i915_gem_request *request,
- unsigned int flags,
- long timeout)
+__i915_request_wait_for_execute(struct drm_i915_gem_request *request,
+ unsigned int flags,
+ long timeout)
{
const int state = flags & I915_WAIT_INTERRUPTIBLE ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
add_wait_queue(q, &reset);
do {
- prepare_to_wait(&request->submit.wait, &wait, state);
+ prepare_to_wait(&request->execute.wait, &wait, state);
- if (i915_sw_fence_done(&request->submit))
+ if (i915_sw_fence_done(&request->execute))
break;
if (flags & I915_WAIT_LOCKED &&
timeout = io_schedule_timeout(timeout);
} while (timeout);
- finish_wait(&request->submit.wait, &wait);
+ finish_wait(&request->execute.wait, &wait);
if (flags & I915_WAIT_LOCKED)
remove_wait_queue(q, &reset);
trace_i915_gem_request_wait_begin(req);
- if (!i915_sw_fence_done(&req->submit)) {
- timeout = __i915_request_wait_for_submit(req, flags, timeout);
+ if (!i915_sw_fence_done(&req->execute)) {
+ timeout = __i915_request_wait_for_execute(req, flags, timeout);
if (timeout < 0)
goto complete;
- GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
+ GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
}
+ GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
GEM_BUG_ON(!req->global_seqno);
/* Optimistic short spin before touching IRQs */

struct intel_timeline *timeline;
struct intel_signal_node signaling;
+ /* Fences for the various phases in the request's lifetime.
+ *
+ * The submit fence is used to await upon all of the request's
+ * dependencies. When it is signaled, the request is ready to run.
+ * It is used by the driver to then queue the request for execution.
+ *
+ * The execute fence is used to signal when the request has been
+ * sent to hardware.
+ *
+ * It is illegal for the submit fence of one request to wait upon the
+ * execute fence of an earlier request. It should be sufficient to
+ * wait upon the submit fence of the earlier request.
+ */
struct i915_sw_fence submit;
+ struct i915_sw_fence execute;
wait_queue_t submitq;
+ wait_queue_t execq;
u32 global_seqno;
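
The rule in the comment above is what the driver follows when chaining requests: a later request orders itself behind an earlier one via the earlier request's submit fence, never its execute fence. The embedded submitq/execq entries are there so such fence-on-fence awaits can reuse a preallocated wait-queue entry. A sketch of that usage, assuming 'to' and 'from' are illustrative names for the later and earlier request:

	int err;

	/* Order 'to' behind 'from' using the earlier request's submit fence,
	 * reusing the wait-queue entry embedded in 'to' for the await.
	 */
	err = i915_sw_fence_await_sw_fence(&to->submit, &from->submit,
					   &to->submitq);
	if (err < 0)
		return err;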