static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
struct drm_i915_gem_request *rq1)
{
+ struct drm_i915_private *dev_priv = rq0->i915;
+
+ /* BUG_ON(!irqs_disabled()); */
+
execlists_update_context(rq0);
if (rq1)
execlists_update_context(rq1);
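+
+ /* Take the uncore lock and a single forcewake reference so the
+  * ELSP writes in execlists_elsp_write() below happen under one
+  * forcewake transaction. */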
+ spin_lock(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
+
execlists_elsp_write(rq0, rq1);
+
+ intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
+ spin_unlock(&dev_priv->uncore.lock);
}
-static void execlists_context_unqueue__locked(struct intel_engine_cs *engine)
+static void execlists_context_unqueue(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
struct drm_i915_gem_request *cursor, *tmp;
execlists_submit_requests(req0, req1);
}
-static void execlists_context_unqueue(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
-
- spin_lock(&dev_priv->uncore.lock);
- intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
-
- execlists_context_unqueue__locked(engine);
-
- intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
- spin_unlock(&dev_priv->uncore.lock);
-}
-
void intel_lrc_irq_handler(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->dev->dev_private;
u32 status_pointer;
unsigned int read_pointer, write_pointer;
- u32 status = 0;
- u32 status_id;
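+ /* Local snapshot of the CSB: [n][0] holds the status dword,
+  * [n][1] the context ID filled in by get_context_status(). */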
+ u32 csb[GEN8_CSB_ENTRIES][2];
+ unsigned int csb_read = 0, i;
unsigned int submit_contexts = 0;
- spin_lock(&engine->execlist_lock);
-
spin_lock(&dev_priv->uncore.lock);
intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));

read_pointer = engine->next_context_status_buffer;
write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
if (read_pointer > write_pointer)
write_pointer += GEN8_CSB_ENTRIES;
while (read_pointer < write_pointer) {
- status = get_context_status(engine, ++read_pointer,
- &status_id);
+ if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
+ break;
+ csb[csb_read][0] = get_context_status(engine, ++read_pointer,
+ &csb[csb_read][1]);
+ csb_read++;
+ }
- if (unlikely(status & GEN8_CTX_STATUS_PREEMPTED)) {
- if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
- if (execlists_check_remove_request(engine, status_id))
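+ /* write_pointer may have been advanced past GEN8_CSB_ENTRIES above
+  * to linearize the read loop; the modulo folds it back into range. */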
+ engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
+
+ /* Update the read pointer to the old write pointer. Manual ringbuffer
+ * management ftw </sarcasm> */
+ I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
+ _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
+ engine->next_context_status_buffer << 8));
+
+ intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
+ spin_unlock(&dev_priv->uncore.lock);
+
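+ /* All MMIO is done and forcewake released; the snapshotted events
+  * can now be processed under the execlist lock alone. */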
+ spin_lock(&engine->execlist_lock);
+
+ for (i = 0; i < csb_read; i++) {
+ if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
+ if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
+ if (execlists_check_remove_request(engine, csb[i][1]))
WARN(1, "Lite Restored request removed from queue\n");
} else
WARN(1, "Preemption without Lite Restore\n");
}
- if (status & (GEN8_CTX_STATUS_ACTIVE_IDLE |
+ if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
GEN8_CTX_STATUS_ELEMENT_SWITCH))
submit_contexts +=
- execlists_check_remove_request(engine,
- status_id);
+ execlists_check_remove_request(engine, csb[i][1]);
}
if (submit_contexts) {
if (!engine->disable_lite_restore_wa ||
- (status & GEN8_CTX_STATUS_ACTIVE_IDLE))
- execlists_context_unqueue__locked(engine);
+ (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
+ execlists_context_unqueue(engine);
}
- engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
-
- /* Update the read pointer to the old write pointer. Manual ringbuffer
- * management ftw </sarcasm> */
- I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
- _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
- engine->next_context_status_buffer << 8));
-
- intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
- spin_unlock(&dev_priv->uncore.lock);
-
spin_unlock(&engine->execlist_lock);
if (unlikely(submit_contexts > 2))
DRM_ERROR("More than two context complete events?\n");
}