drm/i915: Move the global sync optimisation to the timeline
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 017cadf54d80c9518efdba6be248df5bcdaf570b..9c34a4c540b503d884564f8cf4f9f9ad34850141 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
  */
 
 #include <linux/prefetch.h>
+#include <linux/dma-fence-array.h>
 
 #include "i915_drv.h"
 
-static const char *i915_fence_get_driver_name(struct fence *fence)
+static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 {
        return "i915";
 }
 
-static const char *i915_fence_get_timeline_name(struct fence *fence)
+static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 {
        /* Timelines are bound by eviction to a VM. However, since
         * we only have a global seqno at the moment, we only have
@@ -39,15 +40,15 @@ static const char *i915_fence_get_timeline_name(struct fence *fence)
         * multiple execution contexts (fence contexts) as we allow
         * engines within a single timeline to execute in parallel.
         */
-       return "global";
+       return to_request(fence)->timeline->common->name;
 }
 
-static bool i915_fence_signaled(struct fence *fence)
+static bool i915_fence_signaled(struct dma_fence *fence)
 {
        return i915_gem_request_completed(to_request(fence));
 }
 
-static bool i915_fence_enable_signaling(struct fence *fence)
+static bool i915_fence_enable_signaling(struct dma_fence *fence)
 {
        if (i915_fence_signaled(fence))
                return false;
@@ -56,55 +57,33 @@ static bool i915_fence_enable_signaling(struct fence *fence)
        return true;
 }
 
-static signed long i915_fence_wait(struct fence *fence,
+static signed long i915_fence_wait(struct dma_fence *fence,
                                   bool interruptible,
-                                  signed long timeout_jiffies)
+                                  signed long timeout)
 {
-       s64 timeout_ns, *timeout;
-       int ret;
-
-       if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
-               timeout_ns = jiffies_to_nsecs(timeout_jiffies);
-               timeout = &timeout_ns;
-       } else {
-               timeout = NULL;
-       }
-
-       ret = i915_wait_request(to_request(fence),
-                               interruptible, timeout,
-                               NO_WAITBOOST);
-       if (ret == -ETIME)
-               return 0;
-
-       if (ret < 0)
-               return ret;
-
-       if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
-               timeout_jiffies = nsecs_to_jiffies(timeout_ns);
-
-       return timeout_jiffies;
+       return i915_wait_request(to_request(fence), interruptible, timeout);
 }
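
The jiffies<->nanoseconds round-trip is gone: i915_fence_wait() now forwards the jiffies timeout straight to i915_wait_request(), which is converted below to the same convention as the rest of the dma-fence machinery. For reference only (not part of this patch), a minimal consumer-side sketch using the core helper dma_fence_wait_timeout(), which reports a negative error if interrupted, 0 on timeout, and the remaining jiffies otherwise:

  #include <linux/dma-fence.h>
  #include <linux/jiffies.h>

  /* Sketch only: wait on an exported fence with a 100ms budget. */
  static int example_fence_wait(struct dma_fence *fence)
  {
          long ret;

          ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
          if (ret < 0)
                  return ret;             /* interrupted, e.g. -ERESTARTSYS */
          if (ret == 0)
                  return -ETIMEDOUT;      /* budget expired */
          return 0;                       /* signalled, ret jiffies to spare */
  }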
 
-static void i915_fence_value_str(struct fence *fence, char *str, int size)
+static void i915_fence_value_str(struct dma_fence *fence, char *str, int size)
 {
        snprintf(str, size, "%u", fence->seqno);
 }
 
-static void i915_fence_timeline_value_str(struct fence *fence, char *str,
+static void i915_fence_timeline_value_str(struct dma_fence *fence, char *str,
                                          int size)
 {
        snprintf(str, size, "%u",
                 intel_engine_get_seqno(to_request(fence)->engine));
 }
 
-static void i915_fence_release(struct fence *fence)
+static void i915_fence_release(struct dma_fence *fence)
 {
        struct drm_i915_gem_request *req = to_request(fence);
 
        kmem_cache_free(req->i915->requests, req);
 }
 
-const struct fence_ops i915_fence_ops = {
+const struct dma_fence_ops i915_fence_ops = {
        .get_driver_name = i915_fence_get_driver_name,
        .get_timeline_name = i915_fence_get_timeline_name,
        .enable_signaling = i915_fence_enable_signaling,
@@ -164,8 +143,11 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 {
        struct i915_gem_active *active, *next;
 
+       lockdep_assert_held(&request->i915->drm.struct_mutex);
+       GEM_BUG_ON(!i915_gem_request_completed(request));
+
        trace_i915_gem_request_retire(request);
-       list_del(&request->link);
+       list_del_init(&request->link);
 
        /* We know the GPU must have read the request to have
         * sent us the seqno + interrupt, so use the position
@@ -214,6 +196,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
        }
 
        i915_gem_context_put(request->ctx);
+
+       dma_fence_signal(&request->fence);
        i915_gem_request_put(request);
 }
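
Retirement now uses list_del_init() instead of list_del() and signals the dma_fence once the request is done. Leaving the link in its self-pointing "empty" state is what lets i915_gem_request_retire_upto() below replace its GEM_BUG_ON with a cheap already-retired check. A generic sketch of that idiom, with placeholder types rather than i915 structures:

  #include <linux/list.h>

  struct node { struct list_head link; };

  /* list_del_init() re-initialises the node, so list_empty() can later be
   * used as an "already removed" test instead of touching stale pointers. */
  static void retire_node(struct node *n)
  {
          list_del_init(&n->link);
  }

  static bool node_retired(const struct node *n)
  {
          return list_empty(&n->link);
  }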
 
@@ -223,10 +207,11 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
        struct drm_i915_gem_request *tmp;
 
        lockdep_assert_held(&req->i915->drm.struct_mutex);
-       GEM_BUG_ON(list_empty(&req->link));
+       if (list_empty(&req->link))
+               return;
 
        do {
-               tmp = list_first_entry(&engine->request_list,
+               tmp = list_first_entry(&engine->timeline->requests,
                                       typeof(*tmp), link);
 
                i915_gem_request_retire(tmp);
@@ -253,68 +238,82 @@ static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
        return 0;
 }
 
-static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
+static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
 {
+       struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
        struct intel_engine_cs *engine;
+       enum intel_engine_id id;
        int ret;
 
        /* Carefully retire all requests without writing to the rings */
-       for_each_engine(engine, dev_priv) {
-               ret = intel_engine_idle(engine,
-                                       I915_WAIT_INTERRUPTIBLE |
-                                       I915_WAIT_LOCKED);
-               if (ret)
-                       return ret;
-       }
-       i915_gem_retire_requests(dev_priv);
+       ret = i915_gem_wait_for_idle(i915,
+                                    I915_WAIT_INTERRUPTIBLE |
+                                    I915_WAIT_LOCKED);
+       if (ret)
+               return ret;
+
+       i915_gem_retire_requests(i915);
 
        /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
-       if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
-               while (intel_kick_waiters(dev_priv) ||
-                      intel_kick_signalers(dev_priv))
+       if (!i915_seqno_passed(seqno, timeline->next_seqno)) {
+               while (intel_kick_waiters(i915) || intel_kick_signalers(i915))
                        yield();
+               yield();
        }
 
        /* Finally reset hw state */
-       for_each_engine(engine, dev_priv)
-               intel_engine_init_seqno(engine, seqno);
+       for_each_engine(engine, i915, id)
+               intel_engine_init_global_seqno(engine, seqno);
+
+       list_for_each_entry(timeline, &i915->gt.timelines, link) {
+               for_each_engine(engine, i915, id) {
+                       struct intel_timeline *tl = &timeline->engine[id];
+
+                       memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
+               }
+       }
 
        return 0;
 }
 
-int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
        if (seqno == 0)
                return -EINVAL;
 
        /* HWS page needs to be set less than what we
         * will inject to ring
         */
-       ret = i915_gem_init_seqno(dev_priv, seqno - 1);
+       ret = i915_gem_init_global_seqno(dev_priv, seqno - 1);
        if (ret)
                return ret;
 
-       dev_priv->next_seqno = seqno;
+       dev_priv->gt.global_timeline.next_seqno = seqno;
        return 0;
 }
 
-static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
+static int i915_gem_get_global_seqno(struct drm_i915_private *dev_priv,
+                                    u32 *seqno)
 {
+       struct i915_gem_timeline *tl = &dev_priv->gt.global_timeline;
+
        /* reserve 0 for non-seqno */
-       if (unlikely(dev_priv->next_seqno == 0)) {
+       if (unlikely(tl->next_seqno == 0)) {
                int ret;
 
-               ret = i915_gem_init_seqno(dev_priv, 0);
+               ret = i915_gem_init_global_seqno(dev_priv, 0);
                if (ret)
                        return ret;
 
-               dev_priv->next_seqno = 1;
+               tl->next_seqno = 1;
        }
 
-       *seqno = dev_priv->next_seqno++;
+       *seqno = tl->next_seqno++;
        return 0;
 }
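
Seqno allocation and the wrap handling above hinge on i915_seqno_passed(), which (as defined in the i915 headers) does a wrap-safe comparison by interpreting the unsigned difference as signed. A sketch of the idea:

  #include <linux/types.h>

  /* Wrap-safe "seq1 is at or after seq2": the signed difference stays
   * positive across a 32-bit wrap as long as the two values are within
   * 2^31 of each other. */
  static inline bool seqno_passed(u32 seq1, u32 seq2)
  {
          return (s32)(seq1 - seq2) >= 0;
  }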
 
@@ -323,17 +322,18 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 {
        struct drm_i915_gem_request *request =
                container_of(fence, typeof(*request), submit);
+       struct intel_engine_cs *engine = request->engine;
+
+       if (state != FENCE_COMPLETE)
+               return NOTIFY_DONE;
 
        /* Will be called from irq-context when using foreign DMA fences */
 
-       switch (state) {
-       case FENCE_COMPLETE:
-               request->engine->submit_request(request);
-               break;
+       engine->timeline->last_submitted_seqno = request->fence.seqno;
 
-       case FENCE_FREE:
-               break;
-       }
+       engine->emit_breadcrumb(request,
+                               request->ring->vaddr + request->postfix);
+       engine->submit_request(request);
 
        return NOTIFY_DONE;
 }
@@ -368,7 +368,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
                return ERR_PTR(ret);
 
        /* Move the oldest request to the slab-cache (if not in use!) */
-       req = list_first_entry_or_null(&engine->request_list,
+       req = list_first_entry_or_null(&engine->timeline->requests,
                                       typeof(*req), link);
        if (req && i915_gem_request_completed(req))
                i915_gem_request_retire(req);
@@ -381,13 +381,13 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
         * of being read by __i915_gem_active_get_rcu(). As such,
         * we have to be very careful when overwriting the contents. During
         * the RCU lookup, we chase the request->engine pointer,
-        * read the request->fence.seqno and increment the reference count.
+        * read the request->global_seqno and increment the reference count.
         *
         * The reference count is incremented atomically. If it is zero,
         * the lookup knows the request is unallocated and complete. Otherwise,
         * it is either still in use, or has been reallocated and reset
-        * with fence_init(). This increment is safe for release as we check
-        * that the request we have a reference to and matches the active
+        * with dma_fence_init(). This increment is safe for release as we
+        * check that the request we have a reference to matches the active
         * request.
         *
         * Before we increment the refcount, we chase the request->engine
@@ -405,22 +405,25 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
        if (!req)
                return ERR_PTR(-ENOMEM);
 
-       ret = i915_gem_get_seqno(dev_priv, &seqno);
+       ret = i915_gem_get_global_seqno(dev_priv, &seqno);
        if (ret)
                goto err;
 
+       req->timeline = engine->timeline;
+
        spin_lock_init(&req->lock);
-       fence_init(&req->fence,
-                  &i915_fence_ops,
-                  &req->lock,
-                  engine->fence_context,
-                  seqno);
+       dma_fence_init(&req->fence,
+                      &i915_fence_ops,
+                      &req->lock,
+                      req->timeline->fence_context,
+                      seqno);
 
        i915_sw_fence_init(&req->submit, submit_notify);
 
        INIT_LIST_HEAD(&req->active_list);
        req->i915 = dev_priv;
        req->engine = engine;
+       req->global_seqno = seqno;
        req->ctx = i915_gem_context_get(ctx);
 
        /* No zalloc, must clear what we need by hand */
@@ -436,6 +439,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
         * away, e.g. because a GPU scheduler has deferred it.
         */
        req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+       GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);
 
        if (i915.enable_execlists)
                ret = intel_logical_ring_alloc_request_extras(req);
@@ -464,32 +468,93 @@ static int
 i915_gem_request_await_request(struct drm_i915_gem_request *to,
                               struct drm_i915_gem_request *from)
 {
-       int idx, ret;
+       int ret;
 
        GEM_BUG_ON(to == from);
 
-       if (to->engine == from->engine)
+       if (to->timeline == from->timeline)
                return 0;
 
-       idx = intel_engine_sync_index(from->engine, to->engine);
-       if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
+       if (to->engine == from->engine) {
+               ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
+                                                      &from->submit,
+                                                      GFP_KERNEL);
+               return ret < 0 ? ret : 0;
+       }
+
+       if (!from->global_seqno) {
+               ret = i915_sw_fence_await_dma_fence(&to->submit,
+                                                   &from->fence, 0,
+                                                   GFP_KERNEL);
+               return ret < 0 ? ret : 0;
+       }
+
+       if (from->global_seqno <= to->timeline->sync_seqno[from->engine->id])
                return 0;
 
        trace_i915_gem_ring_sync_to(to, from);
        if (!i915.semaphores) {
-               ret = i915_wait_request(from,
-                                       I915_WAIT_INTERRUPTIBLE |
-                                       I915_WAIT_LOCKED,
-                                       NULL, NO_WAITBOOST);
-               if (ret)
-                       return ret;
+               if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
+                       ret = i915_sw_fence_await_dma_fence(&to->submit,
+                                                           &from->fence, 0,
+                                                           GFP_KERNEL);
+                       if (ret < 0)
+                               return ret;
+               }
        } else {
                ret = to->engine->semaphore.sync_to(to, from);
                if (ret)
                        return ret;
        }
 
-       from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
+       to->timeline->sync_seqno[from->engine->id] = from->global_seqno;
+       return 0;
+}
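
This is the optimisation the patch subject refers to: the last-synced seqno bookkeeping moves from the engine's semaphore state to the requesting timeline, indexed by source engine, so a second await against an equal-or-older request from that engine can be skipped. Reduced to its essentials (a sketch assuming the i915 private headers, field names as used above):

  /* Sketch: an inter-engine await is redundant if this timeline has already
   * ordered itself after a request at least as new from that engine. */
  static bool await_is_redundant(const struct intel_timeline *tl,
                                 const struct drm_i915_gem_request *from)
  {
          return from->global_seqno &&
                 from->global_seqno <= tl->sync_seqno[from->engine->id];
  }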
+
+int
+i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
+                                struct dma_fence *fence)
+{
+       struct dma_fence_array *array;
+       int ret;
+       int i;
+
+       if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+               return 0;
+
+       if (dma_fence_is_i915(fence))
+               return i915_gem_request_await_request(req, to_request(fence));
+
+       if (!dma_fence_is_array(fence)) {
+               ret = i915_sw_fence_await_dma_fence(&req->submit,
+                                                   fence, I915_FENCE_TIMEOUT,
+                                                   GFP_KERNEL);
+               return ret < 0 ? ret : 0;
+       }
+
+       /* Note that if the fence-array was created in signal-on-any mode,
+        * we should *not* decompose it into its individual fences. However,
+        * we don't currently store which mode the fence-array is operating
+        * in. Fortunately, the only user of signal-on-any is private to
+        * amdgpu and we should not see any incoming fence-array from
+        * sync-file being in signal-on-any mode.
+        */
+
+       array = to_dma_fence_array(fence);
+       for (i = 0; i < array->num_fences; i++) {
+               struct dma_fence *child = array->fences[i];
+
+               if (dma_fence_is_i915(child))
+                       ret = i915_gem_request_await_request(req,
+                                                            to_request(child));
+               else
+                       ret = i915_sw_fence_await_dma_fence(&req->submit,
+                                                           child, I915_FENCE_TIMEOUT,
+                                                           GFP_KERNEL);
+               if (ret < 0)
+                       return ret;
+       }
+
        return 0;
 }
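
A hypothetical caller sketch, roughly how an explicit-fencing submission path might feed an incoming sync_file fence (possibly a merged dma_fence_array) into the new helper; sync_file_get_fence() is the core helper, and error handling is trimmed:

  #include <linux/sync_file.h>
  #include <linux/dma-fence.h>

  static int await_in_fence(struct drm_i915_gem_request *req, int fence_fd)
  {
          struct dma_fence *in_fence;
          int err;

          in_fence = sync_file_get_fence(fence_fd);
          if (!in_fence)
                  return -EINVAL;

          /* Decomposes a fence-array and takes the cheapest await for
           * each child (native request, sw-fence, or nothing at all). */
          err = i915_gem_request_await_dma_fence(req, in_fence);

          dma_fence_put(in_fence);
          return err;
  }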
 
@@ -518,33 +583,41 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
                              struct drm_i915_gem_object *obj,
                              bool write)
 {
-       struct i915_gem_active *active;
-       unsigned long active_mask;
-       int idx;
+       struct dma_fence *excl;
+       int ret = 0;
 
        if (write) {
-               active_mask = i915_gem_object_get_active(obj);
-               active = obj->last_read;
+               struct dma_fence **shared;
+               unsigned int count, i;
+
+               ret = reservation_object_get_fences_rcu(obj->resv,
+                                                       &excl, &count, &shared);
+               if (ret)
+                       return ret;
+
+               for (i = 0; i < count; i++) {
+                       ret = i915_gem_request_await_dma_fence(to, shared[i]);
+                       if (ret)
+                               break;
+
+                       dma_fence_put(shared[i]);
+               }
+
+               for (; i < count; i++)
+                       dma_fence_put(shared[i]);
+               kfree(shared);
        } else {
-               active_mask = 1;
-               active = &obj->last_write;
+               excl = reservation_object_get_excl_rcu(obj->resv);
        }
 
-       for_each_active(active_mask, idx) {
-               struct drm_i915_gem_request *request;
-               int ret;
-
-               request = i915_gem_active_peek(&active[idx],
-                                              &obj->base.dev->struct_mutex);
-               if (!request)
-                       continue;
+       if (excl) {
+               if (ret == 0)
+                       ret = i915_gem_request_await_dma_fence(to, excl);
 
-               ret = i915_gem_request_await_request(to, request);
-               if (ret)
-                       return ret;
+               dma_fence_put(excl);
        }
 
-       return 0;
+       return ret;
 }
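
Ordering against an object now goes through its reservation object rather than the driver's last_read/last_write trackers: a reader only needs to wait for the exclusive (last write) fence, while a writer must also wait for every shared (read) fence. A hypothetical caller sketch for a copy that reads src and writes dst:

  static int await_copy_deps(struct drm_i915_gem_request *req,
                             struct drm_i915_gem_object *src,
                             struct drm_i915_gem_object *dst)
  {
          int err;

          /* Read-only access: order after the last writer only. */
          err = i915_gem_request_await_object(req, src, false);
          if (err)
                  return err;

          /* Write access: order after all readers and the last writer. */
          return i915_gem_request_await_object(req, dst, true);
  }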
 
 static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
@@ -577,10 +650,11 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 {
        struct intel_engine_cs *engine = request->engine;
        struct intel_ring *ring = request->ring;
-       u32 request_start;
-       u32 reserved_tail;
-       int ret;
+       struct intel_timeline *timeline = request->timeline;
+       struct drm_i915_gem_request *prev;
+       int err;
 
+       lockdep_assert_held(&request->i915->drm.struct_mutex);
        trace_i915_gem_request_add(request);
 
        /*
@@ -588,8 +662,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
         * should already have been reserved in the ring buffer. Let the ring
         * know that it is time to use that space up.
         */
-       request_start = ring->tail;
-       reserved_tail = request->reserved_space;
        request->reserved_space = 0;
 
        /*
@@ -600,10 +672,10 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
         * what.
         */
        if (flush_caches) {
-               ret = engine->emit_flush(request, EMIT_FLUSH);
+               err = engine->emit_flush(request, EMIT_FLUSH);
 
                /* Not allowed to fail! */
-               WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
+               WARN(err, "engine->emit_flush() failed: %d!\n", err);
        }
 
        /* Record the position of the start of the breadcrumb so that
@@ -611,31 +683,28 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
         * GPU processing the request, we never over-estimate the
         * position of the ring's HEAD.
         */
+       err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
+       GEM_BUG_ON(err);
        request->postfix = ring->tail;
-
-       /* Not allowed to fail! */
-       ret = engine->emit_request(request);
-       WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);
-
-       /* Sanity check that the reserved size was large enough. */
-       ret = ring->tail - request_start;
-       if (ret < 0)
-               ret += ring->size;
-       WARN_ONCE(ret > reserved_tail,
-                 "Not enough space reserved (%d bytes) "
-                 "for adding the request (%d bytes)\n",
-                 reserved_tail, ret);
+       ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
 
        /* Seal the request and mark it as pending execution. Note that
         * we may inspect this state, without holding any locks, during
         * hangcheck. Hence we apply the barrier to ensure that we do not
         * see a more recent value in the hws than we are tracking.
         */
+
+       prev = i915_gem_active_raw(&timeline->last_request,
+                                  &request->i915->drm.struct_mutex);
+       if (prev)
+               i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
+                                            &request->submitq);
+
        request->emitted_jiffies = jiffies;
-       request->previous_seqno = engine->last_submitted_seqno;
-       engine->last_submitted_seqno = request->fence.seqno;
-       i915_gem_active_set(&engine->last_request, request);
-       list_add_tail(&request->link, &engine->request_list);
+       request->previous_seqno = timeline->last_pending_seqno;
+       timeline->last_pending_seqno = request->fence.seqno;
+       i915_gem_active_set(&timeline->last_request, request);
+       list_add_tail(&request->link, &timeline->requests);
        list_add_tail(&request->ring_link, &ring->request_list);
 
        i915_gem_mark_busy(engine);
@@ -704,7 +773,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
 
        timeout_us += local_clock_us(&cpu);
        do {
-               if (i915_gem_request_completed(req))
+               if (__i915_gem_request_completed(req))
                        return true;
 
                if (signal_pending_state(state, current))
@@ -719,76 +788,101 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
        return false;
 }
 
+static long
+__i915_request_wait_for_submit(struct drm_i915_gem_request *request,
+                              unsigned int flags,
+                              long timeout)
+{
+       const int state = flags & I915_WAIT_INTERRUPTIBLE ?
+               TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+       wait_queue_head_t *q = &request->i915->gpu_error.wait_queue;
+       DEFINE_WAIT(reset);
+       DEFINE_WAIT(wait);
+
+       if (flags & I915_WAIT_LOCKED)
+               add_wait_queue(q, &reset);
+
+       do {
+               prepare_to_wait(&request->submit.wait, &wait, state);
+
+               if (i915_sw_fence_done(&request->submit))
+                       break;
+
+               if (flags & I915_WAIT_LOCKED &&
+                   i915_reset_in_progress(&request->i915->gpu_error)) {
+                       __set_current_state(TASK_RUNNING);
+                       i915_reset(request->i915);
+                       reset_wait_queue(q, &reset);
+                       continue;
+               }
+
+               if (signal_pending_state(state, current)) {
+                       timeout = -ERESTARTSYS;
+                       break;
+               }
+
+               timeout = io_schedule_timeout(timeout);
+       } while (timeout);
+       finish_wait(&request->submit.wait, &wait);
+
+       if (flags & I915_WAIT_LOCKED)
+               remove_wait_queue(q, &reset);
+
+       return timeout;
+}
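
__i915_request_wait_for_submit() follows the canonical prepare_to_wait()/finish_wait() pattern, with the extra twist of also sleeping on the global reset wait-queue so that a locked waiter can notice a hang and perform the reset itself. The skeleton it builds on, with a placeholder waitq and done() callback rather than i915 symbols:

  #include <linux/wait.h>
  #include <linux/sched.h>

  static long wait_for_condition(wait_queue_head_t *waitq,
                                 bool (*done)(void *), void *data,
                                 long timeout)
  {
          DEFINE_WAIT(wait);

          do {
                  prepare_to_wait(waitq, &wait, TASK_INTERRUPTIBLE);
                  if (done(data))
                          break;

                  if (signal_pending(current)) {
                          timeout = -ERESTARTSYS;
                          break;
                  }

                  timeout = io_schedule_timeout(timeout);
          } while (timeout);
          finish_wait(waitq, &wait);

          return timeout;         /* <0 error, 0 timed out, else jiffies left */
  }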
+
 /**
  * i915_wait_request - wait until execution of request has finished
- * @req: duh!
+ * @req: the request to wait upon
  * @flags: how to wait
- * @timeout: in - how long to wait (NULL forever); out - how much time remaining
- * @rps: client to charge for RPS boosting
+ * @timeout: how long to wait in jiffies
  *
- * Note: It is of utmost importance that the passed in seqno and reset_counter
- * values have been read by the caller in an smp safe manner. Where read-side
- * locks are involved, it is sufficient to read the reset_counter before
- * unlocking the lock that protects the seqno. For lockless tricks, the
- * reset_counter _must_ be read before, and an appropriate smp_rmb must be
- * inserted.
+ * i915_wait_request() waits for the request to be completed, for a
+ * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
+ * unbounded wait).
  *
- * Returns 0 if the request was found within the alloted time. Else returns the
- * errno with remaining time filled in timeout argument.
+ * If the caller holds the struct_mutex, it must pass I915_WAIT_LOCKED in via
+ * the flags; conversely, if the struct_mutex is not held, the caller must not
+ * specify that the wait is locked.
+ *
+ * Returns the remaining time (in jiffies) if the request completed, which may
+ * be zero, or -ETIME if the request is unfinished after the timeout expires.
+ * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
+ * pending before the request completes.
  */
-int i915_wait_request(struct drm_i915_gem_request *req,
-                     unsigned int flags,
-                     s64 *timeout,
-                     struct intel_rps_client *rps)
+long i915_wait_request(struct drm_i915_gem_request *req,
+                      unsigned int flags,
+                      long timeout)
 {
        const int state = flags & I915_WAIT_INTERRUPTIBLE ?
                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
        DEFINE_WAIT(reset);
        struct intel_wait wait;
-       unsigned long timeout_remain;
-       int ret = 0;
 
        might_sleep();
 #if IS_ENABLED(CONFIG_LOCKDEP)
-       GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
+       GEM_BUG_ON(debug_locks &&
+                  !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
                   !!(flags & I915_WAIT_LOCKED));
 #endif
+       GEM_BUG_ON(timeout < 0);
 
        if (i915_gem_request_completed(req))
-               return 0;
-
-       timeout_remain = MAX_SCHEDULE_TIMEOUT;
-       if (timeout) {
-               if (WARN_ON(*timeout < 0))
-                       return -EINVAL;
+               return timeout;
 
-               if (*timeout == 0)
-                       return -ETIME;
-
-               /* Record current time in case interrupted, or wedged */
-               timeout_remain = nsecs_to_jiffies_timeout(*timeout);
-               *timeout += ktime_get_raw_ns();
-       }
+       if (!timeout)
+               return -ETIME;
 
        trace_i915_gem_request_wait_begin(req);
 
-       /* This client is about to stall waiting for the GPU. In many cases
-        * this is undesirable and limits the throughput of the system, as
-        * many clients cannot continue processing user input/output whilst
-        * blocked. RPS autotuning may take tens of milliseconds to respond
-        * to the GPU load and thus incurs additional latency for the client.
-        * We can circumvent that by promoting the GPU frequency to maximum
-        * before we wait. This makes the GPU throttle up much more quickly
-        * (good for benchmarks and user experience, e.g. window animations),
-        * but at a cost of spending more power processing the workload
-        * (bad for battery). Not all clients even want their results
-        * immediately and for them we should just let the GPU select its own
-        * frequency to maximise efficiency. To prevent a single client from
-        * forcing the clocks too high for the whole system, we only allow
-        * each client to waitboost once in a busy period.
-        */
-       if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
-               gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
+       if (!i915_sw_fence_done(&req->submit)) {
+               timeout = __i915_request_wait_for_submit(req, flags, timeout);
+               if (timeout < 0)
+                       goto complete;
+
+               GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
+       }
+       GEM_BUG_ON(!req->global_seqno);
 
        /* Optimistic short spin before touching IRQs */
        if (i915_spin_request(req, state, 5))
@@ -798,7 +892,7 @@ int i915_wait_request(struct drm_i915_gem_request *req,
        if (flags & I915_WAIT_LOCKED)
                add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
-       intel_wait_init(&wait, req->fence.seqno);
+       intel_wait_init(&wait, req->global_seqno);
        if (intel_engine_add_wait(req->engine, &wait))
                /* In order to check that we haven't missed the interrupt
                 * as we enabled it, we need to kick ourselves to do a
@@ -808,16 +902,17 @@ int i915_wait_request(struct drm_i915_gem_request *req,
 
        for (;;) {
                if (signal_pending_state(state, current)) {
-                       ret = -ERESTARTSYS;
+                       timeout = -ERESTARTSYS;
                        break;
                }
 
-               timeout_remain = io_schedule_timeout(timeout_remain);
-               if (timeout_remain == 0) {
-                       ret = -ETIME;
+               if (!timeout) {
+                       timeout = -ETIME;
                        break;
                }
 
+               timeout = io_schedule_timeout(timeout);
+
                if (intel_wait_complete(&wait))
                        break;
 
@@ -864,47 +959,15 @@ wakeup:
 complete:
        trace_i915_gem_request_wait_end(req);
 
-       if (timeout) {
-               *timeout -= ktime_get_raw_ns();
-               if (*timeout < 0)
-                       *timeout = 0;
-
-               /*
-                * Apparently ktime isn't accurate enough and occasionally has a
-                * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
-                * things up to make the test happy. We allow up to 1 jiffy.
-                *
-                * This is a regrssion from the timespec->ktime conversion.
-                */
-               if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
-                       *timeout = 0;
-       }
-
-       if (IS_RPS_USER(rps) &&
-           req->fence.seqno == req->engine->last_submitted_seqno) {
-               /* The GPU is now idle and this client has stalled.
-                * Since no other client has submitted a request in the
-                * meantime, assume that this client is the only one
-                * supplying work to the GPU but is unable to keep that
-                * work supplied because it is waiting. Since the GPU is
-                * then never kept fully busy, RPS autoclocking will
-                * keep the clocks relatively low, causing further delays.
-                * Compensate by giving the synchronous client credit for
-                * a waitboost next time.
-                */
-               spin_lock(&req->i915->rps.client_lock);
-               list_del_init(&rps->link);
-               spin_unlock(&req->i915->rps.client_lock);
-       }
-
-       return ret;
+       return timeout;
 }
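
Callers of i915_wait_request() are converted to the new convention accordingly; a hypothetical sketch of a converted call site (flag combination chosen for illustration):

  static int wait_for_request(struct drm_i915_gem_request *req)
  {
          long timeout;

          timeout = i915_wait_request(req,
                                      I915_WAIT_INTERRUPTIBLE |
                                      I915_WAIT_LOCKED,
                                      MAX_SCHEDULE_TIMEOUT);
          if (timeout < 0)
                  return timeout; /* e.g. -ERESTARTSYS, or -ETIME when bounded */

          return 0;
  }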
 
 static bool engine_retire_requests(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request, *next;
 
-       list_for_each_entry_safe(request, next, &engine->request_list, link) {
+       list_for_each_entry_safe(request, next,
+                                &engine->timeline->requests, link) {
                if (!i915_gem_request_completed(request))
                        return false;