drivers/gpu/drm/i915/i915_gem_request.c
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>

#include "i915_drv.h"

static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
        return "i915";
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
        return to_request(fence)->timeline->common->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
        return i915_gem_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
        if (i915_fence_signaled(fence))
                return false;

        intel_engine_enable_signaling(to_request(fence));
        return true;
}

static signed long i915_fence_wait(struct dma_fence *fence,
                                   bool interruptible,
                                   signed long timeout)
{
        return i915_wait_request(to_request(fence), interruptible, timeout);
}

static void i915_fence_release(struct dma_fence *fence)
{
        struct drm_i915_gem_request *req = to_request(fence);

        /* The request is put onto an RCU freelist (i.e. the address
         * is immediately reused), so mark the fences as being freed now.
         * Otherwise the debugobjects for the fences are only marked as
         * freed when the slab cache itself is freed, and so we would get
         * caught trying to reuse dead objects.
         */
        i915_sw_fence_fini(&req->submit);
        i915_sw_fence_fini(&req->execute);

        kmem_cache_free(req->i915->requests, req);
}

const struct dma_fence_ops i915_fence_ops = {
        .get_driver_name = i915_fence_get_driver_name,
        .get_timeline_name = i915_fence_get_timeline_name,
        .enable_signaling = i915_fence_enable_signaling,
        .signaled = i915_fence_signaled,
        .wait = i915_fence_wait,
        .release = i915_fence_release,
};

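/* Track the request in the client's list of outstanding requests, so
 * that per-client activity (e.g. throttling) can be derived from
 * file_priv->mm.request_list.
 */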
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
                                   struct drm_file *file)
{
        struct drm_i915_private *dev_private;
        struct drm_i915_file_private *file_priv;

        WARN_ON(!req || !file || req->file_priv);

        if (!req || !file)
                return -EINVAL;

        if (req->file_priv)
                return -EINVAL;

        dev_private = req->i915;
        file_priv = file->driver_priv;

        spin_lock(&file_priv->mm.lock);
        req->file_priv = file_priv;
        list_add_tail(&req->client_list, &file_priv->mm.request_list);
        spin_unlock(&file_priv->mm.lock);

        return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
        struct drm_i915_file_private *file_priv = request->file_priv;

        if (!file_priv)
                return;

        spin_lock(&file_priv->mm.lock);
        list_del(&request->client_list);
        request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);
}

static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
{
        return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct drm_i915_private *i915,
                     struct i915_dependency *dep)
{
        kmem_cache_free(i915->dependencies, dep);
}

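/* Record that @pt waits upon @signal: the dependency is added to
 * @signal's list of waiters and to @pt's list of signalers, forming
 * the edges of the graph walked by the scheduler.
 */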
static void
__i915_priotree_add_dependency(struct i915_priotree *pt,
                               struct i915_priotree *signal,
                               struct i915_dependency *dep,
                               unsigned long flags)
{
        INIT_LIST_HEAD(&dep->dfs_link);
        list_add(&dep->wait_link, &signal->waiters_list);
        list_add(&dep->signal_link, &pt->signalers_list);
        dep->signaler = signal;
        dep->flags = flags;
}

static int
i915_priotree_add_dependency(struct drm_i915_private *i915,
                             struct i915_priotree *pt,
                             struct i915_priotree *signal)
{
        struct i915_dependency *dep;

        dep = i915_dependency_alloc(i915);
        if (!dep)
                return -ENOMEM;

        __i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
        return 0;
}

static void
i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
{
        struct i915_dependency *dep, *next;

        GEM_BUG_ON(!RB_EMPTY_NODE(&pt->node));

        /* Everyone we depended upon (the fences we wait to be signaled)
         * should retire before us and remove themselves from our list.
         * However, retirement is run independently on each timeline and
         * so we may be called out-of-order.
         */
        list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
                list_del(&dep->wait_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(i915, dep);
        }

        /* Remove ourselves from everyone who depends upon us */
        list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
                list_del(&dep->signal_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(i915, dep);
        }
}

static void
i915_priotree_init(struct i915_priotree *pt)
{
        INIT_LIST_HEAD(&pt->signalers_list);
        INIT_LIST_HEAD(&pt->waiters_list);
        RB_CLEAR_NODE(&pt->node);
        pt->priority = INT_MIN;
}

void i915_gem_retire_noop(struct i915_gem_active *active,
                          struct drm_i915_gem_request *request)
{
        /* Space left intentionally blank */
}

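/* Retire a single completed request: remove it from the engine's and
 * ring's lists, run the retirement callback of every i915_gem_active
 * that tracked it, signal its fence, and finally drop a reference to
 * the request.
 */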
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct i915_gem_active *active, *next;

        lockdep_assert_held(&request->i915->drm.struct_mutex);
        GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
        GEM_BUG_ON(!i915_sw_fence_signaled(&request->execute));
        GEM_BUG_ON(!i915_gem_request_completed(request));
        GEM_BUG_ON(!request->i915->gt.active_requests);

        trace_i915_gem_request_retire(request);

        spin_lock_irq(&engine->timeline->lock);
        list_del_init(&request->link);
        spin_unlock_irq(&engine->timeline->lock);

        /* We know the GPU must have read the request to have
         * sent us the seqno + interrupt, so use the position
         * of the tail of the request to update the last known position
         * of the GPU head.
         *
         * Note this requires that we are always called in request
         * completion order.
         */
        list_del(&request->ring_link);
        request->ring->last_retired_head = request->postfix;
        if (!--request->i915->gt.active_requests) {
                GEM_BUG_ON(!request->i915->gt.awake);
                mod_delayed_work(request->i915->wq,
                                 &request->i915->gt.idle_work,
                                 msecs_to_jiffies(100));
        }

        /* Walk through the active list, calling retire on each. This allows
         * objects to track their GPU activity and mark themselves as idle
         * when their *last* active request is completed (updating state
         * tracking lists for eviction, active references for GEM, etc).
         *
         * As the ->retire() may free the node, we decouple it first and
         * pass along the auxiliary information (to avoid dereferencing
         * the node after the callback).
         */
        list_for_each_entry_safe(active, next, &request->active_list, link) {
                /* In microbenchmarks or focusing upon time inside the kernel,
                 * we may spend an inordinate amount of time simply handling
                 * the retirement of requests and processing their callbacks.
                 * Of which, this loop itself is particularly hot due to the
                 * cache misses when jumping around the list of i915_gem_active.
                 * So we try to keep this loop as streamlined as possible and
                 * also prefetch the next i915_gem_active to try and hide
                 * the likely cache miss.
                 */
                prefetchw(next);

                INIT_LIST_HEAD(&active->link);
                RCU_INIT_POINTER(active->request, NULL);

                active->retire(active, request);
        }

        i915_gem_request_remove_from_client(request);

        /* Retirement decays the ban score as it is a sign of ctx progress */
        if (request->ctx->ban_score > 0)
                request->ctx->ban_score--;

        /* The backing object for the context is done after switching to the
         * *next* context. Therefore we cannot retire the previous context until
         * the next context has already started running. However, since we
         * cannot take the required locks at i915_gem_request_submit() we
         * defer the unpinning of the active context to now, retirement of
         * the subsequent request.
         */
        if (engine->last_retired_context)
                engine->context_unpin(engine, engine->last_retired_context);
        engine->last_retired_context = request->ctx;

        dma_fence_signal(&request->fence);

        i915_priotree_fini(request->i915, &request->priotree);
        i915_gem_request_put(request);
}

void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
        struct intel_engine_cs *engine = req->engine;
        struct drm_i915_gem_request *tmp;

        lockdep_assert_held(&req->i915->drm.struct_mutex);
        GEM_BUG_ON(!i915_gem_request_completed(req));

        if (list_empty(&req->link))
                return;

        do {
                tmp = list_first_entry(&engine->timeline->requests,
                                       typeof(*tmp), link);

                i915_gem_request_retire(tmp);
        } while (tmp != req);
}

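/* Reset the global seqno to @seqno: wait for the GPU to idle, retire
 * all outstanding requests, drain the breadcrumb signalers if the
 * seqno would move backwards, and then reprogram every engine and
 * clear the inter-engine sync state.
 */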
static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
        struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int ret;

        /* Carefully retire all requests without writing to the rings */
        ret = i915_gem_wait_for_idle(i915,
                                     I915_WAIT_INTERRUPTIBLE |
                                     I915_WAIT_LOCKED);
        if (ret)
                return ret;

        i915_gem_retire_requests(i915);
        GEM_BUG_ON(i915->gt.active_requests > 1);

        /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
        if (!i915_seqno_passed(seqno, atomic_read(&timeline->seqno))) {
                while (intel_breadcrumbs_busy(i915))
                        cond_resched(); /* spin until threads are complete */
        }
        atomic_set(&timeline->seqno, seqno);

        /* Finally reset hw state */
        for_each_engine(engine, i915, id)
                intel_engine_init_global_seqno(engine, seqno);

        list_for_each_entry(timeline, &i915->gt.timelines, link) {
                for_each_engine(engine, i915, id) {
                        struct intel_timeline *tl = &timeline->engine[id];

                        memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
                }
        }

        return 0;
}

int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        if (seqno == 0)
                return -EINVAL;

        /* The HWS page needs to be set to one less than the seqno we
         * will inject into the ring, so that the next request to be
         * submitted is assigned exactly the seqno requested.
         */
        return i915_gem_init_global_seqno(dev_priv, seqno - 1);
}

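/* Account for one more in-flight request and make sure there is
 * enough seqno space left before wraparound; if not, idle the GPU
 * and restart the global seqno from zero.
 */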
static int reserve_global_seqno(struct drm_i915_private *i915)
{
        u32 active_requests = ++i915->gt.active_requests;
        u32 seqno = atomic_read(&i915->gt.global_timeline.seqno);
        int ret;

        /* Reservation is fine until we need to wrap around */
        if (likely(seqno + active_requests > seqno))
                return 0;

        ret = i915_gem_init_global_seqno(i915, 0);
        if (ret) {
                i915->gt.active_requests--;
                return ret;
        }

        return 0;
}

static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
{
        /* seqno only incremented under a mutex */
        return ++tl->seqno.counter;
}

static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
{
        return atomic_inc_return(&tl->seqno);
}

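/* Move the request from its per-context timeline onto the engine's
 * execution timeline, assigning it a global seqno and writing its
 * breadcrumb into the ring. Must be called with the engine timeline
 * lock held; completes the request's execute fence.
 */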
void __i915_gem_request_submit(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_timeline *timeline;
        u32 seqno;

        /* Transfer from per-context onto the global per-engine timeline */
        timeline = engine->timeline;
        GEM_BUG_ON(timeline == request->timeline);
        assert_spin_locked(&timeline->lock);

        seqno = timeline_get_seqno(timeline->common);
        GEM_BUG_ON(!seqno);
        GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));

        GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno, seqno));
        request->previous_seqno = timeline->last_submitted_seqno;
        timeline->last_submitted_seqno = seqno;

        /* We may be recursing from the signal callback of another i915 fence */
        spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
        request->global_seqno = seqno;
        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
                intel_engine_enable_signaling(request);
        spin_unlock(&request->lock);

        GEM_BUG_ON(!request->global_seqno);
        engine->emit_breadcrumb(request,
                                request->ring->vaddr + request->postfix);

        spin_lock(&request->timeline->lock);
        list_move_tail(&request->link, &timeline->requests);
        spin_unlock(&request->timeline->lock);

        i915_sw_fence_commit(&request->execute);
}

void i915_gem_request_submit(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        unsigned long flags;

        /* Will be called from irq-context when using foreign fences. */
        spin_lock_irqsave(&engine->timeline->lock, flags);

        __i915_gem_request_submit(request);

        spin_unlock_irqrestore(&engine->timeline->lock, flags);
}

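/* Notification callback for the submit fence: once all of its
 * dependencies have signaled, pass the request to the engine backend
 * for execution; when the last fence reference is dropped, release
 * our reference to the request.
 */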
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
        struct drm_i915_gem_request *request =
                container_of(fence, typeof(*request), submit);

        switch (state) {
        case FENCE_COMPLETE:
                request->engine->submit_request(request);
                break;

        case FENCE_FREE:
                i915_gem_request_put(request);
                break;
        }

        return NOTIFY_DONE;
}

static int __i915_sw_fence_call
execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
        struct drm_i915_gem_request *request =
                container_of(fence, typeof(*request), execute);

        switch (state) {
        case FENCE_COMPLETE:
                break;

        case FENCE_FREE:
                i915_gem_request_put(request);
                break;
        }

        return NOTIFY_DONE;
}

/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *       This can be NULL if the request is not directly related to
 *       any specific user context, in which case this function will
 *       choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
                       struct i915_gem_context *ctx)
{
        struct drm_i915_private *dev_priv = engine->i915;
        struct drm_i915_gem_request *req;
        int ret;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
         * EIO if the GPU is already wedged.
         */
        if (i915_terminally_wedged(&dev_priv->gpu_error))
                return ERR_PTR(-EIO);

        /* Pinning the contexts may generate requests in order to acquire
         * GGTT space, so do this first before we reserve a seqno for
         * ourselves.
         */
        ret = engine->context_pin(engine, ctx);
        if (ret)
                return ERR_PTR(ret);

        ret = reserve_global_seqno(dev_priv);
        if (ret)
                goto err_unpin;

        /* Move the oldest request to the slab-cache (if not in use!) */
        req = list_first_entry_or_null(&engine->timeline->requests,
                                       typeof(*req), link);
        if (req && __i915_gem_request_completed(req))
                i915_gem_request_retire(req);

        /* Beware: Dragons be flying overhead.
         *
         * We use RCU to look up requests in flight. The lookups may
         * race with the request being allocated from the slab freelist.
         * That is, the request we are writing to here may be in the process
         * of being read by __i915_gem_active_get_rcu(). As such,
         * we have to be very careful when overwriting the contents. During
         * the RCU lookup, we chase the request->engine pointer,
         * read the request->global_seqno and increment the reference count.
         *
         * The reference count is incremented atomically. If it is zero,
         * the lookup knows the request is unallocated and complete. Otherwise,
         * it is either still in use, or has been reallocated and reset
         * with dma_fence_init(). This increment is safe for release as we
         * check that the request we have a reference to matches the active
         * request.
         *
         * Before we increment the refcount, we chase the request->engine
         * pointer. We must not call kmem_cache_zalloc() or else we set
         * that pointer to NULL and cause a crash during the lookup. If
         * we see the request is completed (based on the value of the
         * old engine and seqno), the lookup is complete and reports NULL.
         * If we decide the request is not completed (new engine or seqno),
         * then we grab a reference and double check that it is still the
         * active request - which it won't be, and restart the lookup.
         *
         * Do not use kmem_cache_zalloc() here!
         */
        req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto err_unreserve;
        }

        req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
        GEM_BUG_ON(req->timeline == engine->timeline);

        spin_lock_init(&req->lock);
        dma_fence_init(&req->fence,
                       &i915_fence_ops,
                       &req->lock,
                       req->timeline->fence_context,
                       __timeline_get_seqno(req->timeline->common));

        /* We bump the ref for the fence chain */
        i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
        i915_sw_fence_init(&i915_gem_request_get(req)->execute, execute_notify);

        /* Ensure that the execute fence completes after the submit fence -
         * as we complete the execute fence from within the submit fence
         * callback, its completion would otherwise be visible first.
         */
        i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);

        i915_priotree_init(&req->priotree);

        INIT_LIST_HEAD(&req->active_list);
        req->i915 = dev_priv;
        req->engine = engine;
        req->ctx = ctx;

        /* No zalloc, must clear what we need by hand */
        req->global_seqno = 0;
        req->file_priv = NULL;
        req->batch = NULL;

        /*
         * Reserve space in the ring buffer for all the commands required to
         * eventually emit this request. This is to guarantee that the
         * i915_add_request() call can't fail. Note that the reserve may need
         * to be redone if the request is not actually submitted straight
         * away, e.g. because a GPU scheduler has deferred it.
         */
        req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
        GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);

        ret = engine->request_alloc(req);
        if (ret)
                goto err_ctx;

        /* Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
        req->head = req->ring->tail;

        return req;

err_ctx:
        /* Make sure we didn't add ourselves to external state before freeing */
        GEM_BUG_ON(!list_empty(&req->active_list));
        GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
        GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));

        kmem_cache_free(dev_priv->requests, req);
err_unreserve:
        dev_priv->gt.active_requests--;
err_unpin:
        engine->context_unpin(engine, ctx);
        return ERR_PTR(ret);
}

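/* Order @to after @from, recording a scheduler dependency if the
 * backend supports it, and then using the cheapest wait available:
 * nothing if they share a timeline, a submit-fence wait on the same
 * engine, and between engines either a semaphore or an asynchronous
 * wait on @from's completion fence.
 */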
static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
                               struct drm_i915_gem_request *from)
{
        int ret;

        GEM_BUG_ON(to == from);

        if (to->engine->schedule) {
                ret = i915_priotree_add_dependency(to->i915,
                                                   &to->priotree,
                                                   &from->priotree);
                if (ret < 0)
                        return ret;
        }

        if (to->timeline == from->timeline)
                return 0;

        if (to->engine == from->engine) {
                ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
                                                       &from->submit,
                                                       GFP_KERNEL);
                return ret < 0 ? ret : 0;
        }

        if (!from->global_seqno) {
                ret = i915_sw_fence_await_dma_fence(&to->submit,
                                                    &from->fence, 0,
                                                    GFP_KERNEL);
                return ret < 0 ? ret : 0;
        }

        if (from->global_seqno <= to->timeline->sync_seqno[from->engine->id])
                return 0;

        trace_i915_gem_ring_sync_to(to, from);
        if (!i915.semaphores) {
                if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
                        ret = i915_sw_fence_await_dma_fence(&to->submit,
                                                            &from->fence, 0,
                                                            GFP_KERNEL);
                        if (ret < 0)
                                return ret;
                }
        } else {
                ret = to->engine->semaphore.sync_to(to, from);
                if (ret)
                        return ret;
        }

        to->timeline->sync_seqno[from->engine->id] = from->global_seqno;
        return 0;
}

int
i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
                                 struct dma_fence *fence)
{
        struct dma_fence_array *array;
        int ret;
        int i;

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return 0;

        if (dma_fence_is_i915(fence))
                return i915_gem_request_await_request(req, to_request(fence));

        if (!dma_fence_is_array(fence)) {
                ret = i915_sw_fence_await_dma_fence(&req->submit,
                                                    fence, I915_FENCE_TIMEOUT,
                                                    GFP_KERNEL);
                return ret < 0 ? ret : 0;
        }

        /* Note that if the fence-array was created in signal-on-any mode,
         * we should *not* decompose it into its individual fences. However,
         * we don't currently store which mode the fence-array is operating
         * in. Fortunately, the only user of signal-on-any is private to
         * amdgpu and we should not see any incoming fence-array from
         * sync-file being in signal-on-any mode.
         */

        array = to_dma_fence_array(fence);
        for (i = 0; i < array->num_fences; i++) {
                struct dma_fence *child = array->fences[i];

                if (dma_fence_is_i915(child))
                        ret = i915_gem_request_await_request(req,
                                                             to_request(child));
                else
                        ret = i915_sw_fence_await_dma_fence(&req->submit,
                                                            child, I915_FENCE_TIMEOUT,
                                                            GFP_KERNEL);
                if (ret < 0)
                        return ret;
        }

        return 0;
}

/**
 * i915_gem_request_await_object - set this request to (async) wait upon a bo
 *
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
                              struct drm_i915_gem_object *obj,
                              bool write)
{
        struct dma_fence *excl;
        int ret = 0;

        if (write) {
                struct dma_fence **shared;
                unsigned int count, i;

                ret = reservation_object_get_fences_rcu(obj->resv,
                                                        &excl, &count, &shared);
                if (ret)
                        return ret;

                for (i = 0; i < count; i++) {
                        ret = i915_gem_request_await_dma_fence(to, shared[i]);
                        if (ret)
                                break;

                        dma_fence_put(shared[i]);
                }

                for (; i < count; i++)
                        dma_fence_put(shared[i]);
                kfree(shared);
        } else {
                excl = reservation_object_get_excl_rcu(obj->resv);
        }

        if (excl) {
                if (ret == 0)
                        ret = i915_gem_request_await_dma_fence(to, excl);

                dma_fence_put(excl);
        }

        return ret;
}

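/* Called on the first request after idling: take a runtime-pm wakeref
 * for the GT, re-enable powersaving/RPS and schedule the retirement
 * worker.
 */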
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;

        if (dev_priv->gt.awake)
                return;

        GEM_BUG_ON(!dev_priv->gt.active_requests);

        intel_runtime_pm_get_noresume(dev_priv);
        dev_priv->gt.awake = true;

        intel_enable_gt_powersave(dev_priv);
        i915_update_gfx_val(dev_priv);
        if (INTEL_GEN(dev_priv) >= 6)
                gen6_rps_busy(dev_priv);

        queue_delayed_work(dev_priv->wq,
                           &dev_priv->gt.retire_work,
                           round_jiffies_up_relative(HZ));
}

/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_ring *ring = request->ring;
        struct intel_timeline *timeline = request->timeline;
        struct drm_i915_gem_request *prev;
        int err;

        lockdep_assert_held(&request->i915->drm.struct_mutex);
        trace_i915_gem_request_add(request);

        /* Make sure that no request gazumped us - if it was allocated after
         * our i915_gem_request_alloc() and called __i915_add_request() before
         * us, the timeline will hold its seqno which is later than ours.
         */
        GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
                                     request->fence.seqno));

        /*
         * To ensure that this call will not fail, space for its emissions
         * should already have been reserved in the ring buffer. Let the ring
         * know that it is time to use that space up.
         */
        request->reserved_space = 0;

        /*
         * Emit any outstanding flushes - execbuf can fail to emit the flush
         * after having emitted the batchbuffer command. Hence we need to fix
         * things up similar to emitting the lazy request. The difference here
         * is that the flush _must_ happen before the next request, no matter
         * what.
         */
        if (flush_caches) {
                err = engine->emit_flush(request, EMIT_FLUSH);

                /* Not allowed to fail! */
                WARN(err, "engine->emit_flush() failed: %d!\n", err);
        }

        /* Record the position of the start of the breadcrumb so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the ring's HEAD.
         */
        err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
        GEM_BUG_ON(err);
        request->postfix = ring->tail;
        ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);

        /* Seal the request and mark it as pending execution. Note that
         * we may inspect this state, without holding any locks, during
         * hangcheck. Hence we apply the barrier to ensure that we do not
         * see a more recent value in the hws than we are tracking.
         */

        prev = i915_gem_active_raw(&timeline->last_request,
                                   &request->i915->drm.struct_mutex);
        if (prev) {
                i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
                                             &request->submitq);
                if (engine->schedule)
                        __i915_priotree_add_dependency(&request->priotree,
                                                       &prev->priotree,
                                                       &request->dep,
                                                       0);
        }

        spin_lock_irq(&timeline->lock);
        list_add_tail(&request->link, &timeline->requests);
        spin_unlock_irq(&timeline->lock);

        GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
                                     request->fence.seqno));

        timeline->last_submitted_seqno = request->fence.seqno;
        i915_gem_active_set(&timeline->last_request, request);

        list_add_tail(&request->ring_link, &ring->request_list);
        request->emitted_jiffies = jiffies;

        i915_gem_mark_busy(engine);

        /* Let the backend know a new request has arrived that may need
         * to adjust the existing execution schedule due to a high priority
         * request - i.e. we may want to preempt the current request in order
         * to run a high priority dependency chain *before* we can execute this
         * request.
         *
         * This is called before the request is ready to run so that we can
         * decide whether to preempt the entire chain so that it is ready to
         * run at the earliest possible convenience.
         */
        if (engine->schedule)
                engine->schedule(request, request->ctx->priority);

        local_bh_disable();
        i915_sw_fence_commit(&request->submit);
        local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
}

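/* Re-add ourselves to the wait queue if a wakeup (e.g. from a GPU
 * reset) removed us, so that we continue to be notified while
 * waiting.
 */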
static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}

static unsigned long local_clock_us(unsigned int *cpu)
{
        unsigned long t;

        /* Cheaply and approximately convert from nanoseconds to microseconds.
         * The result and subsequent calculations are also defined in the same
         * approximate microseconds units. The principal source of timing
         * error here is from the simple truncation.
         *
         * Note that local_clock() is only defined wrt the current CPU;
         * the comparisons are no longer valid if we switch CPUs. Instead of
         * blocking preemption for the entire busywait, we can detect the CPU
         * switch and use that as an indicator of system load and a reason to
         * stop busywaiting, see busywait_stop().
         */
        *cpu = get_cpu();
        t = local_clock() >> 10;
        put_cpu();

        return t;
}

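/* Give up busywaiting when the timeout has expired, or when we have
 * been moved to another CPU, where the local clock comparison is no
 * longer valid.
 */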
static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
        unsigned int this_cpu;

        if (time_after(local_clock_us(&this_cpu), timeout))
                return true;

        return this_cpu != cpu;
}

bool __i915_spin_request(const struct drm_i915_gem_request *req,
                         int state, unsigned long timeout_us)
{
        unsigned int cpu;

        /* When waiting for high frequency requests, e.g. during synchronous
         * rendering split between the CPU and GPU, the finite amount of time
         * required to set up the irq and wait upon it limits the response
         * rate. By busywaiting on the request completion for a short while we
         * can service the high frequency waits as quickly as possible. However,
         * if it is a slow request, we want to sleep as quickly as possible.
         * The tradeoff between waiting and sleeping is roughly the time it
         * takes to sleep on a request, on the order of a microsecond.
         */

        timeout_us += local_clock_us(&cpu);
        do {
                if (__i915_gem_request_completed(req))
                        return true;

                if (signal_pending_state(state, current))
                        break;

                if (busywait_stop(timeout_us, cpu))
                        break;

                cpu_relax();
        } while (!need_resched());

        return false;
}

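/* Sleep until the request's execute fence has been completed (i.e.
 * the request has been passed to the hardware), handling any GPU
 * reset that occurs in the meantime if we hold the struct_mutex.
 */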
static long
__i915_request_wait_for_execute(struct drm_i915_gem_request *request,
                                unsigned int flags,
                                long timeout)
{
        const int state = flags & I915_WAIT_INTERRUPTIBLE ?
                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
        wait_queue_head_t *q = &request->i915->gpu_error.wait_queue;
        DEFINE_WAIT(reset);
        DEFINE_WAIT(wait);

        if (flags & I915_WAIT_LOCKED)
                add_wait_queue(q, &reset);

        do {
                prepare_to_wait(&request->execute.wait, &wait, state);

                if (i915_sw_fence_done(&request->execute))
                        break;

                if (flags & I915_WAIT_LOCKED &&
                    i915_reset_in_progress(&request->i915->gpu_error)) {
                        __set_current_state(TASK_RUNNING);
                        i915_reset(request->i915);
                        reset_wait_queue(q, &reset);
                        continue;
                }

                if (signal_pending_state(state, current)) {
                        timeout = -ERESTARTSYS;
                        break;
                }

                if (!timeout) {
                        timeout = -ETIME;
                        break;
                }

                timeout = io_schedule_timeout(timeout);
        } while (1);
        finish_wait(&request->execute.wait, &wait);

        if (flags & I915_WAIT_LOCKED)
                remove_wait_queue(q, &reset);

        return timeout;
}

/**
 * i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_wait_request() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 * If the caller holds the struct_mutex, it must pass I915_WAIT_LOCKED
 * in via the flags; conversely, if the struct_mutex is not held, the
 * caller must not specify that the wait is locked.
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero, or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 */
long i915_wait_request(struct drm_i915_gem_request *req,
                       unsigned int flags,
                       long timeout)
{
        const int state = flags & I915_WAIT_INTERRUPTIBLE ?
                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
        DEFINE_WAIT(reset);
        struct intel_wait wait;

        might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
        GEM_BUG_ON(debug_locks &&
                   !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
                   !!(flags & I915_WAIT_LOCKED));
#endif
        GEM_BUG_ON(timeout < 0);

        if (i915_gem_request_completed(req))
                return timeout;

        if (!timeout)
                return -ETIME;

        trace_i915_gem_request_wait_begin(req);

        if (!i915_sw_fence_done(&req->execute)) {
                timeout = __i915_request_wait_for_execute(req, flags, timeout);
                if (timeout < 0)
                        goto complete;

                GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
        }
        GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
        GEM_BUG_ON(!req->global_seqno);

        /* Optimistic short spin before touching IRQs */
        if (i915_spin_request(req, state, 5))
                goto complete;

        set_current_state(state);
        if (flags & I915_WAIT_LOCKED)
                add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

        intel_wait_init(&wait, req->global_seqno);
        if (intel_engine_add_wait(req->engine, &wait))
                /* In order to check that we haven't missed the interrupt
                 * as we enabled it, we need to kick ourselves to do a
                 * coherent check on the seqno before we sleep.
                 */
                goto wakeup;

        for (;;) {
                if (signal_pending_state(state, current)) {
                        timeout = -ERESTARTSYS;
                        break;
                }

                if (!timeout) {
                        timeout = -ETIME;
                        break;
                }

                timeout = io_schedule_timeout(timeout);

                if (intel_wait_complete(&wait))
                        break;

                set_current_state(state);

wakeup:
                /* Carefully check if the request is complete, giving time
                 * for the seqno to be visible following the interrupt.
                 * We also have to check in case we are kicked by the GPU
                 * reset in order to drop the struct_mutex.
                 */
                if (__i915_request_irq_complete(req))
                        break;

                /* If the GPU is hung, and we hold the lock, reset the GPU
                 * and then check for completion. On a full reset, the engine's
                 * HW seqno will be advanced past us and we are complete.
                 * If we do a partial reset, we have to wait for the GPU to
                 * resume and update the breadcrumb.
                 *
                 * If we don't hold the mutex, we can just wait for the worker
                 * to come along and update the breadcrumb (either directly
                 * itself, or indirectly by recovering the GPU).
                 */
                if (flags & I915_WAIT_LOCKED &&
                    i915_reset_in_progress(&req->i915->gpu_error)) {
                        __set_current_state(TASK_RUNNING);
                        i915_reset(req->i915);
                        reset_wait_queue(&req->i915->gpu_error.wait_queue,
                                         &reset);
                        continue;
                }

                /* Only spin if we know the GPU is processing this request */
                if (i915_spin_request(req, state, 2))
                        break;
        }

        intel_engine_remove_wait(req->engine, &wait);
        if (flags & I915_WAIT_LOCKED)
                remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
        __set_current_state(TASK_RUNNING);

complete:
        trace_i915_gem_request_wait_end(req);

        return timeout;
}

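/* Retire completed requests on this engine in submission order,
 * stopping as soon as we encounter a request still in flight.
 */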
static void engine_retire_requests(struct intel_engine_cs *engine)
{
        struct drm_i915_gem_request *request, *next;

        list_for_each_entry_safe(request, next,
                                 &engine->timeline->requests, link) {
                if (!__i915_gem_request_completed(request))
                        return;

                i915_gem_request_retire(request);
        }
}

void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        if (!dev_priv->gt.active_requests)
                return;

        for_each_engine(engine, dev_priv, id)
                engine_retire_requests(engine);
}