/*
 * (C) Copyright 2016 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/slab.h>
#include <linux/dma-fence.h>
#include <linux/reservation.h>

#include "i915_sw_fence.h"

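/*
 * Overview (an editorial summary of the code in this file, not taken from
 * upstream documentation): an i915_sw_fence is a lightweight signaling
 * primitive built around a waitqueue and a count of pending events.
 * fence->pending starts at 1; each await adds one, each completion removes
 * one, and when the count drops to zero the notify callback packed into
 * fence->flags is invoked and every waiter on fence->wait is woken. Fences
 * may wait on other i915_sw_fences, on dma-fences, and on reservation
 * objects, forming a dependency graph that must remain acyclic.
 */
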
#define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */

static DEFINE_SPINLOCK(i915_sw_fence_lock);

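/*
 * fence->flags packs the notify function pointer together with bookkeeping
 * bits in its low bits, so the callback is recovered by masking with
 * I915_SW_FENCE_MASK before the call.
 */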
static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
                                  enum i915_sw_fence_notify state)
{
        i915_sw_fence_notify_t fn;

        fn = (i915_sw_fence_notify_t)(fence->flags & I915_SW_FENCE_MASK);
        return fn(fence, state);
}

static void i915_sw_fence_free(struct kref *kref)
{
        struct i915_sw_fence *fence = container_of(kref, typeof(*fence), kref);

        WARN_ON(atomic_read(&fence->pending) > 0);

        /* Let the notify callback reclaim its container, else assume kfree */
        if (fence->flags & I915_SW_FENCE_MASK)
                __i915_sw_fence_notify(fence, FENCE_FREE);
        else
                kfree(fence);
}

static void i915_sw_fence_put(struct i915_sw_fence *fence)
{
        kref_put(&fence->kref, i915_sw_fence_free);
}

static struct i915_sw_fence *i915_sw_fence_get(struct i915_sw_fence *fence)
{
        kref_get(&fence->kref);
        return fence;
}

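/*
 * Wake everything waiting on @fence. When invoked from another fence's
 * wakeup (@continuation != NULL), sleeping tasks (autoremove_wake_function
 * entries) are woken directly while callback entries are deferred onto
 * @continuation; the outermost caller then drains its local list, turning
 * recursion through the fence graph into iteration (see the comment below).
 */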
static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
                                        struct list_head *continuation)
{
        wait_queue_head_t *x = &fence->wait;
        wait_queue_t *pos, *next;
        unsigned long flags;

        atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */

        /*
         * To prevent unbounded recursion as we traverse the graph of
         * i915_sw_fences, we move the task_list from this fence (the
         * next ready fence) to the tail of the original fence's
         * task_list, so its entries are added to the list of waiters
         * to be woken.
         */

        spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
        if (continuation) {
                list_for_each_entry_safe(pos, next, &x->task_list, task_list) {
                        if (pos->func == autoremove_wake_function)
                                pos->func(pos, TASK_NORMAL, 0, continuation);
                        else
                                list_move_tail(&pos->task_list, continuation);
                }
        } else {
                LIST_HEAD(extra);

                do {
                        list_for_each_entry_safe(pos, next,
                                                 &x->task_list, task_list)
                                pos->func(pos, TASK_NORMAL, 0, &extra);

                        if (list_empty(&extra))
                                break;

                        list_splice_tail_init(&extra, &x->task_list);
                } while (1);
        }
        spin_unlock_irqrestore(&x->lock, flags);
}

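/*
 * Drop one count from @fence->pending. Only the final decrement (1 -> 0)
 * proceeds to notification; the FENCE_COMPLETE callback may return
 * something other than NOTIFY_DONE to take over the wakeup itself,
 * otherwise all waiters are woken here.
 */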
static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
                                     struct list_head *continuation)
{
        if (!atomic_dec_and_test(&fence->pending))
                return;

        if (fence->flags & I915_SW_FENCE_MASK &&
            __i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE)
                return;

        __i915_sw_fence_wake_up_all(fence, continuation);
}

static void i915_sw_fence_complete(struct i915_sw_fence *fence)
{
        if (WARN_ON(i915_sw_fence_done(fence)))
                return;

        __i915_sw_fence_complete(fence, NULL);
}

static void i915_sw_fence_await(struct i915_sw_fence *fence)
{
        WARN_ON(atomic_inc_return(&fence->pending) <= 1);
}

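/*
 * Initialise a fence. Callers normally go through the i915_sw_fence_init()
 * wrapper in i915_sw_fence.h, which supplies the lockdep name and key. The
 * notify function must be aligned so that its low bits are clear, as those
 * bits of fence->flags are reused for bookkeeping (hence the BUG_ON below).
 *
 * Illustrative sketch only; my_notify and my_obj are made-up names, not
 * i915 code:
 *
 *	static int __i915_sw_fence_call
 *	my_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 *	{
 *		return NOTIFY_DONE;
 *	}
 *
 *	i915_sw_fence_init(&my_obj->fence, my_notify);
 *	i915_sw_fence_await_dma_fence(&my_obj->fence, dma, 0, GFP_KERNEL);
 *	i915_sw_fence_commit(&my_obj->fence);
 */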
void __i915_sw_fence_init(struct i915_sw_fence *fence,
                          i915_sw_fence_notify_t fn,
                          const char *name,
                          struct lock_class_key *key)
{
        BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK);

        __init_waitqueue_head(&fence->wait, name, key);
        kref_init(&fence->kref);
        atomic_set(&fence->pending, 1);
        fence->flags = (unsigned long)fn;
}

void i915_sw_fence_commit(struct i915_sw_fence *fence)
{
        i915_sw_fence_complete(fence);
        i915_sw_fence_put(fence);
}

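/*
 * Waitqueue callback run when a signaler completes: detach the entry,
 * forward the completion to the listening fence (wq->private) with the
 * wakeup key passed through as the continuation list, and free the entry
 * if it was allocated in __i915_sw_fence_await_sw_fence().
 */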
static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key)
{
        list_del(&wq->task_list);
        __i915_sw_fence_complete(wq->private, key);
        i915_sw_fence_put(wq->private);
        if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
                kfree(wq);
        return 0;
}

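/*
 * Cycle detection: a depth-first walk from @fence through every
 * i915_sw_fence waiter, looking for @signaler. The CHECKED bit marks
 * visited fences so shared subtrees are walked only once; it is cleared
 * again afterwards, all under the global i915_sw_fence_lock.
 */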
static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
                                    const struct i915_sw_fence * const signaler)
{
        wait_queue_t *wq;

        if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
                return false;

        if (fence == signaler)
                return true;

        list_for_each_entry(wq, &fence->wait.task_list, task_list) {
                if (wq->func != i915_sw_fence_wake)
                        continue;

                if (__i915_sw_fence_check_if_after(wq->private, signaler))
                        return true;
        }

        return false;
}

static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
{
        wait_queue_t *wq;

        if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
                return;

        list_for_each_entry(wq, &fence->wait.task_list, task_list) {
                if (wq->func != i915_sw_fence_wake)
                        continue;

                __i915_sw_fence_clear_checked_bit(wq->private);
        }
}

static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
                                  const struct i915_sw_fence * const signaler)
{
        unsigned long flags;
        bool err;

        if (!IS_ENABLED(CONFIG_I915_SW_FENCE_CHECK_DAG))
                return false;

        spin_lock_irqsave(&i915_sw_fence_lock, flags);
        err = __i915_sw_fence_check_if_after(fence, signaler);
        __i915_sw_fence_clear_checked_bit(fence);
        spin_unlock_irqrestore(&i915_sw_fence_lock, flags);

        return err;
}

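/*
 * Returns 1 if @fence is now waiting upon @signaler, 0 if @signaler was
 * already complete (or was waited upon synchronously), -EINVAL if the wait
 * would create a cycle, or -ENOMEM if no wait-queue entry could be
 * allocated and @gfp does not permit blocking.
 */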
static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
                                          struct i915_sw_fence *signaler,
                                          wait_queue_t *wq, gfp_t gfp)
{
        unsigned long flags;
        int pending;

        if (i915_sw_fence_done(signaler))
                return 0;

        /* The dependency graph must be acyclic. */
        if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
                return -EINVAL;

        pending = 0;
        if (!wq) {
                wq = kmalloc(sizeof(*wq), gfp);
                if (!wq) {
                        /* No memory: fall back to a synchronous wait if we may block */
                        if (!gfpflags_allow_blocking(gfp))
                                return -ENOMEM;

                        i915_sw_fence_wait(signaler);
                        return 0;
                }

                pending |= I915_SW_FENCE_FLAG_ALLOC;
        }

        INIT_LIST_HEAD(&wq->task_list);
        wq->flags = pending;
        wq->func = i915_sw_fence_wake;
        wq->private = i915_sw_fence_get(fence);

        i915_sw_fence_await(fence);

        spin_lock_irqsave(&signaler->wait.lock, flags);
        if (likely(!i915_sw_fence_done(signaler))) {
                __add_wait_queue_tail(&signaler->wait, wq);
                pending = 1;
        } else {
                /* The signaler completed while we were setting up: run the
                 * callback inline rather than queueing it.
                 */
                i915_sw_fence_wake(wq, 0, 0, NULL);
                pending = 0;
        }
        spin_unlock_irqrestore(&signaler->wait.lock, flags);

        return pending;
}

int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
                                 struct i915_sw_fence *signaler,
                                 wait_queue_t *wq)
{
        return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
}

int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
                                     struct i915_sw_fence *signaler,
                                     gfp_t gfp)
{
        return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
}

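/*
 * Bookkeeping for an asynchronous dma-fence wait: the base callback plus an
 * optional timeout timer. cb->timer.function doubles as a "timer has not
 * fired" flag: the timeout handler clears it after committing the fence, so
 * the ordinary dma-fence callback knows not to commit a second time.
 */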
struct i915_sw_dma_fence_cb {
        struct dma_fence_cb base;
        struct i915_sw_fence *fence;
        struct dma_fence *dma;
        struct timer_list timer;
};

static void timer_i915_sw_fence_wake(unsigned long data)
{
        struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data;

        printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n",
               cb->dma->ops->get_driver_name(cb->dma),
               cb->dma->ops->get_timeline_name(cb->dma),
               cb->dma->seqno);
        dma_fence_put(cb->dma);
        cb->dma = NULL;

        i915_sw_fence_commit(cb->fence);
        cb->timer.function = NULL;
}

static void dma_i915_sw_fence_wake(struct dma_fence *dma,
                                   struct dma_fence_cb *data)
{
        struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);

        del_timer_sync(&cb->timer);
        if (cb->timer.function)
                i915_sw_fence_commit(cb->fence);
        dma_fence_put(cb->dma);

        kfree(cb);
}

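/*
 * Returns 1 if an asynchronous wait on @dma was installed, 0 if @dma was
 * already signaled (or was waited upon synchronously after an allocation
 * failure), or a negative error code. A non-zero @timeout (in jiffies)
 * arms a watchdog that force-completes the wait and logs a warning should
 * @dma fail to signal in time.
 */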
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
                                  struct dma_fence *dma,
                                  unsigned long timeout,
                                  gfp_t gfp)
{
        struct i915_sw_dma_fence_cb *cb;
        int ret;

        if (dma_fence_is_signaled(dma))
                return 0;

        cb = kmalloc(sizeof(*cb), gfp);
        if (!cb) {
                if (!gfpflags_allow_blocking(gfp))
                        return -ENOMEM;

                return dma_fence_wait(dma, false);
        }

        cb->fence = i915_sw_fence_get(fence);
        i915_sw_fence_await(fence);

        cb->dma = NULL;
        __setup_timer(&cb->timer,
                      timer_i915_sw_fence_wake, (unsigned long)cb,
                      TIMER_IRQSAFE);
        if (timeout) {
                cb->dma = dma_fence_get(dma);
                mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
        }

        ret = dma_fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
        if (ret == 0) {
                ret = 1;
        } else {
                dma_i915_sw_fence_wake(dma, &cb->base);
                if (ret == -ENOENT) /* fence already signaled */
                        ret = 0;
        }

        return ret;
}

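/*
 * Wait on the fences tracked by a reservation object: for a write, every
 * shared fence plus the exclusive fence; for a read, only the exclusive
 * fence. Fences whose ops match @exclude are skipped. Returns a value with
 * the semantics of i915_sw_fence_await_dma_fence(): positive if any
 * asynchronous wait was installed, 0 if nothing needed waiting on, and
 * negative on error.
 */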
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
                                    struct reservation_object *resv,
                                    const struct dma_fence_ops *exclude,
                                    bool write,
                                    unsigned long timeout,
                                    gfp_t gfp)
{
        struct dma_fence *excl;
        int ret = 0, pending;

        if (write) {
                struct dma_fence **shared;
                unsigned int count, i;

                ret = reservation_object_get_fences_rcu(resv,
                                                        &excl, &count, &shared);
                if (ret)
                        return ret;

                for (i = 0; i < count; i++) {
                        if (shared[i]->ops == exclude)
                                continue;

                        pending = i915_sw_fence_await_dma_fence(fence,
                                                                shared[i],
                                                                timeout,
                                                                gfp);
                        if (pending < 0) {
                                ret = pending;
                                break;
                        }

                        ret |= pending;
                }

                for (i = 0; i < count; i++)
                        dma_fence_put(shared[i]);
                kfree(shared);
        } else {
                excl = reservation_object_get_excl_rcu(resv);
        }

        if (ret >= 0 && excl && excl->ops != exclude) {
                pending = i915_sw_fence_await_dma_fence(fence,
                                                        excl,
                                                        timeout,
                                                        gfp);
                if (pending < 0)
                        ret = pending;
                else
                        ret |= pending;
        }

        dma_fence_put(excl);

        return ret;
}