/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"

#include <linux/ktime.h>
#include <linux/hrtimer.h>

#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_fence.h"
#include "nouveau_software.h"
#include "nouveau_dma.h"

#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)

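/* Walk the channel's pending list and retire every fence whose sequence
 * number the GPU has already written back (read from the per-channel
 * ref-count register on NV10+, or from the software-method counter on
 * older chips), calling any attached work callback with signalled = true.
 */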
void
nouveau_fence_update(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct nouveau_fence *tmp, *fence;
        uint32_t sequence;

        spin_lock(&chan->fence.lock);

        /* Fetch the last sequence if the channel is still up and running */
        if (likely(!list_empty(&chan->fence.pending))) {
                if (USE_REFCNT(dev))
                        sequence = nvchan_rd32(chan, 0x48);
                else
                        sequence = atomic_read(&chan->fence.last_sequence_irq);

                if (chan->fence.sequence_ack == sequence)
                        goto out;
                chan->fence.sequence_ack = sequence;
        }

        list_for_each_entry_safe(fence, tmp, &chan->fence.pending, head) {
                if (fence->sequence > chan->fence.sequence_ack)
                        break;

                fence->channel = NULL;
                list_del(&fence->head);
                if (fence->work)
                        fence->work(fence->priv, true);

                nouveau_fence_unref(&fence);
        }

out:
        spin_unlock(&chan->fence.lock);
}

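/* Give the fence the channel's next sequence number, add it to the
 * pending list and emit the method that makes the GPU write the sequence
 * back (REF_CNT on NV10+, a software method on older chips).  The fence
 * gets a three-second timeout for nouveau_fence_wait().
 */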
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int ret;

        ret = RING_SPACE(chan, 2);
        if (ret)
                return ret;

        if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
                nouveau_fence_update(chan);

                BUG_ON(chan->fence.sequence ==
                       chan->fence.sequence_ack - 1);
        }

        fence->sequence = ++chan->fence.sequence;
        fence->channel = chan;

        kref_get(&fence->kref);
        spin_lock(&chan->fence.lock);
        list_add_tail(&fence->head, &chan->fence.pending);
        spin_unlock(&chan->fence.lock);

        if (USE_REFCNT(dev)) {
                if (dev_priv->card_type < NV_C0)
                        BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
                else
                        BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
        } else {
                BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
        }
        OUT_RING (chan, fence->sequence);
        FIRE_RING(chan);
        fence->timeout = jiffies + 3 * DRM_HZ;

        return 0;
}

bool
nouveau_fence_done(struct nouveau_fence *fence)
{
        if (fence->channel)
                nouveau_fence_update(fence->channel);
        return !fence->channel;
}

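/* Wait for the fence to signal, either by busy-looping (lazy == false) or
 * by sleeping with exponential back-off capped at 1ms (lazy == true).
 * Returns -EBUSY once the fence's timeout expires, or -ERESTARTSYS if
 * intr is set and a signal is pending.
 */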
int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
        unsigned long sleep_time = NSEC_PER_MSEC / 1000;
        ktime_t t;
        int ret = 0;

        while (!nouveau_fence_done(fence)) {
                if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
                        ret = -EBUSY;
                        break;
                }

                __set_current_state(intr ? TASK_INTERRUPTIBLE :
                                           TASK_UNINTERRUPTIBLE);
                if (lazy) {
                        t = ktime_set(0, sleep_time);
                        schedule_hrtimeout(&t, HRTIMER_MODE_REL);
                        sleep_time *= 2;
                        if (sleep_time > NSEC_PER_MSEC)
                                sleep_time = NSEC_PER_MSEC;
                }

                if (intr && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }

        __set_current_state(TASK_RUNNING);
        return ret;
}

static void
nouveau_fence_del(struct kref *kref)
{
        struct nouveau_fence *fence = container_of(kref, typeof(*fence), kref);
        kfree(fence);
}

void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
        if (*pfence)
                kref_put(&(*pfence)->kref, nouveau_fence_del);
        *pfence = NULL;
}

struct nouveau_fence *
nouveau_fence_ref(struct nouveau_fence *fence)
{
        kref_get(&fence->kref);
        return fence;
}

int
nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
{
        struct nouveau_fence *fence;
        int ret = 0;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return -ENOMEM;
        kref_init(&fence->kref);

        if (chan) {
                ret = nouveau_fence_emit(fence, chan);
                if (ret)
                        nouveau_fence_unref(&fence);
        }

        *pfence = fence;
        return ret;
}

struct nouveau_semaphore {
        struct kref ref;
        struct drm_device *dev;
        struct drm_mm_node *mem;
};

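/* Attach a one-shot callback to the fence.  If the fence has already
 * signalled (it no longer has a channel), run the callback immediately;
 * otherwise it runs when the fence is retired (signalled == true) or when
 * the channel is torn down (signalled == false).
 */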
void
nouveau_fence_work(struct nouveau_fence *fence,
                   void (*work)(void *priv, bool signalled),
                   void *priv)
{
        if (!fence->channel) {
                work(priv, true);
        } else {
                fence->work = work;
                fence->priv = priv;
        }
}

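/* Cross-channel sync uses small slots carved out of a shared VRAM buffer:
 * 4 bytes per slot before NV84, 16 bytes from NV84 onwards.  Slots are
 * allocated from the dev_priv->fence.heap drm_mm, zeroed on allocation,
 * and returned to the heap once both channels are done with them.
 */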
static struct nouveau_semaphore *
semaphore_alloc(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_semaphore *sema;
        int size = (dev_priv->chipset < 0x84) ? 4 : 16;
        int ret, i;

        if (!USE_SEMA(dev))
                return NULL;

        sema = kmalloc(sizeof(*sema), GFP_KERNEL);
        if (!sema)
                goto fail;

        ret = drm_mm_pre_get(&dev_priv->fence.heap);
        if (ret)
                goto fail;

        spin_lock(&dev_priv->fence.lock);
        sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
        if (sema->mem)
                sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
        spin_unlock(&dev_priv->fence.lock);

        if (!sema->mem)
                goto fail;

        kref_init(&sema->ref);
        sema->dev = dev;
        for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
                nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);

        return sema;
fail:
        kfree(sema);
        return NULL;
}

static void
semaphore_free(struct kref *ref)
{
        struct nouveau_semaphore *sema =
                container_of(ref, struct nouveau_semaphore, ref);
        struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

        spin_lock(&dev_priv->fence.lock);
        drm_mm_put_block(sema->mem);
        spin_unlock(&dev_priv->fence.lock);

        kfree(sema);
}

static void
semaphore_work(void *priv, bool signalled)
{
        struct nouveau_semaphore *sema = priv;
        struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

        if (unlikely(!signalled))
                nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);

        kref_put(&sema->ref, semaphore_free);
}

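/* Emit the methods that make chan stall until the semaphore slot at
 * sema->mem is released, using the method form appropriate for the
 * chipset (pre-NV84 DMA semaphore, NV84+ or Fermi 64-bit semaphore).
 * A fence with semaphore_work attached keeps the slot alive until the
 * acquire has actually executed.
 */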
static int
semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_fence *fence = NULL;
        u64 offset = chan->fence.vma.offset + sema->mem->start;
        int ret;

        if (dev_priv->chipset < 0x84) {
                ret = RING_SPACE(chan, 4);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 3);
                OUT_RING  (chan, NvSema);
                OUT_RING  (chan, offset);
                OUT_RING  (chan, 1);
        } else
        if (dev_priv->chipset < 0xc0) {
                ret = RING_SPACE(chan, 7);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
                OUT_RING  (chan, chan->vram_handle);
                BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
                OUT_RING  (chan, upper_32_bits(offset));
                OUT_RING  (chan, lower_32_bits(offset));
                OUT_RING  (chan, 1);
                OUT_RING  (chan, 1); /* ACQUIRE_EQ */
        } else {
                ret = RING_SPACE(chan, 5);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
                OUT_RING  (chan, upper_32_bits(offset));
                OUT_RING  (chan, lower_32_bits(offset));
                OUT_RING  (chan, 1);
                OUT_RING  (chan, 0x1001); /* ACQUIRE_EQ */
        }

        /* Delay semaphore destruction until its work is done */
        ret = nouveau_fence_new(chan, &fence);
        if (ret)
                return ret;

        kref_get(&sema->ref);
        nouveau_fence_work(fence, semaphore_work, sema);
        nouveau_fence_unref(&fence);
        return 0;
}

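/* Counterpart to semaphore_acquire(): emit the methods that release the
 * slot at sema->mem from chan, again in the chipset-appropriate form, and
 * keep the slot alive with a fence until the release has executed.
 */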
static int
semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_fence *fence = NULL;
        u64 offset = chan->fence.vma.offset + sema->mem->start;
        int ret;

        if (dev_priv->chipset < 0x84) {
                ret = RING_SPACE(chan, 5);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
                OUT_RING  (chan, NvSema);
                OUT_RING  (chan, offset);
                BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
                OUT_RING  (chan, 1);
        } else
        if (dev_priv->chipset < 0xc0) {
                ret = RING_SPACE(chan, 7);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
                OUT_RING  (chan, chan->vram_handle);
                BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
                OUT_RING  (chan, upper_32_bits(offset));
                OUT_RING  (chan, lower_32_bits(offset));
                OUT_RING  (chan, 1);
                OUT_RING  (chan, 2); /* RELEASE */
        } else {
                ret = RING_SPACE(chan, 5);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
                OUT_RING  (chan, upper_32_bits(offset));
                OUT_RING  (chan, lower_32_bits(offset));
                OUT_RING  (chan, 1);
                OUT_RING  (chan, 0x1002); /* RELEASE */
        }

        /* Delay semaphore destruction until its work is done */
        ret = nouveau_fence_new(chan, &fence);
        if (ret)
                return ret;

        kref_get(&sema->ref);
        nouveau_fence_work(fence, semaphore_work, sema);
        nouveau_fence_unref(&fence);
        return 0;
}

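/* Make wchan wait for a fence emitted on another channel.  If hardware
 * semaphores are available and the emitting channel's mutex can be taken
 * without blocking, wchan acquires a semaphore that the other channel
 * then releases; otherwise fall back to a software wait on the fence.
 */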
int
nouveau_fence_sync(struct nouveau_fence *fence,
                   struct nouveau_channel *wchan)
{
        struct nouveau_channel *chan;
        struct drm_device *dev = wchan->dev;
        struct nouveau_semaphore *sema;
        int ret = 0;

        chan = fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
        if (likely(!chan || chan == wchan || nouveau_fence_done(fence)))
                goto out;

        sema = semaphore_alloc(dev);
        if (!sema) {
                /* Early card or broken userspace, fall back to
                 * software sync. */
                ret = nouveau_fence_wait(fence, true, false);
                goto out;
        }

        /* Try to take chan's mutex; if we can't take it right away
         * we have to fall back to software sync to prevent locking
         * order issues.
         */
        if (!mutex_trylock(&chan->mutex)) {
                ret = nouveau_fence_wait(fence, true, false);
                goto out_unref;
        }

        /* Make wchan wait until it gets signalled */
        ret = semaphore_acquire(wchan, sema);
        if (ret)
                goto out_unlock;

        /* Signal the semaphore from chan */
        ret = semaphore_release(chan, sema);

out_unlock:
        mutex_unlock(&chan->mutex);
out_unref:
        kref_put(&sema->ref, semaphore_free);
out:
        if (chan)
                nouveau_channel_put_unlocked(&chan);
        return ret;
}

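/* Per-channel fence setup: bind the NvSw software object on pre-Fermi
 * channels, then make the shared semaphore buffer reachable from this
 * channel, either through the NvSema DMA object (pre-NV84) or by mapping
 * the buffer into the channel's VM (NV84+).
 */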
int
nouveau_fence_channel_init(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *obj = NULL;
        int ret;

        if (dev_priv->card_type < NV_C0) {
                ret = RING_SPACE(chan, 2);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
                OUT_RING  (chan, NvSw);
                FIRE_RING (chan);
        }

        /* Setup area of memory shared between all channels for x-chan sync */
        if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
                struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;

                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
                                             mem->start << PAGE_SHIFT,
                                             mem->size, NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_VRAM, &obj);
                if (ret)
                        return ret;

                ret = nouveau_ramht_insert(chan, NvSema, obj);
                nouveau_gpuobj_ref(NULL, &obj);
                if (ret)
                        return ret;
        } else
        if (USE_SEMA(dev)) {
                /* map fence bo into channel's vm */
                ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
                                         &chan->fence.vma);
                if (ret)
                        return ret;
        }

        atomic_set(&chan->fence.last_sequence_irq, 0);
        return 0;
}

void
nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_fence *tmp, *fence;

        spin_lock(&chan->fence.lock);
        list_for_each_entry_safe(fence, tmp, &chan->fence.pending, head) {
                fence->channel = NULL;
                list_del(&fence->head);

                if (unlikely(fence->work))
                        fence->work(fence->priv, false);

                kref_put(&fence->kref, nouveau_fence_del);
        }
        spin_unlock(&chan->fence.lock);

        nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
}

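/* Device-wide setup: on chipsets with semaphore support, allocate, pin
 * and map the shared VRAM buffer used for cross-channel sync and set up
 * the drm_mm heap that hands out semaphore slots.  nouveau_fence_fini()
 * undoes all of this at unload time.
 */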
int
nouveau_fence_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
        int ret;

        /* Create a shared VRAM heap for cross-channel sync. */
        if (USE_SEMA(dev)) {
                ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
                                     0, 0, NULL, &dev_priv->fence.bo);
                if (ret)
                        return ret;

                ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
                if (ret)
                        goto fail;

                ret = nouveau_bo_map(dev_priv->fence.bo);
                if (ret)
                        goto fail;

                ret = drm_mm_init(&dev_priv->fence.heap, 0,
                                  dev_priv->fence.bo->bo.mem.size);
                if (ret)
                        goto fail;

                spin_lock_init(&dev_priv->fence.lock);
        }

        return 0;
fail:
        nouveau_bo_unmap(dev_priv->fence.bo);
        nouveau_bo_ref(NULL, &dev_priv->fence.bo);
        return ret;
}

void
nouveau_fence_fini(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (USE_SEMA(dev)) {
                drm_mm_takedown(&dev_priv->fence.heap);
                nouveau_bo_unmap(dev_priv->fence.bo);
                nouveau_bo_unpin(dev_priv->fence.bo);
                nouveau_bo_ref(NULL, &dev_priv->fence.bo);
        }
}