/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"

#include <linux/ktime.h>
#include <linux/hrtimer.h>

#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_fence.h"
#include "nouveau_software.h"
#include "nouveau_dma.h"

#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
#define USE_SEMA(dev)   (nouveau_private(dev)->chipset >= 0x17)
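
/*
 * Two retirement mechanisms are used below: NV10+ chipsets write each
 * fence's sequence number via a REF_CNT method, and it is read back
 * from channel offset 0x48, while older chipsets rely on a software
 * method that records the last retired sequence in last_sequence_irq.
 * NV17+ chipsets additionally provide hardware semaphores, used here
 * for cross-channel synchronisation.
 */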

void
nouveau_fence_update(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct nouveau_fence *tmp, *fence;
        uint32_t sequence;

        spin_lock(&chan->fence.lock);

        /* Fetch the last sequence if the channel is still up and running */
        if (likely(!list_empty(&chan->fence.pending))) {
                if (USE_REFCNT(dev))
                        sequence = nvchan_rd32(chan, 0x48);
                else
                        sequence = atomic_read(&chan->fence.last_sequence_irq);

                if (chan->fence.sequence_ack == sequence)
                        goto out;
                chan->fence.sequence_ack = sequence;
        }

        /* Retire every pending fence the GPU has now passed */
        list_for_each_entry_safe(fence, tmp, &chan->fence.pending, head) {
                if (fence->sequence > chan->fence.sequence_ack)
                        break;
                fence->channel = NULL;
                list_del(&fence->head);
                if (unlikely(fence->work))
                        fence->work(fence->priv, true);
                nouveau_fence_unref(&fence);
        }
out:
        spin_unlock(&chan->fence.lock);
}
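
/*
 * Sequence numbers are 32-bit: if chan->fence.sequence has wrapped all
 * the way around to sequence_ack - 1, the next fence emitted would be
 * assigned sequence_ack itself and instantly (wrongly) look retired to
 * nouveau_fence_update().  The guard below tries one final update and
 * BUG()s rather than emit such a fence.
 */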

int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int ret;

        ret = RING_SPACE(chan, 2);
        if (ret)
                return ret;

        if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
                nouveau_fence_update(chan);

                BUG_ON(chan->fence.sequence ==
                       chan->fence.sequence_ack - 1);
        }

        fence->sequence = ++chan->fence.sequence;
        fence->channel = chan;

        kref_get(&fence->kref);
        spin_lock(&chan->fence.lock);
        list_add_tail(&fence->head, &chan->fence.pending);
        spin_unlock(&chan->fence.lock);

        if (USE_REFCNT(dev)) {
                if (dev_priv->card_type < NV_C0)
                        BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
                else
                        BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
        } else {
                BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
        }
        OUT_RING (chan, fence->sequence);
        FIRE_RING(chan);

        fence->timeout = jiffies + 3 * DRM_HZ;
        return 0;
}

bool
nouveau_fence_done(struct nouveau_fence *fence)
{
        if (fence->channel)
                nouveau_fence_update(fence->channel);
        return !fence->channel;
}

int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
        unsigned long sleep_time = NSEC_PER_MSEC / 1000;
        ktime_t t;
        int ret = 0;

        while (!nouveau_fence_done(fence)) {
                if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
                        ret = -EBUSY;
                        break;
                }

                __set_current_state(intr ? TASK_INTERRUPTIBLE :
                                           TASK_UNINTERRUPTIBLE);
                if (lazy) {
                        /* Sleep with exponential backoff, capped at 1ms */
                        t = ktime_set(0, sleep_time);
                        schedule_hrtimeout(&t, HRTIMER_MODE_REL);
                        sleep_time *= 2;
                        if (sleep_time > NSEC_PER_MSEC)
                                sleep_time = NSEC_PER_MSEC;
                }

                if (intr && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }

        __set_current_state(TASK_RUNNING);
        return ret;
}

static void
nouveau_fence_del(struct kref *kref)
{
        struct nouveau_fence *fence = container_of(kref, typeof(*fence), kref);
        kfree(fence);
}

void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
        if (*pfence)
                kref_put(&(*pfence)->kref, nouveau_fence_del);
        *pfence = NULL;
}

struct nouveau_fence *
nouveau_fence_ref(struct nouveau_fence *fence)
{
        kref_get(&fence->kref);
        return fence;
}

int
nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
{
        struct nouveau_fence *fence;
        int ret = 0;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return -ENOMEM;
        kref_init(&fence->kref);

        if (chan) {
                ret = nouveau_fence_emit(fence, chan);
                if (ret)
                        nouveau_fence_unref(&fence);
        }

        *pfence = fence;
        return ret;
}
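
/*
 * Typical caller flow (an illustrative sketch, not code from this file):
 *
 *      struct nouveau_fence *fence = NULL;
 *      int ret = nouveau_fence_new(chan, &fence);
 *      if (ret == 0) {
 *              ret = nouveau_fence_wait(fence, true, true);
 *              nouveau_fence_unref(&fence);
 *      }
 */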

/* A hardware semaphore cell used to order two channels */
struct nouveau_semaphore {
        struct kref ref;
        struct drm_device *dev;
        struct drm_mm_node *mem;
};

void
nouveau_fence_work(struct nouveau_fence *fence,
                   void (*work)(void *priv, bool signalled),
                   void *priv)
{
        if (!fence->channel) {
                /* Fence already retired, run the work immediately */
                work(priv, true);
        } else {
                fence->work = work;
                fence->priv = priv;
        }
}

static struct nouveau_semaphore *
semaphore_alloc(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_semaphore *sema;
        int size = (dev_priv->chipset < 0x84) ? 4 : 16;
        int ret, i;

        if (!USE_SEMA(dev))
                return NULL;

        sema = kmalloc(sizeof(*sema), GFP_KERNEL);
        if (!sema)
                goto fail;

        ret = drm_mm_pre_get(&dev_priv->fence.heap);
        if (ret)
                goto fail;

        spin_lock(&dev_priv->fence.lock);
        sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
        if (sema->mem)
                sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
        spin_unlock(&dev_priv->fence.lock);

        if (!sema->mem)
                goto fail;

        kref_init(&sema->ref);
        sema->dev = dev;
        for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
                nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);

        return sema;
fail:
        kfree(sema);
        return NULL;
}
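
/*
 * Note the per-chipset cell size above: 4 bytes on pre-NV84 chipsets,
 * 16 bytes on NV84+, presumably to match the larger NV84+ semaphore
 * format; only the first word of a cell is ever written or tested in
 * this file.
 */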

static void
semaphore_free(struct kref *ref)
{
        struct nouveau_semaphore *sema =
                container_of(ref, struct nouveau_semaphore, ref);
        struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

        spin_lock(&dev_priv->fence.lock);
        drm_mm_put_block(sema->mem);
        spin_unlock(&dev_priv->fence.lock);

        kfree(sema);
}

static void
semaphore_work(void *priv, bool signalled)
{
        struct nouveau_semaphore *sema = priv;
        struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

        /* If the fence died unsignalled, force the semaphore open so
         * the acquiring channel cannot hang on it. */
        if (unlikely(!signalled))
                nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);

        kref_put(&sema->ref, semaphore_free);
}
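
/*
 * Semaphore lifetime: semaphore_acquire() and semaphore_release() below
 * each take a reference on the semaphore and attach semaphore_work() to
 * a fence on their channel, so the cell is only returned to the heap
 * once both channels have passed the semaphore commands.
 */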

static int
semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_fence *fence = NULL;
        u64 offset = chan->fence.vma.offset + sema->mem->start;
        int ret;

        if (dev_priv->chipset < 0x84) {
                ret = RING_SPACE(chan, 4);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 3);
                OUT_RING (chan, NvSema);
                OUT_RING (chan, offset);
                OUT_RING (chan, 1);
        } else
        if (dev_priv->chipset < 0xc0) {
                ret = RING_SPACE(chan, 7);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
                OUT_RING (chan, chan->vram_handle);
                BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
                OUT_RING (chan, upper_32_bits(offset));
                OUT_RING (chan, lower_32_bits(offset));
                OUT_RING (chan, 1);
                OUT_RING (chan, 1); /* ACQUIRE_EQ */
        } else {
                ret = RING_SPACE(chan, 5);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
                OUT_RING (chan, upper_32_bits(offset));
                OUT_RING (chan, lower_32_bits(offset));
                OUT_RING (chan, 1);
                OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
        }

        /* Delay semaphore destruction until its work is done */
        ret = nouveau_fence_new(chan, &fence);
        if (ret)
                return ret;

        kref_get(&sema->ref);
        nouveau_fence_work(fence, semaphore_work, sema);
        nouveau_fence_unref(&fence);
        return 0;
}

static int
semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_fence *fence = NULL;
        u64 offset = chan->fence.vma.offset + sema->mem->start;
        int ret;

        if (dev_priv->chipset < 0x84) {
                ret = RING_SPACE(chan, 5);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
                OUT_RING (chan, NvSema);
                OUT_RING (chan, offset);
                BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
                OUT_RING (chan, 1);
        } else
        if (dev_priv->chipset < 0xc0) {
                ret = RING_SPACE(chan, 7);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
                OUT_RING (chan, chan->vram_handle);
                BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
                OUT_RING (chan, upper_32_bits(offset));
                OUT_RING (chan, lower_32_bits(offset));
                OUT_RING (chan, 1);
                OUT_RING (chan, 2); /* RELEASE */
        } else {
                ret = RING_SPACE(chan, 5);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
                OUT_RING (chan, upper_32_bits(offset));
                OUT_RING (chan, lower_32_bits(offset));
                OUT_RING (chan, 1);
                OUT_RING (chan, 0x1002); /* RELEASE */
        }

        /* Delay semaphore destruction until its work is done */
        ret = nouveau_fence_new(chan, &fence);
        if (ret)
                return ret;

        kref_get(&sema->ref);
        nouveau_fence_work(fence, semaphore_work, sema);
        nouveau_fence_unref(&fence);
        return 0;
}

int
nouveau_fence_sync(struct nouveau_fence *fence,
                   struct nouveau_channel *wchan)
{
        struct nouveau_channel *chan;
        struct drm_device *dev = wchan->dev;
        struct nouveau_semaphore *sema;
        int ret = 0;

        chan = fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
        if (likely(!chan || chan == wchan || nouveau_fence_done(fence)))
                goto out;

        sema = semaphore_alloc(dev);
        if (!sema) {
                /* Early card or broken userspace, fall back to
                 * software sync. */
                ret = nouveau_fence_wait(fence, true, false);
                goto out;
        }

        /* Try to take chan's mutex; if we can't get it right away, fall
         * back to software sync to avoid lock-ordering issues. */
        if (!mutex_trylock(&chan->mutex)) {
                ret = nouveau_fence_wait(fence, true, false);
                goto out_unref;
        }

        /* Make wchan wait until it gets signalled */
        ret = semaphore_acquire(wchan, sema);
        if (ret)
                goto out_unlock;

        /* Signal the semaphore from chan */
        ret = semaphore_release(chan, sema);
out_unlock:
        mutex_unlock(&chan->mutex);
out_unref:
        kref_put(&sema->ref, semaphore_free);
out:
        if (chan)
                nouveau_channel_put_unlocked(&chan);
        return ret;
}
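
/*
 * Illustrative use (a sketch, not code from this file): before wchan
 * consumes a buffer whose last access is tracked by fence, order the
 * two channels on the GPU instead of stalling the CPU:
 *
 *      ret = nouveau_fence_sync(fence, wchan);
 *
 * On NV17+ this emits a semaphore acquire on wchan and a release on
 * fence's channel; otherwise it degrades to nouveau_fence_wait().
 */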

int
nouveau_fence_channel_init(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *obj = NULL;
        int ret;

        if (dev_priv->card_type < NV_C0) {
                ret = RING_SPACE(chan, 2);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
                OUT_RING (chan, NvSw);
                FIRE_RING(chan);
        }

        /* Setup area of memory shared between all channels for x-chan sync */
        if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
                struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;

                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
                                             mem->start << PAGE_SHIFT,
                                             mem->size, NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_VRAM, &obj);
                if (ret)
                        return ret;

                ret = nouveau_ramht_insert(chan, NvSema, obj);
                nouveau_gpuobj_ref(NULL, &obj);
                if (ret)
                        return ret;
        } else
        if (USE_SEMA(dev)) {
                /* map fence bo into channel's vm */
                ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
                                         &chan->fence.vma);
                if (ret)
                        return ret;
        }

        atomic_set(&chan->fence.last_sequence_irq, 0);
        return 0;
}

void
nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_fence *tmp, *fence;

        spin_lock(&chan->fence.lock);
        list_for_each_entry_safe(fence, tmp, &chan->fence.pending, head) {
                fence->channel = NULL;
                list_del(&fence->head);

                if (unlikely(fence->work))
                        fence->work(fence->priv, false);

                kref_put(&fence->kref, nouveau_fence_del);
        }
        spin_unlock(&chan->fence.lock);

        nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
}

int
nouveau_fence_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
        int ret;

        /* Create a shared VRAM heap for cross-channel sync. */
        if (USE_SEMA(dev)) {
                ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
                                     0, 0, NULL, &dev_priv->fence.bo);
                if (ret)
                        return ret;

                ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
                if (ret)
                        goto fail;

                ret = nouveau_bo_map(dev_priv->fence.bo);
                if (ret)
                        goto fail;

                ret = drm_mm_init(&dev_priv->fence.heap, 0,
                                  dev_priv->fence.bo->bo.mem.size);
                if (ret)
                        goto fail;

                spin_lock_init(&dev_priv->fence.lock);
        }

        return 0;
fail:
        nouveau_bo_unmap(dev_priv->fence.bo);
        nouveau_bo_ref(NULL, &dev_priv->fence.bo);
        return ret;
}
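
/*
 * nouveau_fence_init()/nouveau_fence_fini() are the device-wide setup
 * and teardown pair (the heap carved out of fence.bo above is what
 * semaphore_alloc() hands out cells from); nouveau_fence_channel_init()
 * and _fini() above handle the per-channel state.
 */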

void
nouveau_fence_fini(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (USE_SEMA(dev)) {
                drm_mm_takedown(&dev_priv->fence.heap);
                nouveau_bo_unmap(dev_priv->fence.bo);
                nouveau_bo_unpin(dev_priv->fence.bo);
                nouveau_bo_ref(NULL, &dev_priv->fence.bo);
        }
}