/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/ktime.h>
#include <linux/hrtimer.h>

#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_software.h"
#include "nouveau_dma.h"

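/* NV10+ chipsets expose a per-channel ref-count register we can read the
 * last completed fence sequence from; NV17+ parts can additionally use
 * hardware semaphores for cross-channel synchronisation.
 */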
#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
#define USE_SEMA(dev)   (nouveau_private(dev)->chipset >= 0x17)

struct nouveau_fence {
	struct nouveau_channel *channel;
	struct kref refcount;
	struct list_head entry;

	uint32_t sequence;
	bool signalled;
	unsigned long timeout;

	void (*work)(void *priv, bool signalled);
	void *priv;
};

struct nouveau_semaphore {
	struct kref ref;
	struct drm_device *dev;
	struct drm_mm_node *mem;
};

static inline struct nouveau_fence *
nouveau_fence(void *sync_obj)
{
	return (struct nouveau_fence *)sync_obj;
}

static void
nouveau_fence_del(struct kref *ref)
{
	struct nouveau_fence *fence =
		container_of(ref, struct nouveau_fence, refcount);

	nouveau_channel_ref(NULL, &fence->channel);
	kfree(fence);
}

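/* Retire fences: read back the newest sequence number the GPU has
 * completed and signal, unlink and unref every pending fence at or below
 * it, running any deferred work attached along the way.
 */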
static void
nouveau_fence_update(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_fence *tmp, *fence;
	uint32_t sequence;

	spin_lock(&chan->fence.lock);

	/* Fetch the last sequence if the channel is still up and running */
	if (likely(!list_empty(&chan->fence.pending))) {
		if (USE_REFCNT(dev))
			sequence = nvchan_rd32(chan, 0x48);
		else
			sequence = atomic_read(&chan->fence.last_sequence_irq);

		if (chan->fence.sequence_ack == sequence)
			goto out;
		chan->fence.sequence_ack = sequence;
	}

	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
		if (fence->sequence > chan->fence.sequence_ack)
			break;

		fence->signalled = true;
		list_del(&fence->entry);
		if (fence->work)
			fence->work(fence->priv, true);

		kref_put(&fence->refcount, nouveau_fence_del);
	}

out:
	spin_unlock(&chan->fence.lock);
}

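/* Allocate a fence on @chan; when @emit is set it is also written to the
 * ring immediately, so it signals once all preceding work is done.
 */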
int
nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
		  bool emit)
{
	struct nouveau_fence *fence;
	int ret = 0;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	kref_init(&fence->refcount);
	nouveau_channel_ref(chan, &fence->channel);

	if (emit)
		ret = nouveau_fence_emit(fence);

	if (ret)
		nouveau_fence_unref(&fence);
	*pfence = fence;
	return ret;
}

struct nouveau_channel *
nouveau_fence_channel(struct nouveau_fence *fence)
{
	return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
}

int
nouveau_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = fence->channel;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	ret = RING_SPACE(chan, 2);
	if (ret)
		return ret;

	if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
		nouveau_fence_update(chan);

		BUG_ON(chan->fence.sequence ==
		       chan->fence.sequence_ack - 1);
	}

	fence->sequence = ++chan->fence.sequence;

	kref_get(&fence->refcount);
	spin_lock(&chan->fence.lock);
	list_add_tail(&fence->entry, &chan->fence.pending);
	spin_unlock(&chan->fence.lock);

	if (USE_REFCNT(dev)) {
		if (dev_priv->card_type < NV_C0)
			BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
		else
			BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
	} else {
		BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
	}
	OUT_RING (chan, fence->sequence);
	FIRE_RING(chan);
	fence->timeout = jiffies + 3 * DRM_HZ;

	return 0;
}

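/* Attach deferred work to a fence.  If the fence has already signalled
 * the callback runs right away; otherwise it runs when the fence is
 * retired, or with signalled == false if the channel dies first.
 */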
void
nouveau_fence_work(struct nouveau_fence *fence,
		   void (*work)(void *priv, bool signalled),
		   void *priv)
{
	spin_lock(&fence->channel->fence.lock);

	if (fence->signalled) {
		work(priv, true);
	} else {
		fence->work = work;
		fence->priv = priv;
	}

	spin_unlock(&fence->channel->fence.lock);
}

void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
	if (*pfence)
		kref_put(&(*pfence)->refcount, nouveau_fence_del);
	*pfence = NULL;
}

struct nouveau_fence *
nouveau_fence_ref(struct nouveau_fence *fence)
{
	kref_get(&fence->refcount);
	return fence;
}

bool
nouveau_fence_signalled(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = fence->channel;

	if (fence->signalled)
		return true;

	nouveau_fence_update(chan);
	return fence->signalled;
}

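/* Wait for a fence: with @lazy set, sleeps in hrtimer steps that double
 * from 1us up to a 1ms cap, and gives up with -EBUSY once the fence's
 * ~3 second timeout has passed, or -ERESTARTSYS on a pending signal if
 * @intr is set.
 */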
int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
	ktime_t t;
	int ret = 0;

	while (!nouveau_fence_signalled(fence)) {
		if (time_after_eq(jiffies, fence->timeout)) {
			ret = -EBUSY;
			break;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);
		if (lazy) {
			t = ktime_set(0, sleep_time);
			schedule_hrtimeout(&t, HRTIMER_MODE_REL);
			sleep_time *= 2;
			if (sleep_time > NSEC_PER_MSEC)
				sleep_time = NSEC_PER_MSEC;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

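/* Hardware semaphores live in a shared VRAM buffer; each one is a 4-byte
 * (16-byte on NV84+) slot carved out of the drm_mm heap created in
 * nouveau_fence_init() and zeroed before use.
 */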
static struct nouveau_semaphore *
semaphore_alloc(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_semaphore *sema;
	int size = (dev_priv->chipset < 0x84) ? 4 : 16;
	int ret, i;

	if (!USE_SEMA(dev))
		return NULL;

	sema = kmalloc(sizeof(*sema), GFP_KERNEL);
	if (!sema)
		goto fail;

	ret = drm_mm_pre_get(&dev_priv->fence.heap);
	if (ret)
		goto fail;

	spin_lock(&dev_priv->fence.lock);
	sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
	if (sema->mem)
		sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
	spin_unlock(&dev_priv->fence.lock);

	if (!sema->mem)
		goto fail;

	kref_init(&sema->ref);
	sema->dev = dev;
	for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
		nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);

	return sema;
fail:
	kfree(sema);
	return NULL;
}

static void
semaphore_free(struct kref *ref)
{
	struct nouveau_semaphore *sema =
		container_of(ref, struct nouveau_semaphore, ref);
	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

	spin_lock(&dev_priv->fence.lock);
	drm_mm_put_block(sema->mem);
	spin_unlock(&dev_priv->fence.lock);

	kfree(sema);
}

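/* Runs as fence work once the channel is done with the semaphore; if the
 * fence never signalled (e.g. channel teardown), release the semaphore
 * from the CPU instead so the waiting channel doesn't hang forever.
 */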
static void
semaphore_work(void *priv, bool signalled)
{
	struct nouveau_semaphore *sema = priv;
	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

	if (unlikely(!signalled))
		nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);

	kref_put(&sema->ref, semaphore_free);
}

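/* Emit a semaphore acquire on @chan.  Pre-NV84 parts address the
 * semaphore through the NvSema DMA object with a 32-bit offset; NV84+
 * takes a 64-bit virtual address, and NVC0+ uses the same address pair
 * with an ACQUIRE_EQ operation flag (0x1001).
 */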
static int
semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_fence *fence = NULL;
	u64 offset = chan->fence.vma.offset + sema->mem->start;
	int ret;

	if (dev_priv->chipset < 0x84) {
		ret = RING_SPACE(chan, 4);
		if (ret)
			return ret;

		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 3);
		OUT_RING (chan, NvSema);
		OUT_RING (chan, offset);
		OUT_RING (chan, 1);
	} else
	if (dev_priv->chipset < 0xc0) {
		ret = RING_SPACE(chan, 7);
		if (ret)
			return ret;

		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
		OUT_RING (chan, chan->vram_handle);
		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING (chan, upper_32_bits(offset));
		OUT_RING (chan, lower_32_bits(offset));
		OUT_RING (chan, 1);
		OUT_RING (chan, 1); /* ACQUIRE_EQ */
	} else {
		ret = RING_SPACE(chan, 5);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING (chan, upper_32_bits(offset));
		OUT_RING (chan, lower_32_bits(offset));
		OUT_RING (chan, 1);
		OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
	}

	/* Delay semaphore destruction until its work is done */
	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	kref_get(&sema->ref);
	nouveau_fence_work(fence, semaphore_work, sema);
	nouveau_fence_unref(&fence);
	return 0;
}

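/* Emit the matching semaphore release; same three hardware paths as the
 * acquire above, with the RELEASE operation instead.
 */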
static int
semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_fence *fence = NULL;
	u64 offset = chan->fence.vma.offset + sema->mem->start;
	int ret;

	if (dev_priv->chipset < 0x84) {
		ret = RING_SPACE(chan, 5);
		if (ret)
			return ret;

		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
		OUT_RING (chan, NvSema);
		OUT_RING (chan, offset);
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
		OUT_RING (chan, 1);
	} else
	if (dev_priv->chipset < 0xc0) {
		ret = RING_SPACE(chan, 7);
		if (ret)
			return ret;

		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
		OUT_RING (chan, chan->vram_handle);
		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING (chan, upper_32_bits(offset));
		OUT_RING (chan, lower_32_bits(offset));
		OUT_RING (chan, 1);
		OUT_RING (chan, 2); /* RELEASE */
	} else {
		ret = RING_SPACE(chan, 5);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING (chan, upper_32_bits(offset));
		OUT_RING (chan, lower_32_bits(offset));
		OUT_RING (chan, 1);
		OUT_RING (chan, 0x1002); /* RELEASE */
	}

	/* Delay semaphore destruction until its work is done */
	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	kref_get(&sema->ref);
	nouveau_fence_work(fence, semaphore_work, sema);
	nouveau_fence_unref(&fence);
	return 0;
}

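/* Make @wchan wait for @fence, which may belong to another channel.
 * Where possible the sync is done entirely on the GPU: @wchan acquires a
 * semaphore that the fence's own channel releases once it reaches the
 * fence; otherwise we fall back to a software wait.
 */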
int
nouveau_fence_sync(struct nouveau_fence *fence,
		   struct nouveau_channel *wchan)
{
	struct nouveau_channel *chan = nouveau_fence_channel(fence);
	struct drm_device *dev = wchan->dev;
	struct nouveau_semaphore *sema;
	int ret = 0;

	if (likely(!chan || chan == wchan ||
		   nouveau_fence_signalled(fence)))
		goto out;

	sema = semaphore_alloc(dev);
	if (!sema) {
		/* Early card or broken userspace, fall back to
		 * software sync. */
		ret = nouveau_fence_wait(fence, true, false);
		goto out;
	}

	/* try to take chan's mutex, if we can't take it right away
	 * we have to fall back to software sync to prevent locking
	 * order issues */
	if (!mutex_trylock(&chan->mutex)) {
		ret = nouveau_fence_wait(fence, true, false);
		goto out_unref;
	}

	/* Make wchan wait until it gets signalled */
	ret = semaphore_acquire(wchan, sema);
	if (ret)
		goto out_unlock;

	/* Signal the semaphore from chan */
	ret = semaphore_release(chan, sema);

out_unlock:
	mutex_unlock(&chan->mutex);
out_unref:
	kref_put(&sema->ref, semaphore_free);
out:
	if (chan)
		nouveau_channel_put_unlocked(&chan);
	return ret;
}

int
__nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

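/* Per-channel setup: bind the software object (pre-NVC0), then give the
 * channel access to the shared semaphore buffer, via an NvSema DMA
 * object on pre-NV84 or by mapping the buffer into the channel's VM.
 */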
int
nouveau_fence_channel_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	if (dev_priv->card_type < NV_C0) {
		ret = RING_SPACE(chan, 2);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
		OUT_RING (chan, NvSw);
		FIRE_RING(chan);
	}

	/* Setup area of memory shared between all channels for x-chan sync */
	if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
		struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;

		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
					     mem->start << PAGE_SHIFT,
					     mem->size, NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &obj);
		if (ret)
			return ret;

		ret = nouveau_ramht_insert(chan, NvSema, obj);
		nouveau_gpuobj_ref(NULL, &obj);
		if (ret)
			return ret;
	} else
	if (USE_SEMA(dev)) {
		/* map fence bo into channel's vm */
		ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
					 &chan->fence.vma);
		if (ret)
			return ret;
	}

	atomic_set(&chan->fence.last_sequence_irq, 0);
	return 0;
}

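/* Channel teardown: forcibly retire everything still pending, calling
 * deferred work with signalled == false so it knows the fence never
 * actually completed.
 */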
void
nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_fence *tmp, *fence;

	spin_lock(&chan->fence.lock);
	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
		fence->signalled = true;
		list_del(&fence->entry);

		if (unlikely(fence->work))
			fence->work(fence->priv, false);

		kref_put(&fence->refcount, nouveau_fence_del);
	}
	spin_unlock(&chan->fence.lock);

	nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
}

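/* Driver-load setup: allocate, pin and map the VRAM buffer backing all
 * hardware semaphores, and initialise the drm_mm heap that parcels it
 * out into per-semaphore slots.
 */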
int
nouveau_fence_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
	int ret;

	/* Create a shared VRAM heap for cross-channel sync. */
	if (USE_SEMA(dev)) {
		ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
				     0, 0, NULL, &dev_priv->fence.bo);
		if (ret)
			return ret;

		ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
		if (ret)
			goto fail;

		ret = nouveau_bo_map(dev_priv->fence.bo);
		if (ret)
			goto fail;

		ret = drm_mm_init(&dev_priv->fence.heap, 0,
				  dev_priv->fence.bo->bo.mem.size);
		if (ret)
			goto fail;

		spin_lock_init(&dev_priv->fence.lock);
	}

	return 0;
fail:
	nouveau_bo_unmap(dev_priv->fence.bo);
	nouveau_bo_ref(NULL, &dev_priv->fence.bo);
	return ret;
}

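/* Driver-unload teardown: tear down the semaphore heap and its backing
 * buffer.
 */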
void
nouveau_fence_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (USE_SEMA(dev)) {
		drm_mm_takedown(&dev_priv->fence.heap);
		nouveau_bo_unmap(dev_priv->fence.bo);
		nouveau_bo_unpin(dev_priv->fence.bo);
		nouveau_bo_ref(NULL, &dev_priv->fence.bo);
	}
}