/*
 * Copyright (C) 2007 Ben Skeggs.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_dma.h"

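/*
 * NV10 and later provide a per-channel reference counter that the fence
 * code can read back directly; older chips rely on a sequence number
 * updated from the software-method IRQ handler.  NV17 and later can
 * additionally use semaphores for cross-channel synchronisation.
 */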
#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)

struct nouveau_fence {
        struct nouveau_channel *channel;
        struct kref refcount;
        struct list_head entry;

        uint32_t sequence;
        bool signalled;

        void (*work)(void *priv, bool signalled);
        void *priv;
};

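/*
 * A semaphore is a 4-byte cell allocated from the shared fence buffer
 * object; channels acquire and release it via NV_SW methods so they can
 * serialise against each other without waking the CPU.
 */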
struct nouveau_semaphore {
        struct kref ref;
        struct drm_device *dev;
        struct drm_mm_node *mem;
};

static inline struct nouveau_fence *
nouveau_fence(void *sync_obj)
{
        return (struct nouveau_fence *)sync_obj;
}

static void
nouveau_fence_del(struct kref *ref)
{
        struct nouveau_fence *fence =
                container_of(ref, struct nouveau_fence, refcount);

        kfree(fence);
}

void
nouveau_fence_update(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct nouveau_fence *tmp, *fence;
        uint32_t sequence;

        spin_lock(&chan->fence.lock);

        if (USE_REFCNT(dev))
                sequence = nvchan_rd32(chan, 0x48);
        else
                sequence = atomic_read(&chan->fence.last_sequence_irq);

        if (chan->fence.sequence_ack == sequence)
                goto out;
        chan->fence.sequence_ack = sequence;

        list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
                sequence = fence->sequence;
                fence->signalled = true;
                list_del(&fence->entry);

                if (unlikely(fence->work))
                        fence->work(fence->priv, true);

                kref_put(&fence->refcount, nouveau_fence_del);

                if (sequence == chan->fence.sequence_ack)
                        break;
        }
out:
        spin_unlock(&chan->fence.lock);
}

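/*
 * Fences are reference counted: the caller owns one reference, and
 * emitting takes a second one that the channel's pending list holds
 * until nouveau_fence_update() sees the sequence number retire.
 */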
int
nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
                  bool emit)
{
        struct nouveau_fence *fence;
        int ret = 0;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return -ENOMEM;
        kref_init(&fence->refcount);
        fence->channel = chan;

        if (emit)
                ret = nouveau_fence_emit(fence);

        if (ret)
                nouveau_fence_unref((void *)&fence);
        *pfence = fence;
        return ret;
}

struct nouveau_channel *
nouveau_fence_channel(struct nouveau_fence *fence)
{
        return fence ? fence->channel : NULL;
}

int
nouveau_fence_emit(struct nouveau_fence *fence)
{
        struct nouveau_channel *chan = fence->channel;
        struct drm_device *dev = chan->dev;
        int ret;

        ret = RING_SPACE(chan, 2);
        if (ret)
                return ret;

        if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
                nouveau_fence_update(chan);

                BUG_ON(chan->fence.sequence ==
                       chan->fence.sequence_ack - 1);
        }

        fence->sequence = ++chan->fence.sequence;

        kref_get(&fence->refcount);
        spin_lock(&chan->fence.lock);
        list_add_tail(&fence->entry, &chan->fence.pending);
        spin_unlock(&chan->fence.lock);

        BEGIN_RING(chan, NvSubSw, USE_REFCNT(dev) ? 0x0050 : 0x0150, 1);
        OUT_RING(chan, fence->sequence);
        FIRE_RING(chan);

        return 0;
}

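/*
 * Attach a work callback to a fence.  It runs under the channel's fence
 * lock once the fence signals; "signalled" is false only when the
 * channel is torn down before the fence ever completed.
 */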
void
nouveau_fence_work(struct nouveau_fence *fence,
                   void (*work)(void *priv, bool signalled),
                   void *priv)
{
        BUG_ON(fence->work);

        spin_lock(&fence->channel->fence.lock);

        if (fence->signalled) {
                work(priv, true);
        } else {
                fence->work = work;
                fence->priv = priv;
        }

        spin_unlock(&fence->channel->fence.lock);
}

void
nouveau_fence_unref(void **sync_obj)
{
        struct nouveau_fence *fence = nouveau_fence(*sync_obj);

        if (fence)
                kref_put(&fence->refcount, nouveau_fence_del);
        *sync_obj = NULL;
}

void *
nouveau_fence_ref(void *sync_obj)
{
        struct nouveau_fence *fence = nouveau_fence(sync_obj);

        kref_get(&fence->refcount);
        return sync_obj;
}

bool
nouveau_fence_signalled(void *sync_obj, void *sync_arg)
{
        struct nouveau_fence *fence = nouveau_fence(sync_obj);
        struct nouveau_channel *chan = fence->channel;

        if (fence->signalled)
                return true;

        nouveau_fence_update(chan);
        return fence->signalled;
}

int
nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
        unsigned long timeout = jiffies + (3 * DRM_HZ);
        int ret = 0;

        while (1) {
                if (nouveau_fence_signalled(sync_obj, sync_arg))
                        break;

                if (time_after_eq(jiffies, timeout)) {
                        ret = -EBUSY;
                        break;
                }

                __set_current_state(intr ? TASK_INTERRUPTIBLE
                                         : TASK_UNINTERRUPTIBLE);
                if (lazy)
                        schedule_timeout(1);

                if (intr && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }

        __set_current_state(TASK_RUNNING);

        return ret;
}

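/*
 * Hardware semaphore helpers.  Cross-channel synchronisation works by
 * making the waiting channel ACQUIRE a semaphore that the signalling
 * channel later RELEASEs; both operations target the same 4-byte cell
 * in the shared fence buffer object.
 */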
static struct nouveau_semaphore *
alloc_semaphore(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_semaphore *sema;

        if (!USE_SEMA(dev))
                return NULL;

        sema = kmalloc(sizeof(*sema), GFP_KERNEL);
        if (!sema)
                goto fail;

        spin_lock(&dev_priv->fence.lock);
        sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0);
        if (sema->mem)
                sema->mem = drm_mm_get_block(sema->mem, 4, 0);
        spin_unlock(&dev_priv->fence.lock);

        if (!sema->mem)
                goto fail;

        kref_init(&sema->ref);
        sema->dev = dev;
        nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 0);

        return sema;
fail:
        kfree(sema);
        return NULL;
}

static void
free_semaphore(struct kref *ref)
{
        struct nouveau_semaphore *sema =
                container_of(ref, struct nouveau_semaphore, ref);
        struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

        spin_lock(&dev_priv->fence.lock);
        drm_mm_put_block(sema->mem);
        spin_unlock(&dev_priv->fence.lock);

        kfree(sema);
}

static void
semaphore_work(void *priv, bool signalled)
{
        struct nouveau_semaphore *sema = priv;
        struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

        if (unlikely(!signalled))
                nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);

        kref_put(&sema->ref, free_semaphore);
}

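/*
 * Emit an ACQUIRE or RELEASE method for "sema" on "chan", and attach a
 * fence work item so the semaphore stays alive (and is force-released
 * on channel teardown) until the emitted commands have executed.
 */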
static int
emit_semaphore(struct nouveau_channel *chan, int method,
               struct nouveau_semaphore *sema)
{
        struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
        struct nouveau_fence *fence;
        bool smart = (dev_priv->card_type >= NV_50);
        int ret;

        ret = RING_SPACE(chan, smart ? 8 : 4);
        if (ret)
                return ret;

        if (smart) {
                BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
                OUT_RING(chan, NvSema);
        }
        BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
        OUT_RING(chan, sema->mem->start);

        if (smart && method == NV_SW_SEMAPHORE_ACQUIRE) {
                /*
                 * NV50 tries to be too smart and context-switch
                 * between semaphores instead of doing a "first come,
                 * first served" strategy like previous cards do.
                 *
                 * That's bad because the ACQUIRE latency can get as
                 * large as the PFIFO context time slice in the
                 * typical DRI2 case where you have several
                 * outstanding semaphores at the same moment.
                 *
                 * If we're going to ACQUIRE, force the card to
                 * context switch before, just in case the matching
                 * RELEASE is already scheduled to be executed in
                 * another channel.
                 */
                BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
                OUT_RING(chan, 0);
        }

        BEGIN_RING(chan, NvSubSw, method, 1);
        OUT_RING(chan, sema->mem->start);

        if (smart && method == NV_SW_SEMAPHORE_RELEASE) {
                /*
                 * Force the card to context switch, there may be
                 * another channel waiting for the semaphore we just
                 * released.
                 */
                BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
                OUT_RING(chan, 0);
        }

        /* Delay semaphore destruction until its work is done. */
        ret = nouveau_fence_new(chan, &fence, true);
        if (ret)
                return ret;

        kref_get(&sema->ref);
        nouveau_fence_work(fence, semaphore_work, sema);
        nouveau_fence_unref((void *)&fence);

        return 0;
}

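/*
 * Make "wchan" wait for "fence" to signal on its originating channel.
 *
 * Illustrative use (caller and variable names hypothetical): before
 * reusing a buffer on a different channel, code holding the buffer's
 * last fence might do
 *
 *      ret = nouveau_fence_sync(last_fence, wchan);
 *      if (ret)
 *              return ret;
 *
 * which degrades to a blocking CPU wait when semaphores are unavailable.
 */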
int
nouveau_fence_sync(struct nouveau_fence *fence,
                   struct nouveau_channel *wchan)
{
        struct nouveau_channel *chan = nouveau_fence_channel(fence);
        struct drm_device *dev = wchan->dev;
        struct nouveau_semaphore *sema;
        int ret;

        if (likely(!fence || chan == wchan ||
                   nouveau_fence_signalled(fence, NULL)))
                return 0;

        sema = alloc_semaphore(dev);
        if (!sema) {
                /* Early card or broken userspace, fall back to
                 * software sync. */
                return nouveau_fence_wait(fence, NULL, false, false);
        }

        /* Make wchan wait until it gets signalled. */
        ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
        if (ret)
                goto out;

        /* Signal the semaphore from chan. */
        ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
out:
        kref_put(&sema->ref, free_semaphore);
        return ret;
}

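/* No-op; fences signal on their own, but the sync_obj interface this
 * family of functions implements expects a flush hook. */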
int
nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
        return 0;
}

int
nouveau_fence_channel_init(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *obj = NULL;
        int ret;

        /* Create an NV_SW object for various sync purposes. */
        ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj);
        if (ret)
                return ret;

        ret = nouveau_ramht_insert(chan, NvSw, obj);
        nouveau_gpuobj_ref(NULL, &obj);
        if (ret)
                return ret;

        ret = RING_SPACE(chan, 2);
        if (ret)
                return ret;
        BEGIN_RING(chan, NvSubSw, 0, 1);
        OUT_RING(chan, NvSw);

        /* Create a DMA object for the shared cross-channel sync area. */
        if (USE_SEMA(dev)) {
                struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node;

                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             mem->start << PAGE_SHIFT,
                                             mem->size << PAGE_SHIFT,
                                             NV_DMA_ACCESS_RW,
                                             NV_DMA_TARGET_VIDMEM, &obj);
                if (ret)
                        return ret;

                ret = nouveau_ramht_insert(chan, NvSema, obj);
                nouveau_gpuobj_ref(NULL, &obj);
                if (ret)
                        return ret;

                ret = RING_SPACE(chan, 2);
                if (ret)
                        return ret;
                BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
                OUT_RING(chan, NvSema);
        }

        FIRE_RING(chan);

        INIT_LIST_HEAD(&chan->fence.pending);
        spin_lock_init(&chan->fence.lock);
        atomic_set(&chan->fence.last_sequence_irq, 0);

        return 0;
}

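/*
 * Complete any fences still pending at channel teardown so their
 * workers can clean up; they are called with signalled == false to
 * indicate the commands never actually executed (semaphore_work uses
 * this to unblock channels still waiting on an ACQUIRE).
 */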
void
nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
        struct nouveau_fence *tmp, *fence;

        list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
                fence->signalled = true;
                list_del(&fence->entry);

                if (unlikely(fence->work))
                        fence->work(fence->priv, false);

                kref_put(&fence->refcount, nouveau_fence_del);
        }
}

int
nouveau_fence_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int ret;

        /* Create a shared VRAM heap for cross-channel sync. */
        if (USE_SEMA(dev)) {
                ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM,
                                     0, 0, false, true, &dev_priv->fence.bo);
                if (ret)
                        return ret;

                ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
                if (ret)
                        goto fail;

                ret = nouveau_bo_map(dev_priv->fence.bo);
                if (ret)
                        goto fail;

                ret = drm_mm_init(&dev_priv->fence.heap, 0,
                                  dev_priv->fence.bo->bo.mem.size);
                if (ret)
                        goto fail;

                spin_lock_init(&dev_priv->fence.lock);
        }

        return 0;
fail:
        nouveau_bo_unmap(dev_priv->fence.bo);
        nouveau_bo_ref(NULL, &dev_priv->fence.bo);
        return ret;
}

void
nouveau_fence_fini(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (USE_SEMA(dev)) {
                drm_mm_takedown(&dev_priv->fence.heap);
                nouveau_bo_unmap(dev_priv->fence.bo);
                nouveau_bo_unpin(dev_priv->fence.bo);
                nouveau_bo_ref(NULL, &dev_priv->fence.bo);
        }
}