2 * Copyright (C) 2007 Ben Skeggs.
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
30 #include "nouveau_drv.h"
31 #include "nouveau_dma.h"
/* True on NV10 and newer cards; expands using a local `dev_priv` that must
 * be in scope at each use site. Selects between the two fence mechanisms
 * (see the BEGIN_RING method choice in nouveau_fence_emit()). */
33 #define USE_REFCNT (dev_priv->card_type >= NV_10)
/*
 * A fence marks a point in a channel's command stream that the GPU
 * acknowledges by writing a sequence number.
 * NOTE(review): this chunk appears truncated -- the refcount, signalled
 * and sequence members referenced elsewhere in this file are not visible
 * here; confirm against the full file.
 */
35 struct nouveau_fence {
/* Channel the fence was emitted on (see nouveau_fence_channel()). */
36 struct nouveau_channel *channel;
/* Link into chan->fence.pending while the fence is outstanding. */
38 struct list_head entry;
/* Cast an opaque sync-object pointer back to our fence type.
 * NOTE(review): braces around the body are not visible in this chunk. */
44 static inline struct nouveau_fence *
45 nouveau_fence(void *sync_obj)
47 return (struct nouveau_fence *)sync_obj;
/*
 * kref release callback: runs when the last reference to a fence is
 * dropped via kref_put().
 * NOTE(review): the code that actually frees the fence is not visible in
 * this chunk (lines appear to be missing) -- confirm it kfree()s @fence.
 */
51 nouveau_fence_del(struct kref *ref)
53 struct nouveau_fence *fence =
54 container_of(ref, struct nouveau_fence, refcount);
/*
 * Retire completed fences on @chan: read the last sequence number the GPU
 * has reached and signal/release every pending fence up to it.
 * NOTE(review): this chunk is truncated (the branch selecting between the
 * two sequence reads, early returns and loop braces are missing); the
 * comments below cover only what the visible lines establish.
 */
60 nouveau_fence_update(struct nouveau_channel *chan)
62 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
63 struct list_head *entry, *tmp;
64 struct nouveau_fence *fence;
/* Completed sequence comes either from channel register 0x48 or from the
 * IRQ-updated counter; the selecting condition (presumably USE_REFCNT,
 * which reads the dev_priv declared above) is not visible here. */
68 sequence = nvchan_rd32(chan, 0x48);
70 sequence = atomic_read(&chan->fence.last_sequence_irq);
/* Nothing new has completed since the last poll. */
72 if (chan->fence.sequence_ack == sequence)
74 chan->fence.sequence_ack = sequence;
/* Safe-iteration variant is required: entries are unlinked inside the loop. */
76 spin_lock(&chan->fence.lock);
77 list_for_each_safe(entry, tmp, &chan->fence.pending) {
78 fence = list_entry(entry, struct nouveau_fence, entry);
80 sequence = fence->sequence;
/* Mark signalled, unlink, and drop the pending-list reference that was
 * taken by kref_get() in nouveau_fence_emit(). */
81 fence->signalled = true;
82 list_del(&fence->entry);
83 kref_put(&fence->refcount, nouveau_fence_del);
/* Stop retiring once the acknowledged sequence is reached. */
85 if (sequence == chan->fence.sequence_ack)
88 spin_unlock(&chan->fence.lock);
/*
 * Allocate and initialise a fence on @chan, returning it via @pfence.
 * NOTE(review): chunk is truncated -- the remainder of the parameter list,
 * the kzalloc failure check, and the return paths are not visible; the
 * emit call below may be guarded by a parameter we cannot see.
 */
92 nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
95 struct nouveau_fence *fence;
/* Zeroed allocation, so the signalled flag starts false. */
98 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
/* Initial reference, owned by the caller. */
101 kref_init(&fence->refcount);
102 fence->channel = chan;
105 ret = nouveau_fence_emit(fence);
/* On emit failure, drop the caller's reference to free the fence. */
108 nouveau_fence_unref((void *)&fence);
/* Return the channel a fence belongs to, or NULL for a NULL fence.
 * NOTE(review): body braces are not visible in this chunk. */
113 struct nouveau_channel *
114 nouveau_fence_channel(struct nouveau_fence *fence)
116 return fence ? fence->channel : NULL;
/*
 * Assign the next per-channel sequence number to @fence, put it on the
 * pending list, and write the sequence into the ring so the GPU signals
 * it when reached.
 * NOTE(review): chunk is truncated (error-return lines and any trailing
 * FIRE_RING/return are not visible).
 */
120 nouveau_fence_emit(struct nouveau_fence *fence)
122 struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
123 struct nouveau_channel *chan = fence->channel;
/* Two ring words are needed: the method header and the sequence value. */
126 ret = RING_SPACE(chan, 2);
/* The sequence counter is about to collide with the ack counter (i.e. a
 * full window of outstanding fences): retire completed fences first, and
 * treat "still full afterwards" as a fatal driver bug. */
130 if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
131 nouveau_fence_update(chan);
133 BUG_ON(chan->fence.sequence ==
134 chan->fence.sequence_ack - 1);
137 fence->sequence = ++chan->fence.sequence;
/* Extra reference owned by the pending list; dropped when the fence is
 * retired in nouveau_fence_update() (or nouveau_fence_fini()). */
139 kref_get(&fence->refcount);
140 spin_lock(&chan->fence.lock);
141 list_add_tail(&fence->entry, &chan->fence.pending);
142 spin_unlock(&chan->fence.lock);
/* NV10+ (USE_REFCNT) uses software-object method 0x0050, older cards
 * 0x0150 -- matching the two sequence-read paths in nouveau_fence_update(). */
144 BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
145 OUT_RING(chan, fence->sequence);
/*
 * Drop one reference on the sync object; the fence is freed through
 * nouveau_fence_del() on the final put. Takes a pointer-to-pointer,
 * presumably so the caller's pointer can be cleared -- the lines after
 * the kref_put are not visible in this chunk.
 */
152 nouveau_fence_unref(void **sync_obj)
154 struct nouveau_fence *fence = nouveau_fence(*sync_obj);
157 kref_put(&fence->refcount, nouveau_fence_del);
/* Take an additional reference on the sync object (the return statement,
 * presumably handing back sync_obj, is not visible in this chunk). */
162 nouveau_fence_ref(void *sync_obj)
164 struct nouveau_fence *fence = nouveau_fence(sync_obj);
166 kref_get(&fence->refcount);
/*
 * Check whether a fence has passed: fast-path on the cached flag,
 * otherwise poll the channel once via nouveau_fence_update() and
 * re-check. @sync_arg is unused in the visible lines.
 */
171 nouveau_fence_signalled(void *sync_obj, void *sync_arg)
173 struct nouveau_fence *fence = nouveau_fence(sync_obj);
174 struct nouveau_channel *chan = fence->channel;
/* Already retired -- no need to touch the hardware. */
176 if (fence->signalled)
179 nouveau_fence_update(chan);
180 return fence->signalled;
/*
 * Wait for a fence to signal, with a 3-second deadline. @intr makes the
 * sleep interruptible by signals; @lazy presumably selects sleeping vs.
 * busy-polling, but the schedule/delay call is not visible in this chunk.
 * NOTE(review): loop structure and all return statements are truncated
 * here -- comments cover only the visible lines.
 */
184 nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
186 unsigned long timeout = jiffies + (3 * DRM_HZ);
/* Done as soon as the fence reports signalled. */
190 if (nouveau_fence_signalled(sync_obj, sync_arg))
/* Give up once the deadline has passed. */
193 if (time_after_eq(jiffies, timeout)) {
198 __set_current_state(intr ? TASK_INTERRUPTIBLE
199 : TASK_UNINTERRUPTIBLE);
/* Interruptible wait aborted by a pending signal (the value returned is
 * not visible in this chunk). */
203 if (intr && signal_pending(current)) {
/* Restore the task state before leaving the wait. */
209 __set_current_state(TASK_RUNNING);
/* Flush hook of the sync-object interface; its body is not visible in
 * this chunk (it may well be a no-op -- confirm against the full file). */
215 nouveau_fence_flush(void *sync_obj, void *sync_arg)
/*
 * Initialise per-channel fence state: empty pending list, its spinlock,
 * and the IRQ-updated last-sequence counter starting at zero.
 * NOTE(review): body braces/return are not visible in this chunk.
 */
221 nouveau_fence_init(struct nouveau_channel *chan)
223 INIT_LIST_HEAD(&chan->fence.pending);
224 spin_lock_init(&chan->fence.lock);
225 atomic_set(&chan->fence.last_sequence_irq, 0);
/*
 * Channel teardown: force-signal and release every fence still pending.
 * Mirrors the retire loop in nouveau_fence_update(), but unconditionally
 * and, in the visible lines, without taking chan->fence.lock --
 * presumably the channel is already quiescent at this point; confirm.
 */
230 nouveau_fence_fini(struct nouveau_channel *chan)
232 struct list_head *entry, *tmp;
233 struct nouveau_fence *fence;
235 list_for_each_safe(entry, tmp, &chan->fence.pending) {
236 fence = list_entry(entry, struct nouveau_fence, entry);
/* Mark done, unlink, and drop the pending-list reference taken at emit. */
238 fence->signalled = true;
239 list_del(&fence->entry);
240 kref_put(&fence->refcount, nouveau_fence_del);