/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/seq_file.h>
#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
42 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
44 unsigned long irq_flags;
46 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
48 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
51 fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
52 if (!rdev->cp.ready) {
53 /* FIXME: cp is not running assume everythings is done right
56 WREG32(rdev->fence_drv.scratch_reg, fence->seq);
58 radeon_fence_ring_emit(rdev, fence);
61 list_del(&fence->list);
62 list_add_tail(&fence->list, &rdev->fence_drv.emited);
63 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
67 static bool radeon_fence_poll_locked(struct radeon_device *rdev)
69 struct radeon_fence *fence;
70 struct list_head *i, *n;
73 unsigned long cjiffies;
75 seq = RREG32(rdev->fence_drv.scratch_reg);
76 if (seq != rdev->fence_drv.last_seq) {
77 rdev->fence_drv.last_seq = seq;
78 rdev->fence_drv.last_jiffies = jiffies;
79 rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
82 if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
83 cjiffies -= rdev->fence_drv.last_jiffies;
84 if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
85 /* update the timeout */
86 rdev->fence_drv.last_timeout -= cjiffies;
88 /* the 500ms timeout is elapsed we should test
91 rdev->fence_drv.last_timeout = 1;
94 /* wrap around update last jiffies, we will just wait
97 rdev->fence_drv.last_jiffies = cjiffies;
102 list_for_each(i, &rdev->fence_drv.emited) {
103 fence = list_entry(i, struct radeon_fence, list);
104 if (fence->seq == seq) {
109 /* all fence previous to this one are considered as signaled */
115 list_add_tail(i, &rdev->fence_drv.signaled);
116 fence = list_entry(i, struct radeon_fence, list);
117 fence->signaled = true;
119 } while (i != &rdev->fence_drv.emited);
125 static void radeon_fence_destroy(struct kref *kref)
127 unsigned long irq_flags;
128 struct radeon_fence *fence;
130 fence = container_of(kref, struct radeon_fence, kref);
131 write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
132 list_del(&fence->list);
133 fence->emited = false;
134 write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
138 int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
140 unsigned long irq_flags;
142 *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
143 if ((*fence) == NULL) {
146 kref_init(&((*fence)->kref));
147 (*fence)->rdev = rdev;
148 (*fence)->emited = false;
149 (*fence)->signaled = false;
151 INIT_LIST_HEAD(&(*fence)->list);
153 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
154 list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
155 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
160 bool radeon_fence_signaled(struct radeon_fence *fence)
162 unsigned long irq_flags;
163 bool signaled = false;
168 if (fence->rdev->gpu_lockup)
171 write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
172 signaled = fence->signaled;
173 /* if we are shuting down report all fence as signaled */
174 if (fence->rdev->shutdown) {
177 if (!fence->emited) {
178 WARN(1, "Querying an unemited fence : %p !\n", fence);
182 radeon_fence_poll_locked(fence->rdev);
183 signaled = fence->signaled;
185 write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
189 int radeon_fence_wait(struct radeon_fence *fence, bool intr)
191 struct radeon_device *rdev;
192 unsigned long irq_flags, timeout;
197 WARN(1, "Querying an invalid fence : %p !\n", fence);
201 if (radeon_fence_signaled(fence)) {
204 timeout = rdev->fence_drv.last_timeout;
206 /* save current sequence used to check for GPU lockup */
207 seq = rdev->fence_drv.last_seq;
209 radeon_irq_kms_sw_irq_get(rdev);
210 r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
211 radeon_fence_signaled(fence), timeout);
212 radeon_irq_kms_sw_irq_put(rdev);
213 if (unlikely(r < 0)) {
217 radeon_irq_kms_sw_irq_get(rdev);
218 r = wait_event_timeout(rdev->fence_drv.queue,
219 radeon_fence_signaled(fence), timeout);
220 radeon_irq_kms_sw_irq_put(rdev);
222 if (unlikely(!radeon_fence_signaled(fence))) {
223 /* we were interrupted for some reason and fence isn't
224 * isn't signaled yet, resume wait
230 /* don't protect read access to rdev->fence_drv.last_seq
231 * if we experiencing a lockup the value doesn't change
233 if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
234 /* good news we believe it's a lockup */
235 WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
236 /* FIXME: what should we do ? marking everyone
237 * as signaled for now
239 rdev->gpu_lockup = true;
240 r = radeon_gpu_reset(rdev);
243 WREG32(rdev->fence_drv.scratch_reg, fence->seq);
244 rdev->gpu_lockup = false;
246 timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
247 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
248 rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
249 rdev->fence_drv.last_jiffies = jiffies;
250 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
256 int radeon_fence_wait_next(struct radeon_device *rdev)
258 unsigned long irq_flags;
259 struct radeon_fence *fence;
262 if (rdev->gpu_lockup) {
265 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
266 if (list_empty(&rdev->fence_drv.emited)) {
267 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
270 fence = list_entry(rdev->fence_drv.emited.next,
271 struct radeon_fence, list);
272 radeon_fence_ref(fence);
273 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
274 r = radeon_fence_wait(fence, false);
275 radeon_fence_unref(&fence);
279 int radeon_fence_wait_last(struct radeon_device *rdev)
281 unsigned long irq_flags;
282 struct radeon_fence *fence;
285 if (rdev->gpu_lockup) {
288 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
289 if (list_empty(&rdev->fence_drv.emited)) {
290 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
293 fence = list_entry(rdev->fence_drv.emited.prev,
294 struct radeon_fence, list);
295 radeon_fence_ref(fence);
296 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
297 r = radeon_fence_wait(fence, false);
298 radeon_fence_unref(&fence);
302 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
304 kref_get(&fence->kref);
308 void radeon_fence_unref(struct radeon_fence **fence)
310 struct radeon_fence *tmp = *fence;
314 kref_put(&tmp->kref, &radeon_fence_destroy);
318 void radeon_fence_process(struct radeon_device *rdev)
320 unsigned long irq_flags;
323 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
324 wake = radeon_fence_poll_locked(rdev);
325 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
327 wake_up_all(&rdev->fence_drv.queue);
331 int radeon_fence_driver_init(struct radeon_device *rdev)
333 unsigned long irq_flags;
336 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
337 r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
339 dev_err(rdev->dev, "fence failed to get scratch register\n");
340 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
343 WREG32(rdev->fence_drv.scratch_reg, 0);
344 atomic_set(&rdev->fence_drv.seq, 0);
345 INIT_LIST_HEAD(&rdev->fence_drv.created);
346 INIT_LIST_HEAD(&rdev->fence_drv.emited);
347 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
348 init_waitqueue_head(&rdev->fence_drv.queue);
349 rdev->fence_drv.initialized = true;
350 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
351 if (radeon_debugfs_fence_init(rdev)) {
352 dev_err(rdev->dev, "fence debugfs file creation failed\n");
357 void radeon_fence_driver_fini(struct radeon_device *rdev)
359 unsigned long irq_flags;
361 if (!rdev->fence_drv.initialized)
363 wake_up_all(&rdev->fence_drv.queue);
364 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
365 radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
366 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
367 rdev->fence_drv.initialized = false;
/*
 * Fence debugfs support.
 */
#if defined(CONFIG_DEBUG_FS)
/*
 * Dump the last signaled sequence (from the scratch register) and the
 * most recently emitted fence, if any, to the debugfs seq_file.
 */
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;

	seq_printf(m, "Last signaled fence 0x%08X\n",
		   RREG32(rdev->fence_drv.scratch_reg));
	if (!list_empty(&rdev->fence_drv.emited)) {
		/* tail of the emited list is the most recent fence */
		fence = list_entry(rdev->fence_drv.emited.prev,
				   struct radeon_fence, list);
		seq_printf(m, "Last emited fence %p with 0x%08X\n",
			   fence, fence->seq);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif
398 int radeon_debugfs_fence_init(struct radeon_device *rdev)
400 #if defined(CONFIG_DEBUG_FS)
401 return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);