/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"
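/* Write the fence sequence number for @ring: into the writeback page when
 * writeback is enabled, directly into the scratch register otherwise.
 */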
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	u32 scratch_index;

	if (rdev->wb.enabled) {
		if (rdev->wb.use_event)
			scratch_index = R600_WB_EVENT_OFFSET +
				rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
		else
			scratch_index = RADEON_WB_SCRATCH_OFFSET +
				rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
		rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
	} else
		WREG32(rdev->fence_drv[ring].scratch_reg, seq);
}
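/* Read back the last sequence number the GPU signaled for @ring, from the
 * writeback page when writeback is enabled, from the scratch register
 * otherwise.
 */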
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	u32 seq = 0;
	u32 scratch_index;

	if (rdev->wb.enabled) {
		if (rdev->wb.use_event)
			scratch_index = R600_WB_EVENT_OFFSET +
				rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
		else
			scratch_index = RADEON_WB_SCRATCH_OFFSET +
				rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
		seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
	} else
		seq = RREG32(rdev->fence_drv[ring].scratch_reg);
	return seq;
}
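/* Assign the next sequence number to @fence and emit the fence command on
 * its ring; if the CP is not running, the sequence number is written back
 * immediately so the fence reads as signaled.
 */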
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (fence->emitted) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
	if (!rdev->cp[fence->ring].ready)
		/* FIXME: the CP is not running, assume everything is done
		 * right away
		 */
		radeon_fence_write(rdev, fence->seq, fence->ring);
	else
		radeon_fence_ring_emit(rdev, fence);

	trace_radeon_fence_emit(rdev->ddev, fence->seq);
	fence->emitted = true;
	list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}
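/* Poll the current sequence number for @ring and move every fence up to and
 * including that sequence from the emitted list to the signaled list. Also
 * maintains the lockup-detection timeout bookkeeping. Returns true when
 * waiters should be woken. Caller must hold rdev->fence_lock.
 */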
static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint32_t seq;
	bool wake = false;
	unsigned long cjiffies;

	seq = radeon_fence_read(rdev, ring);
	if (seq != rdev->fence_drv[ring].last_seq) {
		rdev->fence_drv[ring].last_seq = seq;
		rdev->fence_drv[ring].last_jiffies = jiffies;
		rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
	} else {
		cjiffies = jiffies;
		if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
			cjiffies -= rdev->fence_drv[ring].last_jiffies;
			if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
				/* update the timeout */
				rdev->fence_drv[ring].last_timeout -= cjiffies;
			} else {
				/* the 500ms timeout has elapsed, we should
				 * test for a GPU lockup
				 */
				rdev->fence_drv[ring].last_timeout = 1;
			}
		} else {
			/* jiffies wrapped around, update last_jiffies and
			 * just wait a little longer
			 */
			rdev->fence_drv[ring].last_jiffies = cjiffies;
		}
		return false;
	}
	n = NULL;
	list_for_each(i, &rdev->fence_drv[ring].emitted) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fences previous to this one are considered signaled */
	if (n) {
		i = n;
		do {
			n = i->prev;
			list_move_tail(i, &rdev->fence_drv[ring].signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->signaled = true;
			i = n;
		} while (i != &rdev->fence_drv[ring].emitted);
		wake = true;
	}
	return wake;
}
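/* kref release callback: unlink the fence from its list and free it. */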
static void radeon_fence_destroy(struct kref *kref)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
	list_del(&fence->list);
	fence->emitted = false;
	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
	kfree(fence);
}
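/* Allocate and initialize a fence for @ring and add it to the ring's
 * created list; the fence does nothing until radeon_fence_emit() is
 * called on it.
 */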
int radeon_fence_create(struct radeon_device *rdev,
			struct radeon_fence **fence,
			int ring)
{
	unsigned long irq_flags;

	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->emitted = false;
	(*fence)->signaled = false;
	(*fence)->seq = 0;
	(*fence)->ring = ring;
	INIT_LIST_HEAD(&(*fence)->list);

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}
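/* Check whether @fence has signaled, polling the ring if necessary. A NULL
 * fence, a locked-up GPU and driver shutdown all report the fence as
 * signaled.
 */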
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	unsigned long irq_flags;
	bool signaled = false;

	if (!fence)
		return true;

	if (fence->rdev->gpu_lockup)
		return true;

	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
	signaled = fence->signaled;
	/* if we are shutting down report all fences as signaled */
	if (fence->rdev->shutdown) {
		signaled = true;
	}
	if (!fence->emitted) {
		WARN(1, "Querying an unemitted fence : %p !\n", fence);
		signaled = true;
	}
	if (!signaled) {
		radeon_fence_poll_locked(fence->rdev, fence->ring);
		signaled = fence->signaled;
	}
	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
	return signaled;
}
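/* Block until @fence signals. On timeout, check whether the GPU locked up
 * and, if so, reset it before resuming the wait. @intr selects an
 * interruptible wait.
 */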
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	struct radeon_device *rdev;
	unsigned long irq_flags, timeout;
	u32 seq;
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return 0;
	}
	rdev = fence->rdev;
	if (radeon_fence_signaled(fence)) {
		return 0;
	}
	timeout = rdev->fence_drv[fence->ring].last_timeout;
retry:
	/* save current sequence used to check for GPU lockup */
	seq = rdev->fence_drv[fence->ring].last_seq;
	trace_radeon_fence_wait_begin(rdev->ddev, seq);
	if (intr) {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
				radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
		if (unlikely(r < 0)) {
			return r;
		}
	} else {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
				radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
	}
	trace_radeon_fence_wait_end(rdev->ddev, seq);
	if (unlikely(!radeon_fence_signaled(fence))) {
		/* we were interrupted for some reason and the fence
		 * isn't signaled yet, resume waiting
		 */
		if (r) {
			timeout = r;
			goto retry;
		}
		/* don't protect read access to rdev->fence_drv[t].last_seq;
		 * if we are experiencing a lockup the value doesn't change
		 */
		if (seq == rdev->fence_drv[fence->ring].last_seq &&
		    radeon_gpu_is_lockup(rdev, &rdev->cp[fence->ring])) {
			/* good news, we believe it's a lockup */
			printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
			       fence->seq, seq);
			/* FIXME: what should we do? Mark everything as
			 * signaled for now
			 */
			rdev->gpu_lockup = true;
			r = radeon_gpu_reset(rdev);
			if (r)
				return r;
			radeon_fence_write(rdev, fence->seq, fence->ring);
			rdev->gpu_lockup = false;
		}
		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		write_lock_irqsave(&rdev->fence_lock, irq_flags);
		rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		rdev->fence_drv[fence->ring].last_jiffies = jiffies;
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		goto retry;
	}
	return 0;
}
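/* Wait for the oldest emitted fence on @ring to signal. */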
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (list_empty(&rdev->fence_drv[ring].emitted)) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv[ring].emitted.next,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}
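/* Wait for the most recently emitted fence on @ring to signal, i.e. for
 * the ring to go idle.
 */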
int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (list_empty(&rdev->fence_drv[ring].emitted)) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv[ring].emitted.prev,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}
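/* Take an additional reference on @fence and return it. */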
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}
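/* Drop a reference on *@fence and clear the caller's pointer; the fence
 * is destroyed once its reference count reaches zero.
 */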
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}
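/* Poll @ring for newly signaled fences and wake up anyone waiting on its
 * queue.
 */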
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	bool wake;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	wake = radeon_fence_poll_locked(rdev, ring);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	if (wake) {
		wake_up_all(&rdev->fence_drv[ring].queue);
	}
}
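/* Set up fence state for the first @num_rings rings: grab a scratch
 * register, reset the sequence counter and initialize the fence lists and
 * wait queue; the remaining rings are marked uninitialized.
 */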
int radeon_fence_driver_init(struct radeon_device *rdev, int num_rings)
{
	unsigned long irq_flags;
	int r, ring;

	for (ring = 0; ring < num_rings; ring++) {
		write_lock_irqsave(&rdev->fence_lock, irq_flags);
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
			return r;
		}
		radeon_fence_write(rdev, 0, ring);
		atomic_set(&rdev->fence_drv[ring].seq, 0);
		INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
		INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
		INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
		init_waitqueue_head(&rdev->fence_drv[ring].queue);
		rdev->fence_drv[ring].initialized = true;
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	}
	for (ring = num_rings; ring < RADEON_NUM_RINGS; ring++) {
		write_lock_irqsave(&rdev->fence_lock, irq_flags);
		INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
		INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
		INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
		rdev->fence_drv[ring].initialized = false;
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}
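/* Tear down fence state: wake any remaining waiters and release the
 * scratch register of every initialized ring.
 */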
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		wake_up_all(&rdev->fence_drv[ring].queue);
		write_lock_irqsave(&rdev->fence_lock, irq_flags);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		rdev->fence_drv[ring].initialized = false;
	}
}
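/*
 * Fence debugfs
 */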
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%08X\n",
			   radeon_fence_read(rdev, i));
		if (!list_empty(&rdev->fence_drv[i].emitted)) {
			fence = list_entry(rdev->fence_drv[i].emitted.prev,
					   struct radeon_fence, list);
			seq_printf(m, "Last emitted fence %p with 0x%08X\n",
				   fence, fence->seq);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}