/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */

#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */

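/*
 * A minimal usage sketch of the API below (illustrative only, not code
 * taken from this driver): real callers such as the CS ioctl emit their
 * ring commands first and do proper error handling, and the ring index
 * here is just an example.
 *
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *	if (r)
 *		return r;
 *	r = radeon_fence_wait(fence, false);	// block until the GPU passes the fence
 *	radeon_fence_unref(&fence);		// drop our reference
 */
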
/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		*drv->cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		seq = le32_to_cpu(*drv->cpu_addr);
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
	return 0;
}

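/* Note: sync_seq[ring] is the driver's 64-bit emitted-fence counter, but
 * radeon_fence_write() only stores the lower 32 bits in the fence location;
 * radeon_fence_process() below re-extends the value it reads back to the
 * full 64 bits.
 */
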
/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process and the other
	 * process needs to update last_seq between the atomic read and
	 * xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop there needs to
	 * be a continuous stream of newly signaled fences, i.e.
	 * radeon_fence_read needs to return a different value each time
	 * for both the currently polling process and the other process
	 * that xchgs last_seq between the atomic read and xchg of the
	 * current process. And the value the other process sets as last
	 * seq must be higher than the seq value we just read. Which means
	 * the current process needs to be interrupted after
	 * radeon_fence_read and before the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail after 10 loops, just accepting the fact that we might
	 * have temporarily set last_seq not to the true last
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq += 0x100000000LL;
		}

		if (seq == last_seq) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake) {
		rdev->fence_drv[ring].last_activity = jiffies;
		wake_up_all(&rdev->fence_queue);
	}
}

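/* radeon_fence_process() is invoked both from the fence interrupt handlers
 * and from polling paths such as radeon_fence_seq_signaled() below, which
 * is why the lockless last_seq update above has to tolerate concurrent
 * callers.
 */
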
/**
 * radeon_fence_destroy - destroy a fence
 *
 * @kref: fence kref
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct kref *kref)
{
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	kfree(fence);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence) {
		return true;
	}
	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
		return true;
	}
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
		return true;
	}
	return false;
}

/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring should be locked or not
 *
 * Wait for the requested sequence number to be written (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected and the ring is
 * marked as not ready so no further jobs get scheduled until a successful
 * reset.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
				 unsigned ring, bool intr, bool lock_ring)
{
	unsigned long timeout, last_activity;
	uint64_t seq;
	unsigned i;
	bool signaled;
	int r;

	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
		if (!rdev->ring[ring].ready) {
			return -EBUSY;
		}

		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * either way we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}
		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
		/* Save current last activity value, used to check for GPU lockups */
		last_activity = rdev->fence_drv[ring].last_activity;

		trace_radeon_fence_wait_begin(rdev->ddev, seq);
		radeon_irq_kms_sw_irq_get(rdev, ring);
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		}
		radeon_irq_kms_sw_irq_put(rdev, ring);
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, seq);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			/* check if sequence value has changed since last_activity */
			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
				continue;
			}

			if (lock_ring) {
				mutex_lock(&rdev->ring_lock);
			}

			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != rdev->fence_drv[ring].last_activity) {
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
					 target_seq, seq);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				return -EDEADLK;
			}

			if (lock_ring) {
				mutex_unlock(&rdev->ring_lock);
			}
		}
	}
	return 0;
}

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return -EINVAL;
	}

	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
				  fence->ring, intr, true);
	if (r) {
		return r;
	}
	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}

bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
			return true;
		}
	}
	return false;
}

/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_any(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
				     u64 *target_seq, bool intr)
{
	unsigned long timeout, last_activity, tmp;
	unsigned i, ring = RADEON_NUM_RINGS;
	bool signaled;
	int r;

	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i]) {
			continue;
		}

		/* use the most recent one as indicator */
		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
			last_activity = rdev->fence_drv[i].last_activity;
		}

		/* For lockup detection just pick the lowest ring we are
		 * actively waiting for
		 */
		if (i < ring) {
			ring = i;
		}
	}

	/* nothing to wait for? */
	if (ring == RADEON_NUM_RINGS) {
		return -ENOENT;
	}

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * either way we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}

		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_get(rdev, i);
			}
		}
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		}
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_put(rdev, i);
			}
		}
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			mutex_lock(&rdev->ring_lock);
			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
					tmp = rdev->fence_drv[i].last_activity;
				}
			}
			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != tmp) {
				last_activity = tmp;
				mutex_unlock(&rdev->ring_lock);
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
					 target_seq[ring]);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				mutex_unlock(&rdev->ring_lock);
				return -EDEADLK;
			}
			mutex_unlock(&rdev->ring_lock);
		}
	}
	return 0;
}

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics). The fence
 * array is indexed by ring id. @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
			/* something was already signaled */
			return 0;
		}

		seq[i] = fences[i]->seq;
	}

	r = radeon_fence_wait_any_seq(rdev, seq, intr);
	if (r) {
		return r;
	}
	return 0;
}

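/*
 * Illustrative sketch of a radeon_fence_wait_any() call (hypothetical
 * caller, error handling elided): one slot per ring the caller may have
 * to wait on, unused slots left NULL.
 *
 *	struct radeon_fence *fences[RADEON_NUM_RINGS] = { NULL, };
 *
 *	fences[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence;
 *	r = radeon_fence_wait_any(rdev, fences, false);
 */
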
/**
 * radeon_fence_wait_next_locked - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq;

	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is
		 * already the last emitted fence */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}

/**
 * radeon_fence_wait_empty_locked - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Errors are logged rather than returned; on a detected lockup the
 * GPU is reset and the wait is retried. Caller must hold ring lock.
 */
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];

	while (1) {
		int r;
		r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
		if (r == -EDEADLK) {
			mutex_unlock(&rdev->ring_lock);
			r = radeon_gpu_reset(rdev);
			mutex_lock(&rdev->ring_lock);
			if (!r)
				continue;
		}
		if (r) {
			dev_err(rdev->dev, "error waiting for ring to become"
				" idle (%d)\n", r);
		}
		return;
	}
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid 32-bit wrap around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * we don't.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}

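/* radeon_fence_need_sync() and radeon_fence_note_sync() are meant to be
 * used as a pair: when need_sync() returns true the caller emits a
 * semaphore wait on @dst_ring and then calls note_sync(), so that later
 * fences on the same ring can skip the semaphore.
 */
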
/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event) {
		rdev->fence_drv[ring].scratch_reg = 0;
		index = R600_WB_EVENT_OFFSET + ring * 4;
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
	}
	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}

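/* With write-back enabled the fence value lives in the writeback buffer at
 * a per-ring offset; otherwise a scratch register is borrowed. Either way
 * cpu_addr/gpu_addr end up pointing at the dword that radeon_fence_read()
 * and radeon_fence_write() access.
 */
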
/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].last_activity = jiffies;
	rdev->fence_drv[ring].initialized = false;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_wait_empty_locked(rdev, ring);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}

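/* With CONFIG_DEBUG_FS enabled, the per-ring fence state is exposed under
 * the drm debugfs root, typically
 * /sys/kernel/debug/dri/<minor>/radeon_fence_info.
 */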