/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

/* Write a fence sequence number to the ring's fence location: the
 * writeback page when writeback is enabled, a scratch register otherwise.
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	if (rdev->wb.enabled) {
		*rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(rdev->fence_drv[ring].scratch_reg, seq);
	}
}

/* Read back the last sequence number the GPU wrote for this ring. */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	u32 seq = 0;

	if (rdev->wb.enabled) {
		seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
	} else {
		seq = RREG32(rdev->fence_drv[ring].scratch_reg);
	}
	return seq;
}

int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	/* emitting a fence twice is a no-op */
	if (fence->emitted) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
	radeon_fence_ring_emit(rdev, fence->ring, fence);
	trace_radeon_fence_emit(rdev->ddev, fence->seq);
	fence->emitted = true;
	list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}

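/*
 * Typical fence lifecycle, as an illustrative sketch only (the ring
 * constant and the error handling here are examples, not code from this
 * file): create the fence, emit it behind the commands it tracks, wait
 * on it, then drop the reference.
 *
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *	if (r)
 *		return r;
 *	// ... write commands to the ring ...
 *	radeon_fence_emit(rdev, fence);
 *	r = radeon_fence_wait(fence, false);
 *	radeon_fence_unref(&fence);
 */
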
/* Poll the hardware fence value and move every fence that has signaled
 * from the emitted list onto the signaled list; returns true when the
 * caller should wake up waiters. Caller must hold rdev->fence_lock.
 */
static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint32_t seq;
	bool wake = false;
	unsigned long cjiffies;

	seq = radeon_fence_read(rdev, ring);
	if (seq != rdev->fence_drv[ring].last_seq) {
		rdev->fence_drv[ring].last_seq = seq;
		rdev->fence_drv[ring].last_jiffies = jiffies;
		rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
	} else {
		cjiffies = jiffies;
		if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
			cjiffies -= rdev->fence_drv[ring].last_jiffies;
			if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
				/* update the timeout */
				rdev->fence_drv[ring].last_timeout -= cjiffies;
			} else {
				/* the 500ms timeout has elapsed, we should
				 * test for a GPU lockup
				 */
				rdev->fence_drv[ring].last_timeout = 1;
			}
		} else {
			/* jiffies wrapped around; update last_jiffies and
			 * just wait a little longer
			 */
			rdev->fence_drv[ring].last_jiffies = cjiffies;
		}
		return false;
	}
	n = NULL;
	list_for_each(i, &rdev->fence_drv[ring].emitted) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fences previous to this one are considered signaled */
	if (n) {
		i = n;
		do {
			n = i->prev;
			list_move_tail(i, &rdev->fence_drv[ring].signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->signaled = true;
			i = n;
		} while (i != &rdev->fence_drv[ring].emitted);
		wake = true;
	}
	return wake;
}

/* kref release callback: unlink the fence and free it. */
static void radeon_fence_destroy(struct kref *kref)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
	list_del(&fence->list);
	fence->emitted = false;
	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
	if (fence->semaphore)
		radeon_semaphore_free(fence->rdev, fence->semaphore);
	kfree(fence);
}

int radeon_fence_create(struct radeon_device *rdev,
			struct radeon_fence **fence,
			int ring)
{
	unsigned long irq_flags;

	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->emitted = false;
	(*fence)->signaled = false;
	(*fence)->seq = 0;
	(*fence)->ring = ring;
	(*fence)->semaphore = NULL;
	INIT_LIST_HEAD(&(*fence)->list);

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}

bool radeon_fence_signaled(struct radeon_fence *fence)
{
	unsigned long irq_flags;
	bool signaled = false;

	if (!fence)
		return true;

	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
	signaled = fence->signaled;
	/* if we are shutting down report all fences as signaled */
	if (fence->rdev->shutdown) {
		signaled = true;
	}
	if (!fence->emitted) {
		WARN(1, "Querying an unemitted fence : %p !\n", fence);
		signaled = true;
	}
	if (!signaled) {
		radeon_fence_poll_locked(fence->rdev, fence->ring);
		signaled = fence->signaled;
	}
	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
	return signaled;
}

int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	struct radeon_device *rdev;
	unsigned long irq_flags, timeout;
	u32 seq;
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return 0;
	}
	rdev = fence->rdev;
	if (radeon_fence_signaled(fence)) {
		return 0;
	}
	timeout = rdev->fence_drv[fence->ring].last_timeout;
retry:
	/* save current sequence used to check for GPU lockup */
	seq = rdev->fence_drv[fence->ring].last_seq;
	trace_radeon_fence_wait_begin(rdev->ddev, seq);
	if (intr) {
		radeon_irq_kms_sw_irq_get(rdev, fence->ring);
		r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
				radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev, fence->ring);
		if (unlikely(r < 0)) {
			return r;
		}
	} else {
		radeon_irq_kms_sw_irq_get(rdev, fence->ring);
		r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
				radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev, fence->ring);
	}
	trace_radeon_fence_wait_end(rdev->ddev, seq);
	if (unlikely(!radeon_fence_signaled(fence))) {
		/* we were interrupted for some reason and the fence
		 * isn't signaled yet, resume waiting
		 */
		if (r) {
			timeout = r;
			goto retry;
		}
		/* don't protect read access to rdev->fence_drv[t].last_seq:
		 * if we are experiencing a lockup the value doesn't change
		 */
		if (seq == rdev->fence_drv[fence->ring].last_seq &&
		    radeon_ring_is_lockup(rdev, fence->ring, &rdev->ring[fence->ring])) {
			/* good news we believe it's a lockup */
			printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
			       fence->seq, seq);
			/* mark the ring as not ready any more */
			rdev->ring[fence->ring].ready = false;
			r = radeon_gpu_reset(rdev);
			if (r)
				return r;
		}
		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		write_lock_irqsave(&rdev->fence_lock, irq_flags);
		rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		rdev->fence_drv[fence->ring].last_jiffies = jiffies;
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		goto retry;
	}
	return 0;
}

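/*
 * Caller-side sketch (illustrative, not code from this file): with
 * intr == true the wait can fail with -ERESTARTSYS, which an ioctl
 * handler should hand straight back to the DRM core so the syscall
 * gets restarted.
 *
 *	r = radeon_fence_wait(fence, true);
 *	if (r) {
 *		radeon_fence_unref(&fence);
 *		return r;	// may be -ERESTARTSYS
 *	}
 */
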
/* Wait for the oldest emitted fence on the ring to signal. */
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (!rdev->ring[ring].ready) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return -EBUSY;
	}
	if (list_empty(&rdev->fence_drv[ring].emitted)) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv[ring].emitted.next,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

/* Wait for the most recently emitted fence, i.e. for the ring to idle. */
int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (!rdev->ring[ring].ready) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	if (list_empty(&rdev->fence_drv[ring].emitted)) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv[ring].emitted.prev,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

/* Called from the interrupt handlers: poll the ring and wake up
 * anybody sleeping on its fence queue.
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	bool wake;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	wake = radeon_fence_poll_locked(rdev, ring);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	if (wake) {
		wake_up_all(&rdev->fence_drv[ring].queue);
	}
}

int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	int not_processed = 0;

	read_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (!rdev->fence_drv[ring].initialized) {
		read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	if (!list_empty(&rdev->fence_drv[ring].emitted)) {
		struct list_head *ptr;
		list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
			/* count up to 3, that's enough info */
			if (++not_processed >= 3)
				break;
		}
	}
	read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return not_processed;
}

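/*
 * Illustrative caller (hypothetical, not from this file): code such as
 * power management can sample the emitted-fence count as a cheap
 * busyness estimate; the value saturates at 3 by design.
 *
 *	if (radeon_fence_count_emitted(rdev, RADEON_RING_TYPE_GFX_INDEX) >= 3)
 *		; // ring is busy, postpone reclocking
 */
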
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	uint64_t index;
	int r;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event) {
		rdev->fence_drv[ring].scratch_reg = 0;
		index = R600_WB_EVENT_OFFSET + ring * 4;
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
	}
	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
	rdev->fence_drv[ring].initialized = true;
	DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}

static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	atomic_set(&rdev->fence_drv[ring].seq, 0);
	INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
	INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
	INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
	init_waitqueue_head(&rdev->fence_drv[ring].queue);
	rdev->fence_drv[ring].initialized = false;
}

int radeon_fence_driver_init(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int ring;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_wait_last(rdev, ring);
		wake_up_all(&rdev->fence_drv[ring].queue);
		write_lock_irqsave(&rdev->fence_lock, irq_flags);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		rdev->fence_drv[ring].initialized = false;
	}
}

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%08X\n",
			   radeon_fence_read(rdev, i));
		if (!list_empty(&rdev->fence_drv[i].emitted)) {
			fence = list_entry(rdev->fence_drv[i].emitted.prev,
					   struct radeon_fence, list);
			seq_printf(m, "Last emitted fence %p with 0x%08X\n",
				   fence, fence->seq);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}