/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);
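
/*
 * Helper for command-stream parsing: fetch one dword from the IB
 * chunk. Instead of mapping the whole chunk, the parser caches two
 * kmapped pages (kpage[0]/kpage[1]); on a cache miss
 * radeon_cs_update_pages() maps the needed page into one of the two
 * slots and returns which slot it filled.
 */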
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
        struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
        u32 pg_idx, pg_offset;
        u32 idx_value = 0;
        int new_page;

        pg_idx = (idx * 4) / PAGE_SIZE;
        pg_offset = (idx * 4) % PAGE_SIZE;

        if (ibc->kpage_idx[0] == pg_idx)
                return ibc->kpage[0][pg_offset/4];
        if (ibc->kpage_idx[1] == pg_idx)
                return ibc->kpage[1][pg_offset/4];

        new_page = radeon_cs_update_pages(p, pg_idx);
        if (new_page < 0) {
                p->parser_error = new_page;
                return 0;
        }

        idx_value = ibc->kpage[new_page][pg_offset/4];
        return idx_value;
}

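/*
 * Write a single dword at the current write pointer. Masking with
 * ptr_mask wraps the pointer, which works because the ring size in
 * dwords is a power of two; count_dw tracks how many of the dwords
 * reserved by radeon_ring_alloc() are still unused.
 */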
void radeon_ring_write(struct radeon_cp *cp, uint32_t v)
{
#if DRM_DEBUG_CODE
        if (cp->count_dw <= 0) {
                DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
        }
#endif
        cp->ring[cp->wptr++] = v;
        cp->wptr &= cp->ptr_mask;
        cp->count_dw--;
        cp->ring_free_dw--;
}

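/*
 * "Bogus" IBs are CPU-side copies of command buffers that another
 * part of the driver flagged as bad (typically after a GPU problem
 * such as a lockup). They live on ib_pool.bogus_ib until they are
 * either dumped through the radeon_ib_bogus debugfs file or freed by
 * radeon_ib_bogus_cleanup().
 */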
void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
{
        struct radeon_ib *ib, *n;

        list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
                list_del(&ib->list);
                vfree(ib->ptr);
                kfree(ib);
        }
}

void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
{
        struct radeon_ib *bib;

        bib = kmalloc(sizeof(*bib), GFP_KERNEL);
        if (bib == NULL)
                return;
        bib->ptr = vmalloc(ib->length_dw * 4);
        if (bib->ptr == NULL) {
                kfree(bib);
                return;
        }
        memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
        bib->length_dw = ib->length_dw;
        mutex_lock(&rdev->ib_pool.mutex);
        list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
        mutex_unlock(&rdev->ib_pool.mutex);
}

/*
 * IB.
 */
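/*
 * Hand out a free IB from the pool. The scan starts at head_id and
 * wraps with a mask, which relies on RADEON_IB_POOL_SIZE being a
 * power of two. If the selected IB still carries a fence, a previous
 * user has scheduled it, so we must wait for that fence before the
 * buffer can be reused.
 */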
int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib **ib)
{
        struct radeon_fence *fence;
        struct radeon_ib *nib;
        int r = 0, i, c;

        *ib = NULL;
        r = radeon_fence_create(rdev, &fence, ring);
        if (r) {
                dev_err(rdev->dev, "failed to create fence for new IB\n");
                return r;
        }
        mutex_lock(&rdev->ib_pool.mutex);
        for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
                i &= (RADEON_IB_POOL_SIZE - 1);
                if (rdev->ib_pool.ibs[i].free) {
                        nib = &rdev->ib_pool.ibs[i];
                        break;
                }
        }
        if (nib == NULL) {
                /* This should never happen: it means every IB in the
                 * pool is allocated and none has been scheduled yet.
                 * Return -EBUSY to userspace, hoping that a later
                 * retry of the ioctl has better luck.
                 */
                dev_err(rdev->dev, "no free indirect buffer!\n");
                mutex_unlock(&rdev->ib_pool.mutex);
                radeon_fence_unref(&fence);
                return -EBUSY;
        }
        rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
        nib->free = false;
        if (nib->fence) {
                mutex_unlock(&rdev->ib_pool.mutex);
                r = radeon_fence_wait(nib->fence, false);
                if (r) {
                        dev_err(rdev->dev, "error waiting for fence of IB(%u:0x%016lX:%u)\n",
                                nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
                        mutex_lock(&rdev->ib_pool.mutex);
                        nib->free = true;
                        mutex_unlock(&rdev->ib_pool.mutex);
                        radeon_fence_unref(&fence);
                        return r;
                }
                mutex_lock(&rdev->ib_pool.mutex);
        }
        radeon_fence_unref(&nib->fence);
        nib->fence = fence;
        nib->length_dw = 0;
        mutex_unlock(&rdev->ib_pool.mutex);
        *ib = nib;
        return 0;
}

void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
        struct radeon_ib *tmp = *ib;

        *ib = NULL;
        if (tmp == NULL) {
                return;
        }
        if (!tmp->fence->emitted)
                radeon_fence_unref(&tmp->fence);
        mutex_lock(&rdev->ib_pool.mutex);
        tmp->free = true;
        mutex_unlock(&rdev->ib_pool.mutex);
}

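/*
 * Push an IB onto the ring it was created for: lock the ring, emit
 * the IB execute packet followed by the IB's fence, then mark the IB
 * free again; from that point on only the fence protects its
 * contents from reuse.
 */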
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
        struct radeon_cp *cp = &rdev->cp[ib->fence->ring];
        int r = 0;

        if (!ib->length_dw || !cp->ready) {
                /* TODO: nothing in the IB to schedule; report this properly. */
                DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
                return -EINVAL;
        }

        /* 64 dwords should be enough for the fence too */
        r = radeon_ring_lock(rdev, cp, 64);
        if (r) {
                DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
                return r;
        }
        radeon_ring_ib_execute(rdev, ib);
        radeon_fence_emit(rdev, ib->fence);
        mutex_lock(&rdev->ib_pool.mutex);
        /* once scheduled, the IB is considered free and protected only by the fence */
        ib->free = true;
        mutex_unlock(&rdev->ib_pool.mutex);
        radeon_ring_unlock_commit(rdev, cp);
        return 0;
}

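/*
 * The whole IB pool lives in one pinned GTT buffer object of
 * RADEON_IB_POOL_SIZE * 64K; each pool entry gets a 64K slice, with
 * gpu_addr and ptr pointing at its offset in the single mapping.
 */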
int radeon_ib_pool_init(struct radeon_device *rdev)
{
        void *ptr;
        uint64_t gpu_addr;
        int i;
        int r = 0;

        if (rdev->ib_pool.robj)
                return 0;
        INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
        /* Allocate 1M object buffer */
        r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
                             PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
                             &rdev->ib_pool.robj);
        if (r) {
                DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
                return r;
        }
        r = radeon_bo_reserve(rdev->ib_pool.robj, false);
        if (unlikely(r != 0))
                return r;
        r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
        if (r) {
                radeon_bo_unreserve(rdev->ib_pool.robj);
                DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
                return r;
        }
        r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
        radeon_bo_unreserve(rdev->ib_pool.robj);
        if (r) {
                DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
                return r;
        }
        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                unsigned offset;

                offset = i * 64 * 1024;
                rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
                rdev->ib_pool.ibs[i].ptr = ptr + offset;
                rdev->ib_pool.ibs[i].idx = i;
                rdev->ib_pool.ibs[i].length_dw = 0;
                rdev->ib_pool.ibs[i].free = true;
        }
        rdev->ib_pool.head_id = 0;
        rdev->ib_pool.ready = true;
        DRM_INFO("radeon: ib pool ready.\n");
        if (radeon_debugfs_ib_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for IB!\n");
        }
        return r;
}

void radeon_ib_pool_fini(struct radeon_device *rdev)
{
        int r;
        struct radeon_bo *robj;

        if (!rdev->ib_pool.ready) {
                return;
        }
        mutex_lock(&rdev->ib_pool.mutex);
        radeon_ib_bogus_cleanup(rdev);
        robj = rdev->ib_pool.robj;
        rdev->ib_pool.robj = NULL;
        mutex_unlock(&rdev->ib_pool.mutex);

        if (robj) {
                r = radeon_bo_reserve(robj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(robj);
                        radeon_bo_unpin(robj);
                        radeon_bo_unreserve(robj);
                }
                radeon_bo_unref(&robj);
        }
}

/*
 * Ring.
 */
int radeon_ring_index(struct radeon_device *rdev, struct radeon_cp *cp)
{
        /* r1xx-r5xx only has the one CP ring */
        if (rdev->family < CHIP_R600)
                return RADEON_RING_TYPE_GFX_INDEX;

        if (rdev->family >= CHIP_CAYMAN) {
                if (cp == &rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX])
                        return CAYMAN_RING_TYPE_CP1_INDEX;
                else if (cp == &rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX])
                        return CAYMAN_RING_TYPE_CP2_INDEX;
        }
        return RADEON_RING_TYPE_GFX_INDEX;
}

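/*
 * Refresh ring_free_dw from the hardware read pointer. The rptr is
 * read from the writeback page when writeback is enabled, otherwise
 * via MMIO; free space is then (rptr - wptr) modulo the ring size in
 * dwords.
 */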
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_cp *cp)
{
        if (rdev->wb.enabled)
                cp->rptr = le32_to_cpu(rdev->wb.wb[cp->rptr_offs/4]);
        else
                cp->rptr = RREG32(cp->rptr_reg);
        /* This works because ring_size is a power of 2 */
        cp->ring_free_dw = (cp->rptr + (cp->ring_size / 4));
        cp->ring_free_dw -= cp->wptr;
        cp->ring_free_dw &= cp->ptr_mask;
        if (!cp->ring_free_dw) {
                cp->ring_free_dw = cp->ring_size / 4;
        }
}

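/*
 * Reserve ndw dwords, rounded up to the fetch alignment. One dword is
 * always kept unused (the ring_free_dw - 1 test) so that wptr == rptr
 * unambiguously means "empty"; when there is not enough room we wait
 * for the next fence so the GPU consumes part of the ring.
 */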
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw)
{
        int r;

        /* Align requested size with padding so unlock_commit can
         * pad safely */
        ndw = (ndw + cp->align_mask) & ~cp->align_mask;
        while (ndw > (cp->ring_free_dw - 1)) {
                radeon_ring_free_size(rdev, cp);
                if (ndw < cp->ring_free_dw) {
                        break;
                }
                r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, cp));
                if (r)
                        return r;
        }
        cp->count_dw = ndw;
        cp->wptr_old = cp->wptr;
        return 0;
}

int radeon_ring_lock(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw)
{
        int r;

        mutex_lock(&cp->mutex);
        r = radeon_ring_alloc(rdev, cp, ndw);
        if (r) {
                mutex_unlock(&cp->mutex);
                return r;
        }
        return 0;
}

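/*
 * Pad the ring up to the next fetch boundary using type-2 packets
 * (2 << 30 sets packet type 2 in bits 31:30), which the CP treats as
 * one-dword filler NOPs, then publish the new write pointer. The
 * read-back of wptr_reg flushes the posted MMIO write.
 */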
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_cp *cp)
{
        unsigned count_dw_pad;
        unsigned i;

        /* We pad to match fetch size */
        count_dw_pad = (cp->align_mask + 1) -
                       (cp->wptr & cp->align_mask);
        for (i = 0; i < count_dw_pad; i++) {
                radeon_ring_write(cp, 2 << 30);
        }
        DRM_MEMORYBARRIER();
        WREG32(cp->wptr_reg, cp->wptr);
        (void)RREG32(cp->wptr_reg);
}

void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_cp *cp)
{
        radeon_ring_commit(rdev, cp);
        mutex_unlock(&cp->mutex);
}

void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_cp *cp)
{
        cp->wptr = cp->wptr_old;
        mutex_unlock(&cp->mutex);
}

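/*
 * One-time ring setup: record where rptr/wptr live, then allocate,
 * pin and kmap the ring buffer object in GTT. ring_size is in bytes
 * and must be a power of two, since ptr_mask is derived from it to
 * wrap dword indices.
 */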
int radeon_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ring_size,
                     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg)
{
        int r;

        cp->ring_size = ring_size;
        cp->rptr_offs = rptr_offs;
        cp->rptr_reg = rptr_reg;
        cp->wptr_reg = wptr_reg;
        /* Allocate ring buffer */
        if (cp->ring_obj == NULL) {
                r = radeon_bo_create(rdev, cp->ring_size, PAGE_SIZE, true,
                                     RADEON_GEM_DOMAIN_GTT,
                                     &cp->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring create failed\n", r);
                        return r;
                }
                r = radeon_bo_reserve(cp->ring_obj, false);
                if (unlikely(r != 0))
                        return r;
                r = radeon_bo_pin(cp->ring_obj, RADEON_GEM_DOMAIN_GTT,
                                  &cp->gpu_addr);
                if (r) {
                        radeon_bo_unreserve(cp->ring_obj);
                        dev_err(rdev->dev, "(%d) ring pin failed\n", r);
                        return r;
                }
                r = radeon_bo_kmap(cp->ring_obj,
                                   (void **)&cp->ring);
                radeon_bo_unreserve(cp->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring map failed\n", r);
                        return r;
                }
        }
        cp->ptr_mask = (cp->ring_size / 4) - 1;
        cp->ring_free_dw = cp->ring_size / 4;
        return 0;
}

void radeon_ring_fini(struct radeon_device *rdev, struct radeon_cp *cp)
{
        int r;
        struct radeon_bo *ring_obj;

        mutex_lock(&cp->mutex);
        ring_obj = cp->ring_obj;
        cp->ring = NULL;
        cp->ring_obj = NULL;
        mutex_unlock(&cp->mutex);

        if (ring_obj) {
                r = radeon_bo_reserve(ring_obj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(ring_obj);
                        radeon_bo_unpin(ring_obj);
                        radeon_bo_unreserve(ring_obj);
                }
                radeon_bo_unref(&ring_obj);
        }
}

/*
 * Debugfs info
 */
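/*
 * One debugfs file is registered per pool IB, plus radeon_ib_bogus.
 * Note that reading radeon_ib_bogus is destructive: each read pops
 * the oldest recorded bogus IB off the list and frees it after
 * dumping.
 */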
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct radeon_ib *ib = node->info_ent->data;
        unsigned i;

        if (ib == NULL) {
                return 0;
        }
        seq_printf(m, "IB %04u\n", ib->idx);
        seq_printf(m, "IB fence %p\n", ib->fence);
        seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
        for (i = 0; i < ib->length_dw; i++) {
                seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
        }
        return 0;
}

static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct radeon_device *rdev = node->info_ent->data;
        struct radeon_ib *ib;
        unsigned i;

        mutex_lock(&rdev->ib_pool.mutex);
        if (list_empty(&rdev->ib_pool.bogus_ib)) {
                mutex_unlock(&rdev->ib_pool.mutex);
                seq_printf(m, "no bogus IB recorded\n");
                return 0;
        }
        ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
        list_del_init(&ib->list);
        mutex_unlock(&rdev->ib_pool.mutex);
        seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
        for (i = 0; i < ib->length_dw; i++) {
                seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
        }
        vfree(ib->ptr);
        kfree(ib);
        return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];

static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
        {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
};
#endif

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        unsigned i;
        int r;

        radeon_debugfs_ib_bogus_info_list[0].data = rdev;
        r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
        if (r)
                return r;
        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
                radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
                radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
                radeon_debugfs_ib_list[i].driver_features = 0;
                radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
        }
        return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
                                        RADEON_IB_POOL_SIZE);
#else
        return 0;
#endif
}