/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
int radeon_debugfs_ib_init(struct radeon_device *rdev);

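/*
 * Read one dword of the command stream through the two kernel pages the
 * CS parser keeps mapped. A worked example with illustrative numbers:
 * with PAGE_SIZE = 4096 and idx = 1500, the dword sits at byte offset
 * 6000, so pg_idx = 6000 / 4096 = 1 and pg_offset = 6000 % 4096 = 1904,
 * and the value is fetched as kpage[n][1904 / 4] = kpage[n][476].
 */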
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}
	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}

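/*
 * Write one dword at the current write pointer. ptr_mask keeps wptr
 * inside the ring; for instance (illustrative size) a 256 dword ring
 * has ptr_mask = 255, so a write at wptr = 255 wraps the pointer back
 * to 0 on the increment.
 */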
void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
{
	if (rdev->cp.count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
	rdev->cp.ring[rdev->cp.wptr++] = v;
	rdev->cp.wptr &= rdev->cp.ptr_mask;
	rdev->cp.count_dw--;
	rdev->cp.ring_free_dw--;
}

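/*
 * "Bogus" IBs are private copies of command streams kept on
 * rdev->ib_pool.bogus_ib so they can be dumped later through the
 * radeon_ib_bogus debugfs file; the cleanup helper below frees whatever
 * is still queued when the pool is torn down.
 */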
void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
{
	struct radeon_ib *ib, *n;

	list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
		list_del(&ib->list);
		vfree(ib->ptr);
		kfree(ib);
	}
}

void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ib *bib;

	bib = kmalloc(sizeof(*bib), GFP_KERNEL);
	if (bib == NULL)
		return;
	bib->ptr = vmalloc(ib->length_dw * 4);
	if (bib->ptr == NULL) {
		kfree(bib);
		return;
	}
	memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
	bib->length_dw = ib->length_dw;
	mutex_lock(&rdev->ib_pool.mutex);
	list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
	mutex_unlock(&rdev->ib_pool.mutex);
}

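/*
 * IB.
 *
 * Typical submission flow, as a minimal sketch (error handling trimmed;
 * cmds and ndw are hypothetical caller data):
 *
 *	struct radeon_ib *ib;
 *	int r;
 *
 *	r = radeon_ib_get(rdev, &ib);
 *	if (r)
 *		return r;
 *	memcpy(ib->ptr, cmds, ndw * 4);
 *	ib->length_dw = ndw;
 *	r = radeon_ib_schedule(rdev, ib);
 *	radeon_ib_free(rdev, &ib);
 */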
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_fence *fence;
	struct radeon_ib *nib;
	int r = 0, i, c;

	*ib = NULL;
	r = radeon_fence_create(rdev, &fence);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
		i &= (RADEON_IB_POOL_SIZE - 1);
		if (rdev->ib_pool.ibs[i].free) {
			nib = &rdev->ib_pool.ibs[i];
			break;
		}
	}
	if (nib == NULL) {
		/* This should never happen: it means we allocated all IBs
		 * and haven't scheduled one yet. Return EBUSY to userspace
		 * hoping that a later ioctl call has better luck.
		 */
		dev_err(rdev->dev, "no free indirect buffer!\n");
		mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -EBUSY;
	}
	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	nib->free = false;
	if (nib->fence) {
		mutex_unlock(&rdev->ib_pool.mutex);
		r = radeon_fence_wait(nib->fence, false);
		if (r) {
			dev_err(rdev->dev, "error waiting on fence of IB(%u:0x%016lX:%u)\n",
				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
			mutex_lock(&rdev->ib_pool.mutex);
			nib->free = true;
			mutex_unlock(&rdev->ib_pool.mutex);
			radeon_fence_unref(&fence);
			return r;
		}
		mutex_lock(&rdev->ib_pool.mutex);
	}
	radeon_fence_unref(&nib->fence);
	nib->fence = fence;
	nib->length_dw = 0;
	mutex_unlock(&rdev->ib_pool.mutex);
	*ib = nib;
	return 0;
}

void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL)
		return;
	if (!tmp->fence->emited)
		radeon_fence_unref(&tmp->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	tmp->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
}

int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int r = 0;

	if (!ib->length_dw || !rdev->cp.ready) {
		/* TODO: nothing in the IB that we should report to userspace */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}
	/* 64 dwords should be enough for the fence too */
	r = radeon_ring_lock(rdev, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib);
	radeon_fence_emit(rdev, ib->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	/* once scheduled, an IB is considered free and protected by the fence */
	ib->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_ring_unlock_commit(rdev);
	return 0;
}

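/*
 * The IB pool below is a single pinned GTT buffer carved into
 * RADEON_IB_POOL_SIZE slots of 64KB each; the "1M" in the comment
 * matches a pool size of 16 (16 * 64KB = 1MB). Each slot's CPU pointer
 * and GPU address are derived from the same 64KB stride.
 */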
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	void *ptr;
	uint64_t gpu_addr;
	int i;
	int r = 0;

	if (rdev->ib_pool.robj)
		return 0;
	INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
	/* Allocate 1M object buffer */
	r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
			     PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
			     &rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->ib_pool.robj);
		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
	radeon_bo_unreserve(rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
		return r;
	}
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		unsigned offset;

		offset = i * 64 * 1024;
		rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
		rdev->ib_pool.ibs[i].ptr = ptr + offset;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		rdev->ib_pool.ibs[i].free = true;
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");
	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	return r;
}

void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	int r;
	struct radeon_bo *robj;

	if (!rdev->ib_pool.ready) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	radeon_ib_bogus_cleanup(rdev);
	robj = rdev->ib_pool.robj;
	rdev->ib_pool.robj = NULL;
	mutex_unlock(&rdev->ib_pool.mutex);
	if (robj) {
		r = radeon_bo_reserve(robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(robj);
			radeon_bo_unpin(robj);
			radeon_bo_unreserve(robj);
		}
		radeon_bo_unref(&robj);
	}
}

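/*
 * Ring.
 *
 * Free-space sketch with illustrative numbers: a 1KB ring holds 256
 * dwords and has ptr_mask = 255; with rptr = 10 and wptr = 250,
 * ring_free_dw = (10 + 256 - 250) & 255 = 16. A result of 0 means
 * rptr == wptr, i.e. an empty ring, hence the reset to the full size
 * at the end of radeon_ring_free_size().
 */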
void radeon_ring_free_size(struct radeon_device *rdev)
{
	if (rdev->wb.enabled)
		rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
	else {
		if (rdev->family >= CHIP_R600)
			rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
		else
			rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	}
	/* This works because ring_size is a power of 2 */
	rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
	rdev->cp.ring_free_dw -= rdev->cp.wptr;
	rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
	if (!rdev->cp.ring_free_dw) {
		rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
	}
}

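/*
 * radeon_ring_alloc() first rounds the request up to the alignment
 * unit; e.g. with align_mask = 15, ndw = 70 becomes
 * (70 + 15) & ~15 = 80 dwords.
 */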
int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can pad safely */
	ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
	while (ndw > (rdev->cp.ring_free_dw - 1)) {
		radeon_ring_free_size(rdev);
		if (ndw < rdev->cp.ring_free_dw)
			break;
		r = radeon_fence_wait_next(rdev);
		if (r)
			return r;
	}
	rdev->cp.count_dw = ndw;
	rdev->cp.wptr_old = rdev->cp.wptr;
	return 0;
}

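/*
 * Locked ring usage, as a minimal sketch (reg and value are
 * hypothetical; error handling trimmed):
 *
 *	r = radeon_ring_lock(rdev, 2);
 *	if (r)
 *		return r;
 *	radeon_ring_write(rdev, PACKET0(reg, 0));
 *	radeon_ring_write(rdev, value);
 *	radeon_ring_unlock_commit(rdev);
 *
 * On an error after locking, radeon_ring_unlock_undo() restores
 * wptr_old and drops the mutex without committing anything.
 */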
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
{
	int r;

	mutex_lock(&rdev->cp.mutex);
	r = radeon_ring_alloc(rdev, ndw);
	if (r)
		mutex_unlock(&rdev->cp.mutex);
	return r;
}

void radeon_ring_commit(struct radeon_device *rdev)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (rdev->cp.align_mask + 1) -
		       (rdev->cp.wptr & rdev->cp.align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(rdev, 2 << 30);
	}
	DRM_MEMORYBARRIER();
	radeon_cp_commit(rdev);
}

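/*
 * The 2 << 30 filler written above sets the packet type field (bits
 * 31:30) to 2, a NOP-style padding packet the CP skips over. The two
 * helpers below pair with radeon_ring_lock(): unlock_commit pushes the
 * new write pointer to the hardware, unlock_undo rewinds to wptr_old
 * as if nothing had been written.
 */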
void radeon_ring_unlock_commit(struct radeon_device *rdev)
{
	radeon_ring_commit(rdev);
	mutex_unlock(&rdev->cp.mutex);
}

void radeon_ring_unlock_undo(struct radeon_device *rdev)
{
	rdev->cp.wptr = rdev->cp.wptr_old;
	mutex_unlock(&rdev->cp.mutex);
}

int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	int r;

	rdev->cp.ring_size = ring_size;
	/* Allocate ring buffer */
	if (rdev->cp.ring_obj == NULL) {
		r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->cp.ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->cp.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->cp.ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->cp.ring_obj,
				   (void **)&rdev->cp.ring);
		radeon_bo_unreserve(rdev->cp.ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
	rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
	return 0;
}

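/*
 * Sizing note for radeon_ring_init(): ring_size must be a power of two
 * for ptr_mask to work. A 64KB ring, for instance, holds 16384 dwords,
 * giving ptr_mask = 0x3fff, which is what lets the wrap and free-space
 * math above use plain AND operations instead of a modulo.
 */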
void radeon_ring_fini(struct radeon_device *rdev)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&rdev->cp.mutex);
	ring_obj = rdev->cp.ring_obj;
	rdev->cp.ring = NULL;
	rdev->cp.ring_obj = NULL;
	mutex_unlock(&rdev->cp.mutex);
	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_ib *ib = node->info_ent->data;
	unsigned i;

	if (ib == NULL)
		return 0;
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++)
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	return 0;
}

static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_device *rdev = node->info_ent->data;
	struct radeon_ib *ib;
	unsigned i;

	mutex_lock(&rdev->ib_pool.mutex);
	if (list_empty(&rdev->ib_pool.bogus_ib)) {
		mutex_unlock(&rdev->ib_pool.mutex);
		seq_printf(m, "no bogus IB recorded\n");
		return 0;
	}
	ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
	list_del_init(&ib->list);
	mutex_unlock(&rdev->ib_pool.mutex);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++)
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	vfree(ib->ptr);
	kfree(ib);
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];

static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
	{"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
};
#endif

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;
	int r;

	radeon_debugfs_ib_bogus_info_list[0].data = rdev;
	r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
	if (r)
		return r;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}