/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include "radeon_reg.h"
#include "radeon.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
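/*
 * Walk back from a ttm_bo_device to the radeon_device that embeds it.
 * The bdev lives inside rdev->mman, so two container_of() steps recover
 * the device without any extra bookkeeping.
 */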
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}
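/*
 * Global TTM state: the memory-accounting and BO subsystems are shared
 * by all TTM drivers in the kernel, so they are taken and released
 * through reference-counted global items rather than created per device.
 */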
static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct ttm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = TTM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	rdev->mman.bo_global_ref.mem_glob =
		rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = TTM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		ttm_global_item_unref(&rdev->mman.mem_global_ref);
		return r;
	}

	rdev->mman.mem_global_referenced = true;
	return 0;
}
static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		ttm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		ttm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}
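/*
 * Backend selection: AGP-mapped cards can reuse TTM's generic AGP
 * backend; everything else goes through the driver's own GART backend
 * defined at the bottom of this file.
 */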
struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);

static struct ttm_backend*
radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
	} else
#endif
	{
		return radeon_ttm_backend_create(rdev);
	}
}
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}
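/*
 * Describe each memory domain (system, GTT, VRAM) to TTM: GPU offset,
 * aperture location/size, and which CPU caching modes are legal there.
 */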
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->gpu_offset = rdev->mc.gtt_location;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			man->io_offset = rdev->mc.agp_base;
			man->io_size = rdev->mc.gtt_size;
			man->io_addr = NULL;
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
					     TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		}
#endif
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->gpu_offset = rdev->mc.vram_location;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		man->io_addr = NULL;
		man->io_offset = rdev->mc.aper_base;
		man->io_size = rdev->mc.aper_size;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
{
	uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;

	switch (bo->mem.mem_type) {
	default:
		return (cur_placement & ~TTM_PL_MASK_CACHING) |
			TTM_PL_FLAG_SYSTEM |
			TTM_PL_FLAG_CACHED;
	}
}
static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}
static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}
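/*
 * Copy a buffer between domains with the GPU blit engine. Both ends are
 * translated to GPU addresses, the copy is fenced, and TTM cleans up the
 * old placement once the fence signals.
 */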
static int radeon_move_blit(struct ttm_buffer_object *bo,
			    bool evict, int no_wait,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	r = radeon_fence_create(rdev, &fence);
	if (unlikely(r)) {
		return r;
	}
	old_start = old_mem->mm_node->start << PAGE_SHIFT;
	new_start = new_mem->mm_node->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_location;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_location;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_location;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_location;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->cp.ready) {
		DRM_ERROR("Trying to move memory with CP turned off.\n");
		return -EINVAL;
	}
	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
	/* FIXME: handle copy error */
	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
				      evict, no_wait, new_mem);
	radeon_fence_unref(&fence);
	return r;
}
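/*
 * VRAM -> system moves cannot be done in one hop: blit the buffer into a
 * temporary GTT placement first, then let TTM finish the move to system
 * memory; the scratch GTT node is returned afterwards.
 */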
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible, bool no_wait,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	uint32_t proposed_placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
			     interruptible, no_wait);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return r;
}
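/* The mirror image: stage in GTT, bind, then blit up into VRAM. */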
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible, bool no_wait,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	uint32_t proposed_flags;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
			     interruptible, no_wait);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return r;
}
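/*
 * Top-level move hook: pick the cheapest strategy. System<->GTT moves
 * only need a (re)bind, GPU blits handle the rest, and memcpy is the
 * fallback when the CP or the copy hook is unavailable.
 */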
static int radeon_bo_move(struct ttm_buffer_object *bo,
			  bool evict, bool interruptible, bool no_wait,
			  struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->cp.ready || rdev->asic->copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, interruptible,
					 no_wait, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, interruptible,
					 no_wait, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}
	return r;
}
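/*
 * Placement priorities: where TTM should try to put a buffer first, and
 * which domains to scan first when looking for buffers to evict.
 */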
const uint32_t radeon_mem_prios[] = {
	TTM_PL_VRAM,
	TTM_PL_TT,
	TTM_PL_SYSTEM,
};

const uint32_t radeon_busy_prios[] = {
	TTM_PL_TT,
	TTM_PL_VRAM,
	TTM_PL_SYSTEM,
};
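/*
 * TTM's sync_obj hooks are opaque; these thin wrappers map them onto
 * radeon fences.
 */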
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
				bool lazy, bool interruptible)
{
	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
	radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
	return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}
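/* The driver vtable TTM uses to call back into radeon. */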
static struct ttm_bo_driver radeon_bo_driver = {
	.mem_type_prio = radeon_mem_prios,
	.mem_busy_prio = radeon_busy_prios,
	.num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
	.num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.sync_obj_signaled = &radeon_sync_obj_signaled,
	.sync_obj_wait = &radeon_sync_obj_wait,
	.sync_obj_flush = &radeon_sync_obj_flush,
	.sync_obj_unref = &radeon_sync_obj_unref,
	.sync_obj_ref = &radeon_sync_obj_ref,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
};
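/*
 * Bring up TTM for this device: take the global refs, initialize the BO
 * driver, create the VRAM and GTT heaps, and pin a small "stolen" VRAM
 * buffer covering the VGA aperture. An illustrative call order only
 * (hypothetical caller, not part of this file) would be, once the memory
 * controller is programmed:
 *
 *	r = radeon_ttm_init(rdev);
 *	if (r)
 *		goto fail;
 *	...
 *	radeon_ttm_fini(rdev);
 */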
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No other user of the address space, so set it to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
			   0, rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
			     RADEON_GEM_DOMAIN_VRAM,
			     &rdev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stollen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
			   0, rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}
	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}
void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->stollen_vga_memory) {
		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stollen_vga_memory);
			radeon_bo_unreserve(rdev->stollen_vga_memory);
		}
		radeon_bo_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	DRM_INFO("radeon: ttm finalized\n");
}
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	r = ttm_vm_ops->fault(vma, vmf);
	return r;
}
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return drm_mmap(filp, vma);
	}

	file_priv = (struct drm_file *)filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}
/*
 * TTM backend functions.
 */
struct radeon_ttm_backend {
	struct ttm_backend backend;
	struct radeon_device *rdev;
	unsigned long num_pages;
	struct page **pages;
	struct page *dummy_read_page;
	bool populated;
	bool bound;
	unsigned offset;
};
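/*
 * populate() hands the backend the array of pages TTM allocated for the
 * object; bind() later maps them into the GART at gtt->offset.
 */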
static int radeon_ttm_backend_populate(struct ttm_backend *backend,
				       unsigned long num_pages,
				       struct page **pages,
				       struct page *dummy_read_page)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = pages;
	gtt->num_pages = num_pages;
	gtt->dummy_read_page = dummy_read_page;
	gtt->populated = true;
	return 0;
}
static void radeon_ttm_backend_clear(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
}
static int radeon_ttm_backend_bind(struct ttm_backend *backend,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_backend *gtt;
	int r;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
	if (!gtt->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
	}
	r = radeon_gart_bind(gtt->rdev, gtt->offset,
			     gtt->num_pages, gtt->pages);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  gtt->num_pages, gtt->offset);
		return r;
	}
	gtt->bound = true;
	return 0;
}
static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
	gtt->bound = false;
	return 0;
}
static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	if (gtt->bound) {
		radeon_ttm_backend_unbind(backend);
	}
	kfree(gtt);
}
static struct ttm_backend_func radeon_backend_func = {
	.populate = &radeon_ttm_backend_populate,
	.clear = &radeon_ttm_backend_clear,
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};
struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
{
	struct radeon_ttm_backend *gtt;

	gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->backend.bdev = &rdev->mman.bdev;
	gtt->backend.flags = 0;
	gtt->backend.func = &radeon_backend_func;
	gtt->rdev = rdev;
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
	return &gtt->backend;
}
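/*
 * debugfs: expose the drm_mm allocator state of the VRAM and GTT heaps
 * (one file per memory type), dumped under the LRU lock.
 */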
#define RADEON_DEBUGFS_MEM_TYPES 2

#if defined(CONFIG_DEBUG_FS)
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	return ret;
}
#endif
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
	unsigned i;

	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
		if (i == 0)
			sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
		else
			sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
		radeon_mem_types_list[i].driver_features = 0;
		if (i == 0)
			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager;
		else
			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
	}
	return radeon_debugfs_add_files(rdev, radeon_mem_types_list,
					RADEON_DEBUGFS_MEM_TYPES);
#else
	return 0;
#endif
}