/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_bo_mem_put(bo, &bo->mem);
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    bool evict, bool no_wait_reserve,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        int ret;

        if (!mem->bus.io_reserved) {
                mem->bus.io_reserved = true;
                ret = bdev->driver->io_mem_reserve(bdev, mem);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}
void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        if (bdev->driver->io_mem_reserve) {
                if (mem->bus.io_reserved) {
                        mem->bus.io_reserved = false;
                        bdev->driver->io_mem_free(bdev, mem);
                }
        }
}
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
{
        int ret;
        void *addr;

        *virtual = NULL;
        ret = ttm_mem_io_reserve(bdev, mem);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(mem->bus.base + mem->bus.offset,
                                          mem->bus.size);
                else
                        addr = ioremap_nocache(mem->bus.base + mem->bus.offset,
                                               mem->bus.size);
                if (!addr) {
                        ttm_mem_io_free(bdev, mem);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
{
        if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
        ttm_mem_io_free(bdev, mem);
}
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
        int i;

        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm_tt_get_page(ttm, page);
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
        dst = kmap_atomic_prot(d, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                dst = vmap(&d, 1, 0, prot);
        else
                dst = kmap(d);
#endif
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(dst);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(dst);
        else
                kunmap(d);
#endif

        return 0;
}
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm_tt_get_page(ttm, page);
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
        src = kmap_atomic_prot(s, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                src = vmap(&s, 1, 0, prot);
        else
                src = kmap(s);
#endif
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(src);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(src);
        else
                kunmap(s);
#endif

        return 0;
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool no_wait_reserve, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir = 1;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;
        if (old_iomap == NULL && ttm == NULL)
                goto out2;

        /*
         * Copy backwards when source and destination are in the same
         * memory type and the ranges may overlap.
         */
        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret)
                        goto out1;
        }
        mb();
out2:
        ttm_bo_free_old_node(bo);

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
                ttm_tt_unbind(ttm);
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
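
/*
 * Illustrative sketch (not part of this file, kept compiled out): a
 * driver's ttm_bo_driver::move callback commonly routes system <-> TT
 * moves to ttm_bo_move_ttm() (a rebind is enough there) and falls back
 * to ttm_bo_move_memcpy() when no GPU copy path is usable.
 * mydrv_bo_move() and mydrv_move_blit() are hypothetical driver
 * functions; mydrv_move_blit() is sketched after
 * ttm_bo_move_accel_cleanup() below.
 */
#if 0
static int mydrv_bo_move(struct ttm_buffer_object *bo,
                         bool evict, bool interruptible,
                         bool no_wait_reserve, bool no_wait_gpu,
                         struct ttm_mem_reg *new_mem)
{
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        /* Moves between system memory and an unbound TT need no copy. */
        if ((old_mem->mem_type == TTM_PL_TT &&
             new_mem->mem_type == TTM_PL_SYSTEM) ||
            (old_mem->mem_type == TTM_PL_SYSTEM &&
             new_mem->mem_type == TTM_PL_TT))
                return ttm_bo_move_ttm(bo, evict, no_wait_reserve,
                                       no_wait_gpu, new_mem);

        /* Hypothetical hardware blit; may fail if the engine is down. */
        ret = mydrv_move_blit(bo, evict, no_wait_gpu, new_mem);
        if (ret)
                /* CPU fallback: always correct, but stalls the GPU. */
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve,
                                         no_wait_gpu, new_mem);
        return ret;
}
#endif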
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * (See the usage sketch following ttm_bo_move_accel_cleanup() below.)
 *
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_buffer_object *fbo;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;

        fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;

        /**
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        spin_lock_init(&fbo->lock);
        init_waitqueue_head(&fbo->event_queue);
        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        fbo->vm_node = NULL;
        atomic_set(&fbo->cpu_writers, 0);

        fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;

        *new_obj = fbo;
        return 0;
}
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);
#elif defined(__powerpc__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
                pgprot_val(tmp) |= _PAGE_NO_CACHE;
                if (caching_flags & TTM_PL_FLAG_UNCACHED)
                        pgprot_val(tmp) |= _PAGE_GUARDED;
        }
#endif
#if defined(__ia64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED))
                tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bo->mem.bus.base +
                                                  bo->mem.bus.offset + offset,
                                                  size);
                else
                        map->virtual = ioremap_nocache(bo->mem.bus.base +
                                                       bo->mem.bus.offset + offset,
                                                       size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;
        struct ttm_tt *ttm = bo->ttm;
        pgprot_t prot;
        struct page *d;
        int i;

        BUG_ON(!ttm);
        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */
                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm_tt_get_page(ttm, start_page);
                map->virtual = kmap(map->page);
        } else {
                /*
                 * Populate the part we're mapping.
                 */
                for (i = start_page; i < start_page + num_pages; ++i) {
                        d = ttm_tt_get_page(ttm, i);
                        if (!d)
                                return -ENOMEM;
                }

                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
                        PAGE_KERNEL :
                        ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}
int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        unsigned long offset, size;
        int ret;

        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                return -EPERM;
#endif
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
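
/*
 * Illustrative sketch (not part of this file, kept compiled out): CPU
 * access to the first page of a reserved, populated buffer object.
 * ttm_bo_kmap()/ttm_bo_kunmap() hide whether the mapping ends up as an
 * ioremap, a vmap or a plain kmap; ttm_kmap_obj_virtual() reports
 * whether the returned pointer is I/O memory. mydrv_clear_first_page()
 * is a hypothetical driver helper.
 */
#if 0
static int mydrv_clear_first_page(struct ttm_buffer_object *bo)
{
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        void *virtual;
        int ret;

        /* bo must be reserved and idle enough for CPU access here. */
        ret = ttm_bo_kmap(bo, 0, 1, &map);
        if (ret)
                return ret;

        virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
        if (is_iomem)
                memset_io((void __iomem *)virtual, 0, PAGE_SIZE);
        else
                memset(virtual, 0, PAGE_SIZE);

        ttm_bo_kunmap(&map);
        return 0;
}
#endif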
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
                              void *sync_obj_arg,
                              bool evict, bool no_wait_reserve,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;
        void *tmp_obj = NULL;

        spin_lock(&bo->lock);
        if (bo->sync_obj) {
                tmp_obj = bo->sync_obj;
                bo->sync_obj = NULL;
        }
        bo->sync_obj = driver->sync_obj_ref(sync_obj);
        bo->sync_obj_arg = sync_obj_arg;
        if (evict) {
                ret = ttm_bo_wait(bo, false, false, false);
                spin_unlock(&bo->lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);
                if (ret)
                        return ret;

                ttm_bo_free_old_node(bo);
                if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
                    (bo->ttm != NULL)) {
                        ttm_tt_unbind(bo->ttm);
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
        } else {
                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                spin_unlock(&bo->lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
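
/*
 * Illustrative sketch (not part of this file, kept compiled out): after
 * queueing an asynchronous GPU copy, a driver hands the resulting fence
 * (sync object) to ttm_bo_move_accel_cleanup(). On eviction the helper
 * waits for the fence; otherwise it parks the old placement on a ghost
 * object (see ttm_buffer_object_transfer() above) until the GPU is done.
 * mydrv_move_blit(), mydrv_emit_copy(), struct mydrv_fence and
 * mydrv_fence_unref() are all hypothetical driver constructs.
 */
#if 0
static int mydrv_move_blit(struct ttm_buffer_object *bo,
                           bool evict, bool no_wait_gpu,
                           struct ttm_mem_reg *new_mem)
{
        struct mydrv_fence *fence;
        int ret;

        /* Queue the blit from bo->mem to new_mem; get a fence back. */
        ret = mydrv_emit_copy(bo, &bo->mem, new_mem, &fence);
        if (ret)
                return ret;

        /* TTM takes its own reference on the sync object. */
        ret = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
                                        evict, false, no_wait_gpu,
                                        new_mem);
        mydrv_fence_unref(&fence);
        return ret;
}
#endif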