/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <core/engine.h>
#include <linux/swiotlb.h>

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/bar.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
	struct nouveau_engine *engine;

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		pfb->tile.fini(pfb, i, tile);

	if (pitch)
		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

	pfb->tile.prog(pfb, i, tile);

	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct nouveau_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = nouveau_fence_ref(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && pfb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, flags);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem.filp))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	WARN_ON(nvbo->pin_refcnt > 0);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_device *device = nv_device(drm->device);

	if (device->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (device->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

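/*
 * Illustrative note (added, not part of the original driver): for a pre-NV50
 * chipset >= 0x40 with tile_mode 4, the fixup above behaves roughly like
 *
 *	size  = roundup(100 * 1024, 64 * 4);	// 102400, already a multiple of 256
 *	align = 65536;				// 64 KiB placement alignment
 *
 * while on NV50+ the size and alignment are simply rounded up to the bo's
 * page size (1 << page_shift).
 */
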
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;
	int lpg_shift = 12;
	int max_size;

	if (drm->client.base.vm)
		lpg_shift = drm->client.base.vm->vmm->lpg_shift;
	max_size = INT_MAX & ~((1 << lpg_shift) - 1);

	if (size <= 0 || size > max_size) {
		nv_warn(drm, "skipped size %x\n", (u32)size);
		return -EINVAL;
	}

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	nvbo->page_shift = 12;
	if (drm->client.base.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

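/*
 * Example call sequence (illustrative only, not from the original file):
 * allocating, pinning and mapping a small linear VRAM buffer, with error
 * handling omitted:
 *
 *	struct nouveau_bo *nvbo = NULL;
 *
 *	nouveau_bo_new(dev, 65536, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL, &nvbo);
 *	nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *	nouveau_bo_map(nvbo);
 */
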
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;

	if ((nv_device(drm->device)->card_type == NV_10 ||
	     nv_device(drm->device)->card_type == NV_11) &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

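/*
 * Worked example (added for clarity, not in the original source): with 64 MiB
 * of VRAM, vram_pages is 16384, so a small tiled depth (ZETA) buffer gets
 * fpfn = 8192 (upper half of VRAM) while a tiled colour buffer gets
 * lpfn = 8192 (lower half), keeping the two on different memory controller
 * units.
 */
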
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		ret = -EINVAL;
		goto out;
	}

	if (nvbo->pin_refcnt++)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available -= bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available -= bo->mem.size;
			break;
		default:
			break;
		}
	}
out:
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, ref;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);
	if (ref)
		goto out;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}

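/*
 * Usage note (added, not from the original source): pin/unpin calls nest via
 * pin_refcnt, e.g.
 *
 *	nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);	// 0 -> 1, placement revalidated
 *	nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);	// 1 -> 2, no-op
 *	nouveau_bo_unpin(nvbo);			// 2 -> 1, still pinned
 *	nouveau_bo_unpin(nvbo);			// 1 -> 0, evictable again
 */
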
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      interruptible, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

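/*
 * Illustrative example (added): the accessors above index in units of the
 * value size and require a prior nouveau_bo_map(), e.g.
 *
 *	nouveau_bo_map(nvbo);
 *	nouveau_bo_wr32(nvbo, 0, 0x12345678);	// 32-bit word 0
 *	val = nouveau_bo_rd32(nvbo, 0);
 *	nouveau_bo_unmap(nvbo);
 */
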
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
#if __OS_HAS_AGP
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);
	}
#endif

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (nv_device(drm->device)->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (nv_device(drm->device)->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
		if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}

		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}


/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
					no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle & 0x0000ffff);
		FIRE_RING (chan);
	}
	return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

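/*
 * Illustrative arithmetic (added, not from the original source): each
 * submission above copies at most 8191 lines of PAGE_SIZE bytes, so with
 * 4 KiB pages a 64 MiB buffer (16384 pages) is transferred in three passes
 * of 8191 + 8191 + 2 pages.
 */
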
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* COPY */);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING  (chan, NvNotify0);
		OUT_RING  (chan, NvDmaFB);
		OUT_RING  (chan, NvDmaFB);
	}

	return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int ret;

	while (length) {
		u32 amount, stride, height;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING  (chan, NvNotify0);
	}

	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return NvDmaFB;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
			     PAGE_SHIFT, node->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);

	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (nv_device(drm->device)->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
		if (ret)
			goto out;

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
		if (ret)
			goto out;
	}

	ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_gpu, new_mem);
	}

out:
	mutex_unlock(&chan->cli->mutex);
	return ret;
}

void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		u32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_object *object;
		struct nouveau_channel *chan;
		u32 handle = (mthd->engine << 16) | mthd->oclass;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
					 mthd->oclass, NULL, 0, &object);
		if (ret == 0) {
			ret = mthd->init(chan, handle);
			if (ret) {
				nouveau_object_del(nv_object(drm),
						   chan->handle, handle);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

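/*
 * Note (added, not part of the original source): the method table above is
 * walked top-down, so the newest copy engine class that can be instantiated
 * on this device wins. If nothing initialises, drm->ttm.move stays NULL and
 * buffer moves fall back to ttm_bo_move_memcpy(), reported as
 * "MM: using CPU for buffer copies".
 */
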
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else
		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
		    nvbo->page_shift == vma->vm->vmm->spg_shift) {
			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
				nouveau_vm_map_sg_table(vma, 0,
					new_mem->num_pages << PAGE_SHIFT,
					new_mem->mm_node);
			else
				nouveau_vm_map_sg(vma, 0,
					new_mem->num_pages << PAGE_SHIFT,
					new_mem->mm_node);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (nv_device(drm->device)->card_type >= NV_10) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;

	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (nv_device(drm->device)->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* CPU copy if we have no accelerated method available */
	if (!drm->ttm.move) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr,
					    no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr,
					    no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr,
					   no_wait_gpu, new_mem);
	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);

out:
	if (nv_device(drm->device)->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

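/*
 * Summary (added for clarity, not in the original): moves are tried in this
 * order - a "fake" move for unpopulated system-memory bos, the accelerated
 * path (flipd/flips bounce through GART when one side is system memory,
 * otherwise a direct m2mf copy), and finally a CPU memcpy fallback. On
 * pre-NV50 hardware the tile region is rebound around the move.
 */
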
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = !dev->agp->cant_use_aperture;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
		if (nv_device(drm->device)->card_type >= NV_50) {
			struct nouveau_bar *bar = nouveau_bar(drm->device);
			struct nouveau_mem *node = mem->mm_node;

			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
					&node->bar_vma);
			if (ret)
				return ret;

			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_bar *bar = nouveau_bar(drm->device);
	struct nouveau_mem *node = mem->mm_node;

	if (!node->bar_vma.node)
		return;

	bar->unmap(bar, &node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_device *device = nv_device(drm->device);
	u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (nv_device(drm->device)->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable;
	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}

static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct drm_device *dev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev;

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
			while (--i) {
				pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

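/*
 * Note (added, not from the original source): population picks the first
 * applicable backend - AGP when enabled, ttm_dma_populate() when swiotlb is
 * active, otherwise the plain TTM page pool followed by a manual
 * pci_map_page() of every page, with the mappings unwound on failure.
 */
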
static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct drm_device *dev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev;

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
	struct nouveau_fence *old_fence = NULL;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = new_fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

static void
nouveau_bo_fence_unref(void **sync_obj)
{
	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
}

static void *
nouveau_bo_fence_ref(void *sync_obj)
{
	return nouveau_fence_ref(sync_obj);
}

static bool
nouveau_bo_fence_signalled(void *sync_obj)
{
	return nouveau_fence_done(sync_obj);
}

static int
nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
{
	return nouveau_fence_wait(sync_obj, lazy, intr);
}

static int
nouveau_bo_fence_flush(void *sync_obj)
{
	return 0;
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_bo_fence_signalled,
	.sync_obj_wait = nouveau_bo_fence_wait,
	.sync_obj_flush = nouveau_bo_fence_flush,
	.sync_obj_unref = nouveau_bo_fence_unref,
	.sync_obj_ref = nouveau_bo_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT &&
		 nvbo->page_shift == vma->vm->vmm->spg_shift) {
		if (node->sg)
			nouveau_vm_map_sg_table(vma, 0, size, node);
		else
			nouveau_vm_map_sg(vma, 0, size, node);
	}

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}