2 * Copyright 2007 Dave Airlied
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
25 * Authors: Dave Airlied <airlied@linux.ie>
26 * Ben Skeggs <darktama@iinet.net.au>
27 * Jeremy Kolb <jkolb@brandeis.edu>
30 #include <linux/dma-mapping.h>
31 #include <linux/swiotlb.h>
33 #include "nouveau_drv.h"
34 #include "nouveau_dma.h"
35 #include "nouveau_fence.h"
37 #include "nouveau_bo.h"
38 #include "nouveau_ttm.h"
39 #include "nouveau_gem.h"
42 * NV10-NV40 tiling helpers
46 nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
47 u32 addr, u32 size, u32 pitch, u32 flags)
49 struct nouveau_drm *drm = nouveau_drm(dev);
50 int i = reg - drm->tile.reg;
51 struct nvkm_device *device = nvxx_device(&drm->client.device);
52 struct nvkm_fb *fb = device->fb;
53 struct nvkm_fb_tile *tile = &fb->tile.region[i];
55 nouveau_fence_unref(&reg->fence);
58 nvkm_fb_tile_fini(fb, i, tile);
61 nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);
63 nvkm_fb_tile_prog(fb, i, tile);
66 static struct nouveau_drm_tile *
67 nv10_bo_get_tile_region(struct drm_device *dev, int i)
69 struct nouveau_drm *drm = nouveau_drm(dev);
70 struct nouveau_drm_tile *tile = &drm->tile.reg[i];
72 spin_lock(&drm->tile.lock);
75 (!tile->fence || nouveau_fence_done(tile->fence)))
80 spin_unlock(&drm->tile.lock);
85 nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
86 struct dma_fence *fence)
88 struct nouveau_drm *drm = nouveau_drm(dev);
91 spin_lock(&drm->tile.lock);
92 tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
94 spin_unlock(&drm->tile.lock);
98 static struct nouveau_drm_tile *
99 nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
100 u32 size, u32 pitch, u32 flags)
102 struct nouveau_drm *drm = nouveau_drm(dev);
103 struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
104 struct nouveau_drm_tile *tile, *found = NULL;
107 for (i = 0; i < fb->tile.regions; i++) {
108 tile = nv10_bo_get_tile_region(dev, i);
110 if (pitch && !found) {
114 } else if (tile && fb->tile.region[i].pitch) {
115 /* Kill an unused tile region. */
116 nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
119 nv10_bo_put_tile_region(dev, tile, NULL);
123 nv10_bo_update_tile_region(dev, found, addr, size,
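/*
 * Tile-region lifecycle, as used above: nv10_bo_set_tiling() scans the
 * hardware tile regions under drm->tile.lock, grabs a free slot whose old
 * fence has signalled, programs it via nvkm_fb_tile_init()/_prog(), and the
 * caller later releases it with nv10_bo_put_tile_region(), attaching the
 * BO's last fence so the slot is not reused while the GPU may still be
 * relying on the previous tiling configuration.
 */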
129 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
131 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
132 struct drm_device *dev = drm->dev;
133 struct nouveau_bo *nvbo = nouveau_bo(bo);
135 if (unlikely(nvbo->gem.filp))
136 DRM_ERROR("bo %p still attached to GEM object\n", bo);
137 WARN_ON(nvbo->pin_refcnt > 0);
138 nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
143 roundup_64(u64 x, u32 y)
151 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
152 int *align, u64 *size)
154 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
155 struct nvif_device *device = &drm->client.device;
157 if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
158 if (nvbo->tile_mode) {
159 if (device->info.chipset >= 0x40) {
161 *size = roundup_64(*size, 64 * nvbo->tile_mode);
163 } else if (device->info.chipset >= 0x30) {
165 *size = roundup_64(*size, 64 * nvbo->tile_mode);
167 } else if (device->info.chipset >= 0x20) {
169 *size = roundup_64(*size, 64 * nvbo->tile_mode);
171 } else if (device->info.chipset >= 0x10) {
173 *size = roundup_64(*size, 32 * nvbo->tile_mode);
177 *size = roundup_64(*size, (1 << nvbo->page_shift));
178 *align = max((1 << nvbo->page_shift), *align);
181 *size = roundup_64(*size, PAGE_SIZE);
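/*
 * Worked example for the pre-Tesla path above: with tile_mode == 2 on an
 * nv40-class chip, size is rounded up to a multiple of 64 * 2 = 128 bytes;
 * on nv1x hardware the multiplier is 32 instead of 64.  The final
 * roundup_64(*size, PAGE_SIZE) then keeps every BO an integral number of
 * CPU pages regardless of generation.
 */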
185 nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
186 uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
187 struct sg_table *sg, struct reservation_object *robj,
188 struct nouveau_bo **pnvbo)
190 struct nouveau_drm *drm = nouveau_drm(cli->dev);
191 struct nouveau_bo *nvbo;
194 int type = ttm_bo_type_device;
197 NV_WARN(drm, "skipped size %016llx\n", size);
202 type = ttm_bo_type_sg;
204 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
207 INIT_LIST_HEAD(&nvbo->head);
208 INIT_LIST_HEAD(&nvbo->entry);
209 INIT_LIST_HEAD(&nvbo->vma_list);
210 nvbo->tile_mode = tile_mode;
211 nvbo->tile_flags = tile_flags;
212 nvbo->bo.bdev = &drm->ttm.bdev;
215 if (!nvxx_device(&drm->client.device)->func->cpu_coherent)
216 nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
218 nvbo->page_shift = 12;
219 if (drm->client.vm) {
220 if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
221 nvbo->page_shift = drm->client.vm->mmu->lpg_shift;
224 nouveau_bo_fixup_align(nvbo, flags, &align, &size);
225 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
226 nouveau_bo_placement_set(nvbo, flags, 0);
228 acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
229 sizeof(struct nouveau_bo));
231 ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
232 type, &nvbo->placement,
233 align >> PAGE_SHIFT, false, NULL, acc_size, sg,
234 robj, nouveau_bo_del_ttm);
236 /* ttm will call nouveau_bo_del_ttm() if it fails. */
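/*
 * Typical allocation path (a sketch only; error handling elided and the
 * 64 KiB size purely illustrative):
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret = nouveau_bo_new(cli, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *				 0, 0, NULL, NULL, &nvbo);
 *
 * nouveau_bo_new() fixes up size/alignment for the chip, picks an initial
 * placement and hands the object to ttm_bo_init(), which takes ownership:
 * on failure TTM itself calls nouveau_bo_del_ttm(), so the caller must not
 * free nvbo again.
 */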
245 set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
249 if (type & TTM_PL_FLAG_VRAM)
250 pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
251 if (type & TTM_PL_FLAG_TT)
252 pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
253 if (type & TTM_PL_FLAG_SYSTEM)
254 pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
258 set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
260 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
261 u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
262 unsigned i, fpfn, lpfn;
264 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
265 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
266 nvbo->bo.mem.num_pages < vram_pages / 4) {
268 * Make sure that the color and depth buffers are handled
269 * by independent memory controller units. Up to a 9x
270 * speed up when alpha-blending and depth-test are enabled
273 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
274 fpfn = vram_pages / 2;
278 lpfn = vram_pages / 2;
280 for (i = 0; i < nvbo->placement.num_placement; ++i) {
281 nvbo->placements[i].fpfn = fpfn;
282 nvbo->placements[i].lpfn = lpfn;
284 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
285 nvbo->busy_placements[i].fpfn = fpfn;
286 nvbo->busy_placements[i].lpfn = lpfn;
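/*
 * Concretely: for tiled buffers smaller than a quarter of VRAM on a
 * Celsius-class board, zeta buffers (NOUVEAU_GEM_TILE_ZETA) are restricted
 * to the upper half of VRAM (fpfn = vram_pages / 2) and colour buffers to
 * the lower half (lpfn = vram_pages / 2), so the two streams land on
 * independent memory controller units as described above.
 */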
292 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
294 struct ttm_placement *pl = &nvbo->placement;
295 uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
296 TTM_PL_MASK_CACHING) |
297 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
299 pl->placement = nvbo->placements;
300 set_placement_list(nvbo->placements, &pl->num_placement,
303 pl->busy_placement = nvbo->busy_placements;
304 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
307 set_placement_range(nvbo, type);
311 nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
313 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
314 struct ttm_buffer_object *bo = &nvbo->bo;
315 bool force = false, evict = false;
318 ret = ttm_bo_reserve(bo, false, false, NULL);
322 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
323 memtype == TTM_PL_FLAG_VRAM && contig) {
324 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
325 if (bo->mem.mem_type == TTM_PL_VRAM) {
326 struct nvkm_mem *mem = bo->mem.mm_node;
327 if (!nvkm_mm_contiguous(mem->mem))
330 nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
335 if (nvbo->pin_refcnt) {
336 if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
337 NV_ERROR(drm, "bo %p pinned elsewhere: "
338 "0x%08x vs 0x%08x\n", bo,
339 1 << bo->mem.mem_type, memtype);
347 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
348 ret = nouveau_bo_validate(nvbo, false, false);
354 nouveau_bo_placement_set(nvbo, memtype, 0);
356 /* drop pin_refcnt temporarily, so we don't trip the assertion
357 * in nouveau_bo_move() that makes sure we're not trying to
358 * move a pinned buffer
361 ret = nouveau_bo_validate(nvbo, false, false);
366 switch (bo->mem.mem_type) {
368 drm->gem.vram_available -= bo->mem.size;
371 drm->gem.gart_available -= bo->mem.size;
379 nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
380 ttm_bo_unreserve(bo);
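/*
 * Pin/unpin bookkeeping: a successful first pin validates the BO into the
 * requested memory type and subtracts its size from gem.vram_available or
 * gem.gart_available; nouveau_bo_unpin() adds it back once pin_refcnt drops
 * to zero.  Contiguous-VRAM pins on Tesla+ may force an evict and
 * revalidate when the BO was previously placed NOUVEAU_GEM_TILE_NONCONTIG.
 */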
385 nouveau_bo_unpin(struct nouveau_bo *nvbo)
387 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
388 struct ttm_buffer_object *bo = &nvbo->bo;
391 ret = ttm_bo_reserve(bo, false, false, NULL);
395 ref = --nvbo->pin_refcnt;
396 WARN_ON_ONCE(ref < 0);
400 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
402 ret = nouveau_bo_validate(nvbo, false, false);
404 switch (bo->mem.mem_type) {
406 drm->gem.vram_available += bo->mem.size;
409 drm->gem.gart_available += bo->mem.size;
417 ttm_bo_unreserve(bo);
422 nouveau_bo_map(struct nouveau_bo *nvbo)
426 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
430 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
432 ttm_bo_unreserve(&nvbo->bo);
437 nouveau_bo_unmap(struct nouveau_bo *nvbo)
442 ttm_bo_kunmap(&nvbo->kmap);
446 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
448 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
449 struct nvkm_device *device = nvxx_device(&drm->client.device);
450 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
456 /* Don't waste time looping if the object is coherent */
457 if (nvbo->force_coherent)
460 for (i = 0; i < ttm_dma->ttm.num_pages; i++)
461 dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
462 PAGE_SIZE, DMA_TO_DEVICE);
466 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
468 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
469 struct nvkm_device *device = nvxx_device(&drm->client.device);
470 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
476 /* Don't waste time looping if the object is coherent */
477 if (nvbo->force_coherent)
480 for (i = 0; i < ttm_dma->ttm.num_pages; i++)
481 dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
482 PAGE_SIZE, DMA_FROM_DEVICE);
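/*
 * nouveau_bo_sync_for_device()/_for_cpu() bracket CPU access to a BO backed
 * by a ttm_dma_tt: they hand each backing page's DMA address to
 * dma_sync_single_for_device() before the GPU uses it and to
 * dma_sync_single_for_cpu() before the CPU reads it back.  BOs marked
 * force_coherent (uncached allocations on non-coherent devices) skip the
 * loop entirely, as the comments above note.
 */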
486 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
491 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
492 interruptible, no_wait_gpu);
496 nouveau_bo_sync_for_device(nvbo);
502 nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
505 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
510 iowrite16_native(val, (void __force __iomem *)mem);
516 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
519 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
524 return ioread32_native((void __force __iomem *)mem);
530 nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
533 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
538 iowrite32_native(val, (void __force __iomem *)mem);
543 static struct ttm_tt *
544 nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
545 uint32_t page_flags, struct page *dummy_read)
547 #if IS_ENABLED(CONFIG_AGP)
548 struct nouveau_drm *drm = nouveau_bdev(bdev);
550 if (drm->agp.bridge) {
551 return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
552 page_flags, dummy_read);
556 return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
560 nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
562 /* We'll do this from user space. */
567 nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
568 struct ttm_mem_type_manager *man)
570 struct nouveau_drm *drm = nouveau_bdev(bdev);
574 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
575 man->available_caching = TTM_PL_MASK_CACHING;
576 man->default_caching = TTM_PL_FLAG_CACHED;
579 man->flags = TTM_MEMTYPE_FLAG_FIXED |
580 TTM_MEMTYPE_FLAG_MAPPABLE;
581 man->available_caching = TTM_PL_FLAG_UNCACHED |
583 man->default_caching = TTM_PL_FLAG_WC;
585 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
586 /* Some BARs do not support being ioremapped WC */
587 if (nvxx_bar(&drm->client.device)->iomap_uncached) {
588 man->available_caching = TTM_PL_FLAG_UNCACHED;
589 man->default_caching = TTM_PL_FLAG_UNCACHED;
592 man->func = &nouveau_vram_manager;
593 man->io_reserve_fastpath = false;
594 man->use_io_reserve_lru = true;
596 man->func = &ttm_bo_manager_func;
600 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
601 man->func = &nouveau_gart_manager;
603 if (!drm->agp.bridge)
604 man->func = &nv04_gart_manager;
606 man->func = &ttm_bo_manager_func;
608 if (drm->agp.bridge) {
609 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
610 man->available_caching = TTM_PL_FLAG_UNCACHED |
612 man->default_caching = TTM_PL_FLAG_WC;
614 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
615 TTM_MEMTYPE_FLAG_CMA;
616 man->available_caching = TTM_PL_MASK_CACHING;
617 man->default_caching = TTM_PL_FLAG_CACHED;
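/*
 * nouveau_bo_init_mem_type() wires each TTM memory type to a manager:
 * system RAM uses plain cached pages, VRAM is a fixed, mappable aperture
 * (write-combined where the BAR allows it, uncached otherwise) handled by
 * nouveau_vram_manager on Tesla+, and TT/GART picks nouveau_gart_manager,
 * nv04_gart_manager or the generic ttm_bo_manager_func depending on the
 * chip generation and whether an AGP bridge is present.
 */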
628 nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
630 struct nouveau_bo *nvbo = nouveau_bo(bo);
632 switch (bo->mem.mem_type) {
634 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
638 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
642 *pl = nvbo->placement;
647 nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
649 int ret = RING_SPACE(chan, 2);
651 BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
652 OUT_RING (chan, handle & 0x0000ffff);
659 nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
660 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
662 struct nvkm_mem *mem = old_reg->mm_node;
663 int ret = RING_SPACE(chan, 10);
665 BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
666 OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
667 OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
668 OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
669 OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
670 OUT_RING (chan, PAGE_SIZE);
671 OUT_RING (chan, PAGE_SIZE);
672 OUT_RING (chan, PAGE_SIZE);
673 OUT_RING (chan, new_reg->num_pages);
674 BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
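/*
 * All of the per-generation copy hooks below follow the same submission
 * pattern: reserve ring space with RING_SPACE(), emit a method header with
 * BEGIN_NVC0()/BEGIN_NV04() on the copy subchannel, then OUT_RING() the
 * source/destination addresses, pitches and line count, and finally the
 * trigger method.  nve0_bo_move_copy() above is the simplest case, since
 * the Kepler-class copy engine takes the whole transfer in one shot.
 */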
680 nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
682 int ret = RING_SPACE(chan, 2);
684 BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
685 OUT_RING (chan, handle);
691 nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
692 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
694 struct nvkm_mem *mem = old_reg->mm_node;
695 u64 src_offset = mem->vma[0].offset;
696 u64 dst_offset = mem->vma[1].offset;
697 u32 page_count = new_reg->num_pages;
700 page_count = new_reg->num_pages;
702 int line_count = (page_count > 8191) ? 8191 : page_count;
704 ret = RING_SPACE(chan, 11);
708 BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
709 OUT_RING (chan, upper_32_bits(src_offset));
710 OUT_RING (chan, lower_32_bits(src_offset));
711 OUT_RING (chan, upper_32_bits(dst_offset));
712 OUT_RING (chan, lower_32_bits(dst_offset));
713 OUT_RING (chan, PAGE_SIZE);
714 OUT_RING (chan, PAGE_SIZE);
715 OUT_RING (chan, PAGE_SIZE);
716 OUT_RING (chan, line_count);
717 BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
718 OUT_RING (chan, 0x00000110);
720 page_count -= line_count;
721 src_offset += (PAGE_SIZE * line_count);
722 dst_offset += (PAGE_SIZE * line_count);
729 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
730 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
732 struct nvkm_mem *mem = old_reg->mm_node;
733 u64 src_offset = mem->vma[0].offset;
734 u64 dst_offset = mem->vma[1].offset;
735 u32 page_count = new_reg->num_pages;
738 page_count = new_reg->num_pages;
740 int line_count = (page_count > 2047) ? 2047 : page_count;
742 ret = RING_SPACE(chan, 12);
746 BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
747 OUT_RING (chan, upper_32_bits(dst_offset));
748 OUT_RING (chan, lower_32_bits(dst_offset));
749 BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
750 OUT_RING (chan, upper_32_bits(src_offset));
751 OUT_RING (chan, lower_32_bits(src_offset));
752 OUT_RING (chan, PAGE_SIZE); /* src_pitch */
753 OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
754 OUT_RING (chan, PAGE_SIZE); /* line_length */
755 OUT_RING (chan, line_count);
756 BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
757 OUT_RING (chan, 0x00100110);
759 page_count -= line_count;
760 src_offset += (PAGE_SIZE * line_count);
761 dst_offset += (PAGE_SIZE * line_count);
768 nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
769 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
771 struct nvkm_mem *mem = old_reg->mm_node;
772 u64 src_offset = mem->vma[0].offset;
773 u64 dst_offset = mem->vma[1].offset;
774 u32 page_count = new_reg->num_pages;
777 page_count = new_reg->num_pages;
779 int line_count = (page_count > 8191) ? 8191 : page_count;
781 ret = RING_SPACE(chan, 11);
785 BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
786 OUT_RING (chan, upper_32_bits(src_offset));
787 OUT_RING (chan, lower_32_bits(src_offset));
788 OUT_RING (chan, upper_32_bits(dst_offset));
789 OUT_RING (chan, lower_32_bits(dst_offset));
790 OUT_RING (chan, PAGE_SIZE);
791 OUT_RING (chan, PAGE_SIZE);
792 OUT_RING (chan, PAGE_SIZE);
793 OUT_RING (chan, line_count);
794 BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
795 OUT_RING (chan, 0x00000110);
797 page_count -= line_count;
798 src_offset += (PAGE_SIZE * line_count);
799 dst_offset += (PAGE_SIZE * line_count);
806 nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
807 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
809 struct nvkm_mem *mem = old_reg->mm_node;
810 int ret = RING_SPACE(chan, 7);
812 BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
813 OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
814 OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
815 OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
816 OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
817 OUT_RING (chan, 0x00000000 /* COPY */);
818 OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
824 nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
825 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
827 struct nvkm_mem *mem = old_reg->mm_node;
828 int ret = RING_SPACE(chan, 7);
830 BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
831 OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
832 OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
833 OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
834 OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
835 OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
836 OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
842 nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
844 int ret = RING_SPACE(chan, 6);
846 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
847 OUT_RING (chan, handle);
848 BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
849 OUT_RING (chan, chan->drm->ntfy.handle);
850 OUT_RING (chan, chan->vram.handle);
851 OUT_RING (chan, chan->vram.handle);
858 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
859 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
861 struct nvkm_mem *mem = old_reg->mm_node;
862 u64 length = (new_reg->num_pages << PAGE_SHIFT);
863 u64 src_offset = mem->vma[0].offset;
864 u64 dst_offset = mem->vma[1].offset;
865 int src_tiled = !!mem->memtype;
866 int dst_tiled = !!((struct nvkm_mem *)new_reg->mm_node)->memtype;
870 u32 amount, stride, height;
872 ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
876 amount = min(length, (u64)(4 * 1024 * 1024));
878 height = amount / stride;
881 BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
884 OUT_RING (chan, stride);
885 OUT_RING (chan, height);
890 BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
894 BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
897 OUT_RING (chan, stride);
898 OUT_RING (chan, height);
903 BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
907 BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
908 OUT_RING (chan, upper_32_bits(src_offset));
909 OUT_RING (chan, upper_32_bits(dst_offset));
910 BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
911 OUT_RING (chan, lower_32_bits(src_offset));
912 OUT_RING (chan, lower_32_bits(dst_offset));
913 OUT_RING (chan, stride);
914 OUT_RING (chan, stride);
915 OUT_RING (chan, stride);
916 OUT_RING (chan, height);
917 OUT_RING (chan, 0x00000101);
918 OUT_RING (chan, 0x00000000);
919 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
923 src_offset += amount;
924 dst_offset += amount;
931 nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
933 int ret = RING_SPACE(chan, 4);
935 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
936 OUT_RING (chan, handle);
937 BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
938 OUT_RING (chan, chan->drm->ntfy.handle);
944 static inline uint32_t
945 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
946 struct nouveau_channel *chan, struct ttm_mem_reg *reg)
948 if (reg->mem_type == TTM_PL_TT)
950 return chan->vram.handle;
954 nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
955 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
957 u32 src_offset = old_reg->start << PAGE_SHIFT;
958 u32 dst_offset = new_reg->start << PAGE_SHIFT;
959 u32 page_count = new_reg->num_pages;
962 ret = RING_SPACE(chan, 3);
966 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
967 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg));
968 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg));
970 page_count = new_reg->num_pages;
972 int line_count = (page_count > 2047) ? 2047 : page_count;
974 ret = RING_SPACE(chan, 11);
978 BEGIN_NV04(chan, NvSubCopy,
979 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
980 OUT_RING (chan, src_offset);
981 OUT_RING (chan, dst_offset);
982 OUT_RING (chan, PAGE_SIZE); /* src_pitch */
983 OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
984 OUT_RING (chan, PAGE_SIZE); /* line_length */
985 OUT_RING (chan, line_count);
986 OUT_RING (chan, 0x00000101);
987 OUT_RING (chan, 0x00000000);
988 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
991 page_count -= line_count;
992 src_offset += (PAGE_SIZE * line_count);
993 dst_offset += (PAGE_SIZE * line_count);
1000 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
1001 struct ttm_mem_reg *reg)
1003 struct nvkm_mem *old_mem = bo->mem.mm_node;
1004 struct nvkm_mem *new_mem = reg->mm_node;
1005 u64 size = (u64)reg->num_pages << PAGE_SHIFT;
1008 ret = nvkm_vm_get(drm->client.vm, size, old_mem->page_shift,
1009 NV_MEM_ACCESS_RW, &old_mem->vma[0]);
1013 ret = nvkm_vm_get(drm->client.vm, size, new_mem->page_shift,
1014 NV_MEM_ACCESS_RW, &old_mem->vma[1]);
1016 nvkm_vm_put(&old_mem->vma[0]);
1020 nvkm_vm_map(&old_mem->vma[0], old_mem);
1021 nvkm_vm_map(&old_mem->vma[1], new_mem);
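/*
 * nouveau_bo_move_prep() backs a GPU copy on Tesla+ chips: it grabs two
 * temporary virtual ranges from the client VM, maps the old backing store
 * into vma[0] and the new one into vma[1], and stores both in the *old*
 * nvkm_mem node so they can be torn down after TTM destroys the
 * ttm_mem_reg (see the comment in nouveau_bo_move_m2mf() below).
 */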
1026 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
1027 bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1029 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1030 struct nouveau_channel *chan = drm->ttm.chan;
1031 struct nouveau_cli *cli = (void *)chan->user.client;
1032 struct nouveau_fence *fence;
1035 /* create temporary vmas for the transfer and attach them to the
1036 * old nvkm_mem node; these will get cleaned up after ttm has
1037 * destroyed the ttm_mem_reg
1039 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
1040 ret = nouveau_bo_move_prep(drm, bo, new_reg);
1045 mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
1046 ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
1048 ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
1050 ret = nouveau_fence_new(chan, false, &fence);
1052 ret = ttm_bo_move_accel_cleanup(bo,
1056 nouveau_fence_unref(&fence);
1060 mutex_unlock(&cli->mutex);
1065 nouveau_bo_move_init(struct nouveau_drm *drm)
1067 static const struct {
1071 int (*exec)(struct nouveau_channel *,
1072 struct ttm_buffer_object *,
1073 struct ttm_mem_reg *, struct ttm_mem_reg *);
1074 int (*init)(struct nouveau_channel *, u32 handle);
1076 { "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
1077 { "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
1078 { "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
1079 { "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1080 { "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
1081 { "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1082 { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
1083 { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1084 { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
1085 { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
1086 { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
1087 { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
1088 { "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
1089 { "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
1090 { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
1092 { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
1093 }, *mthd = _methods;
1094 const char *name = "CPU";
1098 struct nouveau_channel *chan;
1103 chan = drm->channel;
1107 ret = nvif_object_init(&chan->user,
1108 mthd->oclass | (mthd->engine << 16),
1109 mthd->oclass, NULL, 0,
1112 ret = mthd->init(chan, drm->ttm.copy.handle);
1114 nvif_object_fini(&drm->ttm.copy);
1118 drm->ttm.move = mthd->exec;
1119 drm->ttm.chan = chan;
1123 } while ((++mthd)->exec);
1125 NV_INFO(drm, "MM: using %s for buffer copies\n", name);
1129 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1130 bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1132 struct ttm_place placement_memtype = {
1135 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1137 struct ttm_placement placement;
1138 struct ttm_mem_reg tmp_reg;
1141 placement.num_placement = placement.num_busy_placement = 1;
1142 placement.placement = placement.busy_placement = &placement_memtype;
1145 tmp_reg.mm_node = NULL;
1146 ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
1150 ret = ttm_tt_bind(bo->ttm, &tmp_reg);
1154 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
1158 ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_reg);
1160 ttm_bo_mem_put(bo, &tmp_reg);
1165 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1166 bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1168 struct ttm_place placement_memtype = {
1171 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1173 struct ttm_placement placement;
1174 struct ttm_mem_reg tmp_reg;
1177 placement.num_placement = placement.num_busy_placement = 1;
1178 placement.placement = placement.busy_placement = &placement_memtype;
1181 tmp_reg.mm_node = NULL;
1182 ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
1186 ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_reg);
1190 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);
1195 ttm_bo_mem_put(bo, &tmp_reg);
1200 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
1201 struct ttm_mem_reg *new_reg)
1203 struct nouveau_bo *nvbo = nouveau_bo(bo);
1204 struct nvkm_vma *vma;
1206 /* ttm can now (stupidly) pass the driver bos it didn't create... */
1207 if (bo->destroy != nouveau_bo_del_ttm)
1210 list_for_each_entry(vma, &nvbo->vma_list, head) {
1211 if (new_reg && new_reg->mem_type != TTM_PL_SYSTEM &&
1212 (new_reg->mem_type == TTM_PL_VRAM ||
1213 nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
1214 nvkm_vm_map(vma, new_reg->mm_node);
1216 WARN_ON(ttm_bo_wait(bo, false, false));
1223 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
1224 struct nouveau_drm_tile **new_tile)
1226 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1227 struct drm_device *dev = drm->dev;
1228 struct nouveau_bo *nvbo = nouveau_bo(bo);
1229 u64 offset = new_reg->start << PAGE_SHIFT;
1232 if (new_reg->mem_type != TTM_PL_VRAM)
1235 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
1236 *new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
1245 nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1246 struct nouveau_drm_tile *new_tile,
1247 struct nouveau_drm_tile **old_tile)
1249 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1250 struct drm_device *dev = drm->dev;
1251 struct dma_fence *fence = reservation_object_get_excl(bo->resv);
1253 nv10_bo_put_tile_region(dev, *old_tile, fence);
1254 *old_tile = new_tile;
1258 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1259 bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1261 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1262 struct nouveau_bo *nvbo = nouveau_bo(bo);
1263 struct ttm_mem_reg *old_reg = &bo->mem;
1264 struct nouveau_drm_tile *new_tile = NULL;
1267 ret = ttm_bo_wait(bo, intr, no_wait_gpu);
1271 if (nvbo->pin_refcnt)
1272 NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
1274 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1275 ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
1281 if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
1282 BUG_ON(bo->mem.mm_node != NULL);
1284 new_reg->mm_node = NULL;
1288 /* Hardware assisted copy. */
1289 if (drm->ttm.move) {
1290 if (new_reg->mem_type == TTM_PL_SYSTEM)
1291 ret = nouveau_bo_move_flipd(bo, evict, intr,
1292 no_wait_gpu, new_reg);
1293 else if (old_reg->mem_type == TTM_PL_SYSTEM)
1294 ret = nouveau_bo_move_flips(bo, evict, intr,
1295 no_wait_gpu, new_reg);
1297 ret = nouveau_bo_move_m2mf(bo, evict, intr,
1298 no_wait_gpu, new_reg);
1303 /* Fallback to software copy. */
1304 ret = ttm_bo_wait(bo, intr, no_wait_gpu);
1306 ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_reg);
1309 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1311 nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
1313 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
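/*
 * nouveau_bo_move() summarised: wait for the BO to idle, bind a tile region
 * on pre-Tesla chips, treat SYSTEM-with-no-ttm as a trivial move, otherwise
 * prefer the hardware copy (flipd/flips bounce through GART when one side
 * is system memory, m2mf when both sides are GPU-visible) and only fall
 * back to ttm_bo_move_memcpy() when no copy channel is available or the
 * copy fails; finally swap the old tile region for the new one.
 */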
1320 nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
1322 struct nouveau_bo *nvbo = nouveau_bo(bo);
1324 return drm_vma_node_verify_access(&nvbo->gem.vma_node,
1325 filp->private_data);
1329 nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
1331 struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
1332 struct nouveau_drm *drm = nouveau_bdev(bdev);
1333 struct nvkm_device *device = nvxx_device(&drm->client.device);
1334 struct nvkm_mem *mem = reg->mm_node;
1337 reg->bus.addr = NULL;
1338 reg->bus.offset = 0;
1339 reg->bus.size = reg->num_pages << PAGE_SHIFT;
1341 reg->bus.is_iomem = false;
1342 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1344 switch (reg->mem_type) {
1349 #if IS_ENABLED(CONFIG_AGP)
1350 if (drm->agp.bridge) {
1351 reg->bus.offset = reg->start << PAGE_SHIFT;
1352 reg->bus.base = drm->agp.base;
1353 reg->bus.is_iomem = !drm->agp.cma;
1356 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->memtype)
1359 /* fallthrough, tiled memory */
1361 reg->bus.offset = reg->start << PAGE_SHIFT;
1362 reg->bus.base = device->func->resource_addr(device, 1);
1363 reg->bus.is_iomem = true;
1364 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
1365 struct nvkm_bar *bar = nvxx_bar(&drm->client.device);
1366 int page_shift = 12;
1367 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
1368 page_shift = mem->page_shift;
1370 ret = nvkm_bar_umap(bar, mem->size << 12, page_shift,
1375 nvkm_vm_map(&mem->bar_vma, mem);
1376 reg->bus.offset = mem->bar_vma.offset;
1386 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
1388 struct nvkm_mem *mem = reg->mm_node;
1390 if (!mem->bar_vma.node)
1393 nvkm_vm_unmap(&mem->bar_vma);
1394 nvkm_vm_put(&mem->bar_vma);
1398 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1400 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1401 struct nouveau_bo *nvbo = nouveau_bo(bo);
1402 struct nvkm_device *device = nvxx_device(&drm->client.device);
1403 u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
1406 /* as long as the bo isn't in vram, and isn't tiled, we've got
1407 * nothing to do here.
1409 if (bo->mem.mem_type != TTM_PL_VRAM) {
1410 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
1411 !nouveau_bo_tile_layout(nvbo))
1414 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
1415 nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
1417 ret = nouveau_bo_validate(nvbo, false, false);
1424 /* make sure bo is in mappable vram */
1425 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
1426 bo->mem.start + bo->mem.num_pages < mappable)
1429 for (i = 0; i < nvbo->placement.num_placement; ++i) {
1430 nvbo->placements[i].fpfn = 0;
1431 nvbo->placements[i].lpfn = mappable;
1434 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
1435 nvbo->busy_placements[i].fpfn = 0;
1436 nvbo->busy_placements[i].lpfn = mappable;
1439 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1440 return nouveau_bo_validate(nvbo, false, false);
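/*
 * nouveau_ttm_fault_reserve_notify() runs before a CPU fault is satisfied.
 * On Tesla and newer, a tiled BO that is not in VRAM is first pulled into
 * GART so the tiling can be honoured; on older chips, a VRAM BO lying past
 * the BAR1-mappable window has its placement clamped to [0, mappable) and
 * is revalidated so the fault can be served through the aperture.
 */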
1444 nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1446 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1447 struct nouveau_drm *drm;
1448 struct nvkm_device *device;
1449 struct drm_device *dev;
1450 struct device *pdev;
1453 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1455 if (ttm->state != tt_unpopulated)
1458 if (slave && ttm->sg) {
1459 /* make userspace faulting work */
1460 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1461 ttm_dma->dma_address, ttm->num_pages);
1462 ttm->state = tt_unbound;
1466 drm = nouveau_bdev(ttm->bdev);
1467 device = nvxx_device(&drm->client.device);
1471 #if IS_ENABLED(CONFIG_AGP)
1472 if (drm->agp.bridge) {
1473 return ttm_agp_tt_populate(ttm);
1477 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
1478 if (swiotlb_nr_tbl()) {
1479 return ttm_dma_populate((void *)ttm, dev->dev);
1483 r = ttm_pool_populate(ttm);
1488 for (i = 0; i < ttm->num_pages; i++) {
1491 addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
1494 if (dma_mapping_error(pdev, addr)) {
1496 dma_unmap_page(pdev, ttm_dma->dma_address[i],
1497 PAGE_SIZE, DMA_BIDIRECTIONAL);
1498 ttm_dma->dma_address[i] = 0;
1500 ttm_pool_unpopulate(ttm);
1504 ttm_dma->dma_address[i] = addr;
1510 nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1512 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1513 struct nouveau_drm *drm;
1514 struct nvkm_device *device;
1515 struct drm_device *dev;
1516 struct device *pdev;
1518 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1523 drm = nouveau_bdev(ttm->bdev);
1524 device = nvxx_device(&drm->client.device);
1528 #if IS_ENABLED(CONFIG_AGP)
1529 if (drm->agp.bridge) {
1530 ttm_agp_tt_unpopulate(ttm);
1535 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
1536 if (swiotlb_nr_tbl()) {
1537 ttm_dma_unpopulate((void *)ttm, dev->dev);
1542 for (i = 0; i < ttm->num_pages; i++) {
1543 if (ttm_dma->dma_address[i]) {
1544 dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
1549 ttm_pool_unpopulate(ttm);
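/*
 * nouveau_ttm_tt_populate()/_unpopulate() pick the backing-store strategy:
 * shared (SG) objects reuse the importer's page array, AGP and swiotlb
 * configurations defer to their dedicated TTM pools, and everything else
 * allocates through ttm_pool_populate() and then dma_map_page()s each page
 * bidirectionally, unwinding all prior mappings if any single mapping
 * fails.
 */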
1553 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
1555 struct reservation_object *resv = nvbo->bo.resv;
1558 reservation_object_add_excl_fence(resv, &fence->base);
1560 reservation_object_add_shared_fence(resv, &fence->base);
1563 struct ttm_bo_driver nouveau_bo_driver = {
1564 .ttm_tt_create = &nouveau_ttm_tt_create,
1565 .ttm_tt_populate = &nouveau_ttm_tt_populate,
1566 .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
1567 .invalidate_caches = nouveau_bo_invalidate_caches,
1568 .init_mem_type = nouveau_bo_init_mem_type,
1569 .eviction_valuable = ttm_bo_eviction_valuable,
1570 .evict_flags = nouveau_bo_evict_flags,
1571 .move_notify = nouveau_bo_move_ntfy,
1572 .move = nouveau_bo_move,
1573 .verify_access = nouveau_bo_verify_access,
1574 .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
1575 .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
1576 .io_mem_free = &nouveau_ttm_io_mem_free,
1580 nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm)
1582 struct nvkm_vma *vma;
1583 list_for_each_entry(vma, &nvbo->vma_list, head) {
1592 nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
1593 struct nvkm_vma *vma)
1595 const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
1598 ret = nvkm_vm_get(vm, size, nvbo->page_shift,
1599 NV_MEM_ACCESS_RW, vma);
1603 if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
1604 (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
1605 nvbo->page_shift != vma->vm->mmu->lpg_shift))
1606 nvkm_vm_map(vma, nvbo->bo.mem.mm_node);
1608 list_add_tail(&vma->head, &nvbo->vma_list);
1614 nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
1617 if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
1620 list_del(&vma->head);