/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */
#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

#include <linux/log2.h>
#include <linux/slab.h>
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}
static void
nouveau_bo_fixup_align(struct drm_device *dev,
		       uint32_t tile_mode, uint32_t tile_flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
	 * align to that as well as the page size.  Align the size to the
	 * appropriate boundaries.  This does imply that sizes are rounded up
	 * by 3-7 pages, so be aware of this and do not waste memory by
	 * allocating many small buffers.
	 */
	if (dev_priv->card_type == NV_50) {
		uint32_t block_size = dev_priv->vram_size >> 15;
		int i;

		switch (tile_flags) {
		case 0x1800:
		case 0x2800:
		case 0x4800:
		case 0x7a00:
			if (is_power_of_2(block_size)) {
				for (i = 1; i < 10; i++) {
					*align = 12 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			} else {
				for (i = 1; i < 10; i++) {
					*align = 8 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			}
			*size = roundup(*size, *align);
			break;
		default:
			break;
		}
	} else if (tile_mode) {
		if (dev_priv->chipset >= 0x40) {
			*align = 65536;
			*size = roundup(*size, 64 * tile_mode);
		} else if (dev_priv->chipset >= 0x30) {
			*align = 32768;
			*size = roundup(*size, 64 * tile_mode);
		} else if (dev_priv->chipset >= 0x20) {
			*align = 16384;
			*size = roundup(*size, 64 * tile_mode);
		} else if (dev_priv->chipset >= 0x10) {
			*align = 16384;
			*size = roundup(*size, 32 * tile_mode);
		}
	}

	/* ALIGN works only on powers of two. */
	*size = roundup(*size, PAGE_SIZE);

	if (dev_priv->card_type == NV_50) {
		*size = roundup(*size, 65536);
		*align = max(65536, *align);
	}
}
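/*
 * Worked example for nouveau_bo_fixup_align() (illustrative only): on an
 * NV50 board with 256 MiB of VRAM and one of the tile_flags handled by the
 * switch above, block_size = vram_size >> 15 = 8192, a power of two.  The
 * first loop tries *align = 12 * i * 8192 and stops at i = 2, giving
 * *align = 196608, the first such multiple that is also 64 KiB aligned;
 * *size is rounded up to that boundary, which already satisfies the 64 KiB
 * minimum enforced for NV_50 at the end of the function.
 */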
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo),
			       &align, &size);
	align >>= PAGE_SHIFT;

	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	if (ret) {
		/* TTM will call nouveau_bo_del_ttm() if it fails. */
		return ret;
	}
	nvbo->channel = NULL;

	*pnvbo = nvbo;
	return 0;
}
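/*
 * Example (illustrative only, not part of the driver): allocating a small
 * CPU-mappable scratch buffer in VRAM, pinning it and writing to it.
 * Error handling is trimmed for brevity; "dev" is assumed to be a valid
 * drm_device and no channel is associated with the buffer.
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret;
 *
 *	ret = nouveau_bo_new(dev, NULL, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *			     0, 0x0000, false, true, &nvbo);
 *	if (ret == 0)
 *		ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *	if (ret == 0)
 *		ret = nouveau_bo_map(nvbo);
 *	if (ret == 0) {
 *		nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
 *		nouveau_bo_unmap(nvbo);
 *		nouveau_bo_unpin(nvbo);
 *	}
 */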
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units.  Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}
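/*
 * Pin a buffer into the memory type(s) given in "memtype" so it can no
 * longer be evicted.  Pinning is reference counted: only the first pin
 * actually validates the buffer (TTM_PL_FLAG_NO_EVICT is picked up via
 * nouveau_bo_placement_set()), and the VRAM/GART aperture free-space
 * counters are adjusted accordingly.  Pinning a buffer that is already
 * pinned into a different memory type is refused.
 */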
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);
	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (ret)
		nvbo->pin_refcnt--;
	return ret;
}
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
	return ret;
}
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo->kmap.bo)
		ttm_bo_kunmap(&nvbo->kmap);
}
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
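/*
 * These helpers operate on a buffer previously mapped with nouveau_bo_map():
 * ttm_kmap_obj_virtual() reports whether the kmap points at I/O memory
 * (e.g. VRAM through the BAR) or ordinary kernel memory, and the accessors
 * pick the matching access primitive.  A sketch of clearing the first page
 * of an already-mapped bo (illustrative only):
 *
 *	unsigned i;
 *
 *	for (i = 0; i < PAGE_SIZE / 4; i++)
 *		nouveau_bo_wr32(nvbo, i, 0x00000000);
 */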
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}
static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		if (dev_priv->card_type == NV_50)
			man->gpu_offset = 0x40000000;
		else
			man->gpu_offset = 0;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		man->gpu_offset = dev_priv->vm_gart_base;
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}
/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */
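/*
 * Emit a fence on the copy channel and hand it to TTM as the bo's sync
 * object, so the old backing store isn't torn down before the GPU copy
 * has actually completed.
 */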
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (nvbo->no_vm) {
		if (mem->mem_type == TTM_PL_TT)
			return NvDmaGART;
		return NvDmaVRAM;
	}

	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}
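/*
 * NV50: copies are done in passes of at most 4 MiB.  For each pass the
 * source and destination are described to M2MF either as tiled surfaces
 * (methods 0x0200/0x021c, used when the bo is in VRAM and has a tiled
 * layout) or as linear ones, and the offsets are rebased into the card's
 * virtual address space (vm_vram_base/vm_gart_base) unless the bo
 * bypasses the VM.
 */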
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset, dst_offset;
	int ret;

	src_offset = old_mem->start << PAGE_SHIFT;
	dst_offset = new_mem->start << PAGE_SHIFT;
	if (!nvbo->no_vm) {
		if (old_mem->mem_type == TTM_PL_VRAM)
			src_offset += dev_priv->vm_vram_base;
		else
			src_offset += dev_priv->vm_gart_base;

		if (new_mem->mem_type == TTM_PL_VRAM)
			dst_offset += dev_priv->vm_vram_base;
		else
			dst_offset += dev_priv->vm_gart_base;
	}

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (length) {
		u32 amount, stride, height;

		amount = min(length, (u64)(4 * 1024 * 1024));
		stride = 16 * 4;
		height = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
			OUT_RING (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
			OUT_RING (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, height);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}
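/*
 * Pre-NV50: the copy is expressed as PAGE_SIZE-wide "lines", at most 2047
 * per pass.  For example (illustrative only), a 16 MiB buffer is 4096
 * pages on a 4 KiB page system and is copied in three passes of 2047,
 * 2047 and 2 lines.
 */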
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (chan, src_offset);
		OUT_RING (chan, dst_offset);
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
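/*
 * Common entry point for M2MF moves: prefer the channel associated with
 * the bo, falling back to the kernel channel (taken under its own mutex)
 * when the bo has no channel or bypasses the VM, then fence the copy via
 * nouveau_bo_move_accel_cleanup().
 */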
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan || nvbo->no_vm) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret == 0)
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);

	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
}
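/*
 * M2MF cannot address TTM_PL_SYSTEM directly (see the comment above
 * nouveau_bo_move_accel_cleanup()), so moves to or from system memory
 * bounce through a temporary TT placement: "flipd" copies into TT with
 * the GPU and then lets TTM move TT -> SYSTEM, while "flips" does the
 * reverse for SYSTEM -> VRAM.
 */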
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	uint64_t offset;
	int ret;

	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
		/* Nothing to do. */
		*new_tile = NULL;
		return 0;
	}

	offset = new_mem->start << PAGE_SHIFT;

	if (dev_priv->card_type == NV_50) {
		ret = nv50_mem_vm_bind_linear(dev,
					      offset + dev_priv->vm_vram_base,
					      new_mem->size,
					      nouveau_bo_tile_layout(nvbo),
					      offset);
		if (ret)
			return ret;
	} else if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	if (dev_priv->card_type >= NV_10 &&
	    dev_priv->card_type < NV_50) {
		nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
		*old_tile = new_tile;
	}
}
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
	if (ret)
		return ret;

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

	return ret;
}
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* As long as the bo isn't in VRAM, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* Make sure the bo is in mappable VRAM. */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return ttm_bo_validate(bo, &nvbo->placement, false, true, false);
}
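/*
 * Attach a new fence to the buffer as its TTM sync object, dropping the
 * reference on whatever fence was attached before.  The swap is done under
 * the bo device's fence_lock so readers always see a consistent fence
 * pointer.
 */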
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}
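/*
 * TTM driver vtable: wires the callbacks above into TTM and uses nouveau
 * fences as TTM sync objects.
 */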
struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};