/*
 * Copyright 2007 Dave Airlied
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs <darktama@iinet.net.au>
 *          Jeremy Kolb <jkolb@brandeis.edu>
 */
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

#include <linux/log2.h>
#include <linux/slab.h>
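/*
 * Synchronise with any previous GPU work on this buffer before it is used
 * by @chan.  If the buffer's last fence was emitted on a different channel,
 * block until the hardware has finished with it.
 */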
nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
        struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;

        if (!prev_fence || nouveau_fence_channel(prev_fence) == chan)

        spin_lock(&nvbo->bo.lock);
        ret = ttm_bo_wait(&nvbo->bo, false, false, false);
        spin_unlock(&nvbo->bo.lock);
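/*
 * TTM destroy callback: tear down the CPU mapping, warn if the backing GEM
 * object was not detached first, and release any tiling region the buffer
 * still holds.
 */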
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        ttm_bo_kunmap(&nvbo->kmap);

        if (unlikely(nvbo->gem))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);

        nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
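/*
 * Adjust the requested size and alignment so the allocation satisfies both
 * the page size and the tiling constraints of the chipset (see the comment
 * below on the NV50 block-size rule).
 */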
nouveau_bo_fixup_align(struct drm_device *dev,
                       uint32_t tile_mode, uint32_t tile_flags,
                       int *align, int *size)
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        /*
         * Some of the tile_flags have a periodic structure of N*4096 bytes,
         * align to that as well as the page size. Align the size to the
         * appropriate boundaries. This does imply that sizes are rounded up
         * by 3-7 pages, so be aware of this and do not waste memory by
         * allocating many small buffers.
         */
        if (dev_priv->card_type == NV_50) {
                uint32_t block_size = dev_priv->vram_size >> 15;

                if (is_power_of_2(block_size)) {
                        for (i = 1; i < 10; i++) {
                                *align = 12 * i * block_size;
                                if (!(*align % 65536))

                        for (i = 1; i < 10; i++) {
                                *align = 8 * i * block_size;
                                if (!(*align % 65536))

                *size = roundup(*size, *align);

        if (dev_priv->chipset >= 0x40) {
                *size = roundup(*size, 64 * tile_mode);
        } else if (dev_priv->chipset >= 0x30) {
                *size = roundup(*size, 64 * tile_mode);
        } else if (dev_priv->chipset >= 0x20) {
                *size = roundup(*size, 64 * tile_mode);
        } else if (dev_priv->chipset >= 0x10) {
                *size = roundup(*size, 32 * tile_mode);

        /* ALIGN works only on powers of two. */
        *size = roundup(*size, PAGE_SIZE);

        if (dev_priv->card_type == NV_50) {
                *size = roundup(*size, 65536);
                *align = max(65536, *align);
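/*
 * Allocate a new buffer object.  Size and alignment are fixed up for the
 * chipset's tiling rules, an initial placement is chosen from @flags, and
 * the object is handed to TTM via ttm_bo_init(), which owns it from then on
 * (including cleanup through nouveau_bo_del_ttm() on failure).
 *
 * A rough usage sketch (illustrative values only, not taken from this file):
 *
 *      struct nouveau_bo *nvbo = NULL;
 *      int ret;
 *
 *      ret = nouveau_bo_new(dev, NULL, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *                           0, 0x0000, false, true, &nvbo);
 *      if (ret == 0)
 *              ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 */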
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
               int size, int align, uint32_t flags, uint32_t tile_mode,
               uint32_t tile_flags, bool no_vm, bool mappable,
               struct nouveau_bo **pnvbo)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);

        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        nvbo->mappable = mappable;
        nvbo->tile_mode = tile_mode;
        nvbo->tile_flags = tile_flags;

        nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
        align >>= PAGE_SHIFT;

        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
        nouveau_bo_placement_set(nvbo, flags, 0);

        nvbo->channel = chan;
        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                          ttm_bo_type_device, &nvbo->placement, align, 0,
                          false, NULL, size, nouveau_bo_del_ttm);
        /* ttm will call nouveau_bo_del_ttm if it fails. */
        nvbo->channel = NULL;
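/*
 * Expand a TTM placement type mask into an array of placement flags, one
 * entry per requested memory type, each combined with the caching/eviction
 * flags passed in.
 */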
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
        if (type & TTM_PL_FLAG_VRAM)
                pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
        if (type & TTM_PL_FLAG_TT)
                pl[(*n)++] = TTM_PL_FLAG_TT | flags;
        if (type & TTM_PL_FLAG_SYSTEM)
                pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;

nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
        struct ttm_placement *pl = &nvbo->placement;
        uint32_t flags = TTM_PL_MASK_CACHING |
                (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

        pl->placement = nvbo->placements;
        set_placement_list(nvbo->placements, &pl->num_placement,
                           type, flags);

        pl->busy_placement = nvbo->busy_placements;
        set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
                           type | busy, flags);
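/*
 * Pin a buffer into the requested memory type so it cannot be moved or
 * evicted.  Pinning is reference counted; only the first pin actually
 * re-validates the buffer and adjusts the free-aperture accounting.
 */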
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;

        if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
                NV_ERROR(nouveau_bdev(bo->bdev)->dev,
                         "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
                         1 << bo->mem.mem_type, memtype);

        if (nvbo->pin_refcnt++)

        ret = ttm_bo_reserve(bo, false, false, false, 0);

        nouveau_bo_placement_set(nvbo, memtype, 0);

        ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
        switch (bo->mem.mem_type) {
                dev_priv->fb_aper_free -= bo->mem.size;
                dev_priv->gart_info.aper_free -= bo->mem.size;

        ttm_bo_unreserve(bo);
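/*
 * Drop a pin reference.  When the last reference goes away the buffer is
 * re-validated with its current placement (now evictable again) and the
 * aperture accounting is returned.
 */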
nouveau_bo_unpin(struct nouveau_bo *nvbo)
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;

        if (--nvbo->pin_refcnt)

        ret = ttm_bo_reserve(bo, false, false, false, 0);

        nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

        ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
        switch (bo->mem.mem_type) {
                dev_priv->fb_aper_free += bo->mem.size;
                dev_priv->gart_info.aper_free += bo->mem.size;

        ttm_bo_unreserve(bo);

nouveau_bo_map(struct nouveau_bo *nvbo)
        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
        ttm_bo_unreserve(&nvbo->bo);

nouveau_bo_unmap(struct nouveau_bo *nvbo)
        ttm_bo_kunmap(&nvbo->kmap);
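/*
 * CPU accessors for a kmapped buffer.  ttm_kmap_obj_virtual() reports
 * whether the mapping is I/O memory; if so, accesses must go through the
 * io*_native() helpers instead of a plain pointer dereference.
 */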
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
                return ioread16_native((void __force __iomem *)mem);

nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
                iowrite16_native(val, (void __force __iomem *)mem);

nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
                return ioread32_native((void __force __iomem *)mem);

nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
                iowrite32_native(val, (void __force __iomem *)mem);
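/*
 * Create the TTM backend used to bind system pages into the GPU's address
 * space, depending on whether the board is driven through AGP or the
 * on-chip SGDMA engine.
 */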
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (dev_priv->gart_info.type) {
        case NOUVEAU_GART_AGP:
                return ttm_agp_backend_init(bdev, dev->agp->bridge);
        case NOUVEAU_GART_SGDMA:
                return nouveau_sgdma_init_ttm(dev);

                NV_ERROR(dev, "Unknown GART type %d\n",
                         dev_priv->gart_info.type);

nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
        /* We'll do this from user space. */
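/*
 * Describe each TTM memory type to the core: which ones are CPU-mappable,
 * what caching they support, and where they start in the GPU's virtual
 * address space.
 */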
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;

                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                man->gpu_offset = dev_priv->vm_vram_base;

                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED;
                        man->default_caching = TTM_PL_FLAG_UNCACHED;
                case NOUVEAU_GART_SGDMA:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_CMA;
                        man->available_caching = TTM_PL_MASK_CACHING;
                        man->default_caching = TTM_PL_FLAG_CACHED;

                        NV_ERROR(dev, "Unknown GART type: %d\n",
                                 dev_priv->gart_info.type);

                man->gpu_offset = dev_priv->vm_gart_base;

                NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
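/*
 * Choose where an evicted buffer should go: VRAM contents are pushed out
 * to GART, anything else falls back to plain system memory.
 */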
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
                                         TTM_PL_FLAG_SYSTEM);

                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);

        *pl = nvbo->placement;

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT; it can access
 * TTM_PL_{VRAM,TT} directly.
 */
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                              struct nouveau_bo *nvbo, bool evict,
                              bool no_wait_reserve, bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
        struct nouveau_fence *fence = NULL;

        ret = nouveau_fence_new(chan, &fence, true);

        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
                                        evict || (nvbo->channel &&
                                                  nvbo->channel != chan),
                                        no_wait_reserve, no_wait_gpu, new_mem);
        nouveau_fence_unref((void *)&fence);

static inline uint32_t
nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
                      struct ttm_mem_reg *mem)
        if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
                if (mem->mem_type == TTM_PL_TT)

        if (mem->mem_type == TTM_PL_TT)
                return chan->gart_handle;
        return chan->vram_handle;
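/*
 * Copy a buffer between placements with the M2MF engine.  Source and
 * destination are described by ctxdma handles and linear offsets, and the
 * transfer is issued as a series of 2D blits of up to 2047 lines of one
 * page each.
 */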
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                     bool no_wait_reserve, bool no_wait_gpu,
                     struct ttm_mem_reg *new_mem)
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct nouveau_channel *chan;
        uint64_t src_offset, dst_offset;

        chan = nvbo->channel;
        if (!chan || nvbo->tile_flags || nvbo->no_vm)
                chan = dev_priv->channel;

        src_offset = old_mem->mm_node->start << PAGE_SHIFT;
        dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
        if (chan != dev_priv->channel) {
                if (old_mem->mem_type == TTM_PL_TT)
                        src_offset += dev_priv->vm_gart_base;
                else
                        src_offset += dev_priv->vm_vram_base;

                if (new_mem->mem_type == TTM_PL_TT)
                        dst_offset += dev_priv->vm_gart_base;
                else
                        dst_offset += dev_priv->vm_vram_base;

        ret = RING_SPACE(chan, 3);

        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
        OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
        OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));

        if (dev_priv->card_type >= NV_50) {
                ret = RING_SPACE(chan, 4);

                BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);

                BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);

        page_count = new_mem->num_pages;
                int line_count = (page_count > 2047) ? 2047 : page_count;

                if (dev_priv->card_type >= NV_50) {
                        ret = RING_SPACE(chan, 3);

                        BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
                        OUT_RING(chan, upper_32_bits(src_offset));
                        OUT_RING(chan, upper_32_bits(dst_offset));

                ret = RING_SPACE(chan, 11);

                BEGIN_RING(chan, NvSubM2MF,
                           NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING(chan, lower_32_bits(src_offset));
                OUT_RING(chan, lower_32_bits(dst_offset));
                OUT_RING(chan, PAGE_SIZE); /* src_pitch */
                OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING(chan, PAGE_SIZE); /* line_length */
                OUT_RING(chan, line_count);
                OUT_RING(chan, (1<<8)|(1<<0));

                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);

        return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
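/*
 * Moves that involve system memory cannot be done by the M2MF engine in a
 * single step, so they are bounced through a temporary GART placement:
 * "flipd" copies out of VRAM via GART, "flips" copies into VRAM via GART.
 */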
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);

        ret = ttm_tt_bind(bo->ttm, &tmp_mem);

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);

        ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&bo->bdev->glob->lru_lock);

nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);

        ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);

        ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&bo->bdev->glob->lru_lock);
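/*
 * Set up the VM/tiling state that has to accompany a buffer landing in
 * VRAM: a linear VM binding on NV50, or a tiling region on NV10-NV4x parts.
 */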
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
                   struct nouveau_tile_reg **new_tile)
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {

        offset = new_mem->mm_node->start << PAGE_SHIFT;

        if (dev_priv->card_type == NV_50) {
                ret = nv50_mem_vm_bind_linear(dev,
                                              offset + dev_priv->vm_vram_base,
                                              new_mem->size, nvbo->tile_flags,
                                              nvbo->tile_mode);
        } else if (dev_priv->card_type >= NV_10) {
                *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
                                                nvbo->tile_mode);

nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
                      struct nouveau_tile_reg *new_tile,
                      struct nouveau_tile_reg **old_tile)
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;

        if (dev_priv->card_type >= NV_10 &&
            dev_priv->card_type < NV_50) {
                nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);

                *old_tile = new_tile;
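/*
 * Top-level TTM move callback: bind any new VM/tiling state, then pick a
 * copy path.  Moves fall back to a CPU memcpy when the card is not up yet
 * or when the accelerated M2MF paths below fail; buffers without backing
 * pages are simply retargeted at the new placement.
 */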
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                bool no_wait_reserve, bool no_wait_gpu,
                struct ttm_mem_reg *new_mem)
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct nouveau_tile_reg *new_tile = NULL;

        ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);

        /* Software copy if the card isn't up and running yet. */
        if (!dev_priv->channel) {
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

        if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                BUG_ON(bo->mem.mm_node != NULL);
                new_mem->mm_node = NULL;

        /* Hardware-assisted copy. */
        if (new_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else if (old_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else
                ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

        /* Fallback to software copy. */
        ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

        nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
        nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
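/*
 * Tell TTM how to map each memory type for CPU access: AGP apertures and
 * VRAM (through PCI BAR 1) are I/O memory, anything else is handled as
 * ordinary system pages.
 */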
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        mem->bus.addr = NULL;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))

        switch (mem->mem_type) {
                if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                        mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
                        mem->bus.base = dev_priv->gart_info.aper_base;
                        mem->bus.is_iomem = true;

                mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
                mem->bus.base = pci_resource_start(dev->pdev, 1);
                mem->bus.is_iomem = true;

nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)

nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
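/*
 * The TTM driver vtable exported to the rest of the driver.  Fencing is
 * delegated to the nouveau_fence_* helpers so TTM can wait on GPU work
 * when it needs to move or evict a buffer.
 */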
struct ttm_bo_driver nouveau_bo_driver = {
        .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
        .sync_obj_signaled = nouveau_fence_signalled,
        .sync_obj_wait = nouveau_fence_wait,
        .sync_obj_flush = nouveau_fence_flush,
        .sync_obj_unref = nouveau_fence_unref,
        .sync_obj_ref = nouveau_fence_ref,
        .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
        .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
        .io_mem_free = &nouveau_ttm_io_mem_free,
};