/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/instmem.h>

#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	/* nothing to do */
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	/* nothing to do */
	return 0;
}

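/* Drop any GPU virtual-address mappings still attached to a memory node. */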
static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
	if (node->vma[0].node) {
		nouveau_vm_unmap(&node->vma[0]);
		nouveau_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nouveau_vm_unmap(&node->vma[1]);
		nouveau_vm_put(&node->vma[1]);
	}
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	nouveau_mem_node_cleanup(mem->mm_node);
	pfb->ram.put(pfb, (struct nouveau_mem **)&mem->mm_node);
}

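/*
 * Allocate VRAM for a buffer object.  Returning 0 while leaving
 * mem->mm_node NULL signals "out of space" to TTM, which may then evict
 * other buffers and retry the allocation.
 */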
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_mem *node;
	u32 size_nc = 0;
	int ret;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = pfb->ram.get(pfb, mem->num_pages << PAGE_SHIFT,
			   mem->page_alignment << PAGE_SHIFT, size_nc,
			   (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start   = node->offset >> PAGE_SHIFT;
	return 0;
}

static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nouveau_mm *mm = man->priv;
	struct nouveau_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&mm->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&mm->mutex);

	printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
	nouveau_vram_manager_debug
};

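/*
 * TTM manager for the GART aperture on chipsets with a real MMU.  Address
 * space is assigned when the buffer is actually mapped into a VM, so _new
 * only allocates the bookkeeping node.
 */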
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nouveau_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_mem *node;

	/* a single object can never exceed the 512MiB GART aperture */
	if (unlikely((mem->num_pages << PAGE_SHIFT) >= 512 * 1024 * 1024))
		return -ENOMEM;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	mem->mm_node = node;
	mem->start   = 0;
	return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	nouveau_gart_manager_init,
	nouveau_gart_manager_fini,
	nouveau_gart_manager_new,
	nouveau_gart_manager_del,
	nouveau_gart_manager_debug
};

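/*
 * On nv04-class hardware the GART page table belongs to the nv04 vmmgr;
 * this manager takes a reference on that VM and carves GART address space
 * out of it directly with nouveau_vm_get().
 */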
#include <core/subdev/vm/nv04.h>
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device);
	struct nv04_vmmgr_priv *priv = (void *)vmm;
	struct nouveau_vm *vm = NULL;
	nouveau_vm_ref(priv->vm, &vm, NULL);
	man->priv = vm;
	return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nouveau_vm *vm = man->priv;
	nouveau_vm_ref(NULL, &vm, NULL);
	man->priv = NULL;
	return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
	struct nouveau_mem *node = mem->mm_node;
	if (node->vma[0].node)
		nouveau_vm_put(&node->vma[0]);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      struct ttm_placement *placement,
		      struct ttm_mem_reg *mem)
{
	struct nouveau_mem *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
			     NV_MEM_ACCESS_RW, &node->vma[0]);
	if (ret) {
		kfree(node);
		return ret;
	}

	mem->mm_node = node;
	mem->start   = node->vma[0].offset >> PAGE_SHIFT;
	return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	nv04_gart_manager_init,
	nv04_gart_manager_fini,
	nv04_gart_manager_new,
	nv04_gart_manager_del,
	nv04_gart_manager_debug
};

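/*
 * Route mmaps of TTM-managed offsets to TTM; anything below the DRM file
 * page offset is a legacy DRM mapping.
 */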
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

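/*
 * TTM's memory-accounting and BO state are global, shared across drivers;
 * drm_global_item_ref() instantiates them on first use and refcounts them
 * afterwards.
 */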
int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}

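/*
 * Set up TTM for the device: pick a DMA mask (AGP forces 32-bit), create
 * the global TTM state, then register the VRAM and GART memory regions.
 */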
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	u32 bits;
	int ret;

	bits = nouveau_vmmgr(drm->device)->dma_bits;
	if (drm->agp.stat == ENABLED ||
	    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
		bits = 32;

	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
	if (ret)
		return ret;

	ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
	if (ret)
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				  drm->ttm.bo_global_ref.ref.object,
				  &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
				  bits <= 32);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available  = nouveau_fb(drm->device)->ram.size;
	drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved;

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			     drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
				     pci_resource_len(dev->pdev, 1),
				     DRM_MTRR_WC);

	/* GART init */
	if (drm->agp.stat != ENABLED) {
		drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit;
		if (drm->gem.gart_available > 512 * 1024 * 1024)
			drm->gem.gart_available = 512 * 1024 * 1024;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			     drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}

void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	mutex_lock(&drm->dev->struct_mutex);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
	mutex_unlock(&drm->dev->struct_mutex);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	if (drm->ttm.mtrr >= 0) {
		drm_mtrr_del(drm->ttm.mtrr,
			     pci_resource_start(drm->dev->pdev, 1),
			     pci_resource_len(drm->dev->pdev, 1), DRM_MTRR_WC);
		drm->ttm.mtrr = -1;
	}
}