drm/nouveau: move some more code around to more appropriate places
drivers/gpu/drm/nouveau/nouveau_ttm.c
/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

#include "nouveau_drv.h"

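/*
 * TTM memory-type manager for VRAM.  The VRAM allocator itself lives in
 * the memory subsystem (nvfb_vram_get()/nvfb_vram_put() below), so the
 * init/fini hooks here have no per-manager state to manage.
 */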
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        /* nothing to do */
        return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
        /* nothing to do */
        return 0;
}

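/*
 * Drop any GPU virtual address mappings still held by a memory node.
 * A node may be mapped into up to two VMs (vma[0] and vma[1]); each
 * live mapping is unmapped and its address-space allocation released.
 */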
static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
        if (node->vma[0].node) {
                nouveau_vm_unmap(&node->vma[0]);
                nouveau_vm_put(&node->vma[0]);
        }

        if (node->vma[1].node) {
                nouveau_vm_unmap(&node->vma[1]);
                nouveau_vm_put(&node->vma[1]);
        }
}

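/* Release a VRAM allocation: drop its mappings, then free the pages. */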
static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
        struct drm_device *dev = dev_priv->dev;

        nouveau_mem_node_cleanup(mem->mm_node);
        nvfb_vram_put(dev, (struct nouveau_mem **)&mem->mm_node);
}

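/*
 * Allocate VRAM for a buffer object.  Buffers marked NONCONTIG may be
 * built from scattered blocks no smaller than the BO's page size.  On
 * -ENOSPC, return 0 with mem->mm_node left NULL: that tells TTM the
 * placement simply did not fit, rather than that a hard error occurred.
 */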
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         struct ttm_placement *placement,
                         struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_mem *node;
        u32 size_nc = 0;
        int ret;

        if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
                size_nc = 1 << nvbo->page_shift;

        ret = nvfb_vram_get(dev, mem->num_pages << PAGE_SHIFT,
                        mem->page_alignment << PAGE_SHIFT, size_nc,
                        (nvbo->tile_flags >> 8) & 0x3ff, &node);
        if (ret) {
                mem->mm_node = NULL;
                return (ret == -ENOSPC) ? 0 : ret;
        }

        node->page_shift = nvbo->page_shift;

        mem->mm_node = node;
        mem->start   = node->offset >> PAGE_SHIFT;
        return 0;
}

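/*
 * Dump the VRAM allocator's node list.  Offsets and lengths are kept in
 * units of 4 KiB blocks, hence the << 12 when printing byte addresses.
 */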
void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
        struct nouveau_mm *mm = man->priv;
        struct nouveau_mm_node *r;
        u32 total = 0, free = 0;

        mutex_lock(&mm->mutex);
        list_for_each_entry(r, &mm->nodes, nl_entry) {
                printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
                       prefix, r->type, ((u64)r->offset << 12),
                       (((u64)r->offset + r->length) << 12));

                total += r->length;
                if (!r->type)
                        free += r->length;
        }
        mutex_unlock(&mm->mutex);

        printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
               prefix, (u64)total << 12, (u64)free << 12);
        printk(KERN_DEBUG "%s  block: 0x%08x\n",
               prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
        .init = nouveau_vram_manager_init,
        .takedown = nouveau_vram_manager_fini,
        .get_node = nouveau_vram_manager_new,
        .put_node = nouveau_vram_manager_del,
        .debug = nouveau_vram_manager_debug,
};

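/*
 * TTM memory-type manager for the main GART path (nv04-class hardware
 * uses the separate manager further below).  GPU virtual addresses are
 * assigned later, at bind time, so there is no manager state to set up
 * and mem->start is left at 0.
 */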
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
        return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        nouveau_mem_node_cleanup(mem->mm_node);
        kfree(mem->mm_node);
        mem->mm_node = NULL;
}

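/*
 * Create a GART node: reject sizes that can never fit the aperture,
 * then hand back an empty nouveau_mem; no address is assigned yet.
 */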
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         struct ttm_placement *placement,
                         struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_mem *node;

        if (unlikely((mem->num_pages << PAGE_SHIFT) >=
                     dev_priv->gart_info.aper_size))
                return -ENOMEM;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;
        node->page_shift = 12;

        mem->mm_node = node;
        mem->start   = 0;
        return 0;
}

void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
        .init = nouveau_gart_manager_init,
        .takedown = nouveau_gart_manager_fini,
        .get_node = nouveau_gart_manager_new,
        .put_node = nouveau_gart_manager_del,
        .debug = nouveau_gart_manager_debug,
};

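/*
 * TTM memory-type manager for the nv04-class GART, which works against a
 * single shared VM.  The manager holds a reference on that VM between
 * init and fini.
 */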
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
        struct drm_device *dev = dev_priv->dev;
        man->priv = nv04vm_ref(dev);
        return (man->priv != NULL) ? 0 : -ENODEV;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
        struct nouveau_vm *vm = man->priv;
        nouveau_vm_ref(NULL, &vm, NULL);
        man->priv = NULL;
        return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
        struct nouveau_mem *node = mem->mm_node;
        if (node->vma[0].node)
                nouveau_vm_put(&node->vma[0]);
        kfree(mem->mm_node);
        mem->mm_node = NULL;
}

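/*
 * Unlike the main GART path above, nv04 allocates the GPU virtual range
 * up front with nouveau_vm_get(), and mem->start is derived from the
 * resulting VMA offset.
 */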
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
                      struct ttm_buffer_object *bo,
                      struct ttm_placement *placement,
                      struct ttm_mem_reg *mem)
{
        struct nouveau_mem *node;
        int ret;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        node->page_shift = 12;

        ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
                             NV_MEM_ACCESS_RW, &node->vma[0]);
        if (ret) {
                kfree(node);
                return ret;
        }

        mem->mm_node = node;
        mem->start   = node->vma[0].offset >> PAGE_SHIFT;
        return 0;
}

void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
        .init = nv04_gart_manager_init,
        .takedown = nv04_gart_manager_fini,
        .get_node = nv04_gart_manager_new,
        .put_node = nv04_gart_manager_del,
        .debug = nv04_gart_manager_debug,
};

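/*
 * mmap() entry point: offsets below DRM_FILE_PAGE_OFFSET belong to legacy
 * DRM maps and are handed to drm_mmap(); anything above is a TTM object.
 */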
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv = filp->private_data;
        struct drm_nouveau_private *dev_priv =
                file_priv->minor->dev->dev_private;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return drm_mmap(filp, vma);

        return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
}

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

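/*
 * Take references on TTM's global memory-accounting and buffer-object
 * state.  These items are shared across drivers and refcounted via
 * drm_global_item_ref()/drm_global_item_unref().
 */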
int
nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
{
        struct drm_global_reference *global_ref;
        int ret;

        global_ref = &dev_priv->ttm.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &nouveau_ttm_mem_global_init;
        global_ref->release = &nouveau_ttm_mem_global_release;

        ret = drm_global_item_ref(global_ref);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed setting up TTM memory accounting\n");
                dev_priv->ttm.mem_global_ref.release = NULL;
                return ret;
        }

        dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object;
        global_ref = &dev_priv->ttm.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;

        ret = drm_global_item_ref(global_ref);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed setting up TTM BO subsystem\n");
                drm_global_item_unref(&dev_priv->ttm.mem_global_ref);
                dev_priv->ttm.mem_global_ref.release = NULL;
                return ret;
        }

        return 0;
}

void
nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv)
{
        if (dev_priv->ttm.mem_global_ref.release == NULL)
                return;

        drm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref);
        drm_global_item_unref(&dev_priv->ttm.mem_global_ref);
        dev_priv->ttm.mem_global_ref.release = NULL;
}