drivers/gpu/drm/radeon/radeon_ttm.c (mv-sheeva.git, at commit "drm/radeon: make cp variable an array")
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "radeon_reg.h"
#include "radeon.h"

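/* mmap offsets below this mark are legacy DRM maps; TTM buffer objects
 * are mapped at or above it (see radeon_mmap() below).
 */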
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);

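/* Resolve the ttm_bo_device embedded in struct radeon_mman back to the
 * radeon_device that contains it.
 */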
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
        struct radeon_mman *mman;
        struct radeon_device *rdev;

        mman = container_of(bdev, struct radeon_mman, bdev);
        rdev = container_of(mman, struct radeon_device, mman);
        return rdev;
}

/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

static int radeon_ttm_global_init(struct radeon_device *rdev)
{
        struct drm_global_reference *global_ref;
        int r;

        rdev->mman.mem_global_referenced = false;
        global_ref = &rdev->mman.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &radeon_ttm_mem_global_init;
        global_ref->release = &radeon_ttm_mem_global_release;
        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM memory accounting "
                          "subsystem.\n");
                return r;
        }

        rdev->mman.bo_global_ref.mem_glob =
                rdev->mman.mem_global_ref.object;
        global_ref = &rdev->mman.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;
        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
                drm_global_item_unref(&rdev->mman.mem_global_ref);
                return r;
        }

        rdev->mman.mem_global_referenced = true;
        return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
        if (rdev->mman.mem_global_referenced) {
                drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
                drm_global_item_unref(&rdev->mman.mem_global_ref);
                rdev->mman.mem_global_referenced = false;
        }
}

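/* TTM cache-invalidation hook; this driver performs no explicit
 * invalidation, so just report success.
 */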
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        return 0;
}

static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                                struct ttm_mem_type_manager *man)
{
        struct radeon_device *rdev;

        rdev = radeon_get_rdev(bdev);

        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_TT:
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = rdev->mc.gtt_start;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
                if (rdev->flags & RADEON_IS_AGP) {
                        if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
                                DRM_ERROR("AGP is not enabled for memory type %u\n",
                                          (unsigned)type);
                                return -EINVAL;
                        }
                        if (!rdev->ddev->agp->cant_use_aperture)
                                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                                 TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
                }
#endif
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = rdev->mc.vram_start;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

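/* Pick where an evicted buffer should go: VRAM contents are moved to
 * GTT while the GFX CP is running (to the CPU domain when it is not);
 * everything else is evicted to the CPU domain.
 */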
static void radeon_evict_flags(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement)
{
        struct radeon_bo *rbo;
        static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

        if (!radeon_ttm_bo_is_radeon_bo(bo)) {
                placement->fpfn = 0;
                placement->lpfn = 0;
                placement->placement = &placements;
                placement->busy_placement = &placements;
                placement->num_placement = 1;
                placement->num_busy_placement = 1;
                return;
        }
        rbo = container_of(bo, struct radeon_bo, tbo);
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                if (rbo->rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready == false)
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
                else
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
                break;
        case TTM_PL_TT:
        default:
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
        }
        *placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

static void radeon_move_null(struct ttm_buffer_object *bo,
                             struct ttm_mem_reg *new_mem)
{
        struct ttm_mem_reg *old_mem = &bo->mem;

        BUG_ON(old_mem->mm_node != NULL);
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
}

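/* Copy a buffer between VRAM and GTT placements with the GPU blit
 * engine on the GFX ring, fencing the copy so TTM can clean up the old
 * placement once it finishes.
 */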
static int radeon_move_blit(struct ttm_buffer_object *bo,
                        bool evict, bool no_wait_reserve, bool no_wait_gpu,
                        struct ttm_mem_reg *new_mem,
                        struct ttm_mem_reg *old_mem)
{
        struct radeon_device *rdev;
        uint64_t old_start, new_start;
        struct radeon_fence *fence;
        int r;

        rdev = radeon_get_rdev(bo->bdev);
        r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
        if (unlikely(r)) {
                return r;
        }
        old_start = old_mem->start << PAGE_SHIFT;
        new_start = new_mem->start << PAGE_SHIFT;

        switch (old_mem->mem_type) {
        case TTM_PL_VRAM:
                old_start += rdev->mc.vram_start;
                break;
        case TTM_PL_TT:
                old_start += rdev->mc.gtt_start;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
                return -EINVAL;
        }
        switch (new_mem->mem_type) {
        case TTM_PL_VRAM:
                new_start += rdev->mc.vram_start;
                break;
        case TTM_PL_TT:
                new_start += rdev->mc.gtt_start;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
                return -EINVAL;
        }
        if (!rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready) {
                DRM_ERROR("Trying to move memory with CP turned off.\n");
                return -EINVAL;
        }

        BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

        r = radeon_copy(rdev, old_start, new_start,
                        new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
                        fence);
        /* FIXME: handle copy error */
        r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
                                      evict, no_wait_reserve, no_wait_gpu, new_mem);
        radeon_fence_unref(&fence);
        return r;
}

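/* VRAM to system memory cannot be done in one hop: blit into a
 * temporary GTT placement first, then let TTM move the now-bound pages
 * out to system memory.
 */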
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible,
                                bool no_wait_reserve, bool no_wait_gpu,
                                struct ttm_mem_reg *new_mem)
{
        struct radeon_device *rdev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg tmp_mem;
        u32 placements;
        struct ttm_placement placement;
        int r;

        rdev = radeon_get_rdev(bo->bdev);
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        placement.fpfn = 0;
        placement.lpfn = 0;
        placement.num_placement = 1;
        placement.placement = &placements;
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
                             interruptible, no_wait_reserve, no_wait_gpu);
        if (unlikely(r)) {
                return r;
        }

        r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
        if (unlikely(r)) {
                goto out_cleanup;
        }

        r = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
        r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
        r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out_cleanup:
        ttm_bo_mem_put(bo, &tmp_mem);
        return r;
}

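/* Reverse of radeon_move_vram_ram(): move the pages into a temporary
 * GTT placement, then blit them into VRAM.
 */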
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible,
                                bool no_wait_reserve, bool no_wait_gpu,
                                struct ttm_mem_reg *new_mem)
{
        struct radeon_device *rdev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg tmp_mem;
        struct ttm_placement placement;
        u32 placements;
        int r;

        rdev = radeon_get_rdev(bo->bdev);
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        placement.fpfn = 0;
        placement.lpfn = 0;
        placement.num_placement = 1;
        placement.placement = &placements;
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
        if (unlikely(r)) {
                return r;
        }
        r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
        r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
out_cleanup:
        ttm_bo_mem_put(bo, &tmp_mem);
        return r;
}

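/* Top-level move hook: use a null move or plain bind/unbind where the
 * placements allow it, the blit paths while the CP is up, and fall back
 * to ttm_bo_move_memcpy() otherwise.
 */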
static int radeon_bo_move(struct ttm_buffer_object *bo,
                        bool evict, bool interruptible,
                        bool no_wait_reserve, bool no_wait_gpu,
                        struct ttm_mem_reg *new_mem)
{
        struct radeon_device *rdev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int r;

        rdev = radeon_get_rdev(bo->bdev);
        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                radeon_move_null(bo, new_mem);
                return 0;
        }
        if ((old_mem->mem_type == TTM_PL_TT &&
             new_mem->mem_type == TTM_PL_SYSTEM) ||
            (old_mem->mem_type == TTM_PL_SYSTEM &&
             new_mem->mem_type == TTM_PL_TT)) {
                /* bind is enough */
                radeon_move_null(bo, new_mem);
                return 0;
        }
        if (!rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready || rdev->asic->copy == NULL) {
                /* use memcpy */
                goto memcpy;
        }

        if (old_mem->mem_type == TTM_PL_VRAM &&
            new_mem->mem_type == TTM_PL_SYSTEM) {
                r = radeon_move_vram_ram(bo, evict, interruptible,
                                         no_wait_reserve, no_wait_gpu, new_mem);
        } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
                   new_mem->mem_type == TTM_PL_VRAM) {
                r = radeon_move_ram_vram(bo, evict, interruptible,
                                         no_wait_reserve, no_wait_gpu, new_mem);
        } else {
                r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
        }

        if (r) {
memcpy:
                r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
        }
        return r;
}

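/* Fill in the bus address, offset and size TTM needs to map a memory
 * region for CPU access; returns -EINVAL for unmappable placements or
 * VRAM outside the CPU-visible aperture.
 */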
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct radeon_device *rdev = radeon_get_rdev(bdev);

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_TT:
#if __OS_HAS_AGP
                if (rdev->flags & RADEON_IS_AGP) {
                        /* RADEON_IS_AGP is set only if AGP is active */
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = rdev->mc.agp_base;
                        mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
                }
#endif
                break;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                /* check if it's visible */
                if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
                        return -EINVAL;
                mem->bus.base = rdev->mc.aper_base;
                mem->bus.is_iomem = true;
#ifdef __alpha__
                /*
                 * Alpha: use bus.addr to hold the ioremap() return,
                 * so we can modify bus.base below.
                 */
                if (mem->placement & TTM_PL_FLAG_WC)
                        mem->bus.addr =
                                ioremap_wc(mem->bus.base + mem->bus.offset,
                                           mem->bus.size);
                else
                        mem->bus.addr =
                                ioremap_nocache(mem->bus.base + mem->bus.offset,
                                                mem->bus.size);

                /*
                 * Alpha: Use just the bus offset plus
                 * the hose/domain memory base for bus.base.
                 * It then can be used to build PTEs for VRAM
                 * access, as done in ttm_bo_vm_fault().
                 */
                mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
                        rdev->ddev->hose->dense_mem_base;
#endif
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
                                bool lazy, bool interruptible)
{
        return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
{
        return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
        radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
        return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
        return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
        struct ttm_dma_tt               ttm;
        struct radeon_device            *rdev;
        u64                             offset;
};

static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
                                   struct ttm_mem_reg *bo_mem)
{
        struct radeon_ttm_tt *gtt = (void *)ttm;
        int r;

        gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
        if (!ttm->num_pages) {
                WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
                     ttm->num_pages, bo_mem, ttm);
        }
        r = radeon_gart_bind(gtt->rdev, gtt->offset,
                             ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
        if (r) {
                DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
                          ttm->num_pages, (unsigned)gtt->offset);
                return r;
        }
        return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
        struct radeon_ttm_tt *gtt = (void *)ttm;

        radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
        return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
        struct radeon_ttm_tt *gtt = (void *)ttm;

        ttm_dma_tt_fini(&gtt->ttm);
        kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
        .bind = &radeon_ttm_backend_bind,
        .unbind = &radeon_ttm_backend_unbind,
        .destroy = &radeon_ttm_backend_destroy,
};

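/* Allocate the ttm_tt backing a buffer object: AGP boards use the
 * generic AGP backend, everything else gets a radeon_ttm_tt whose DMA
 * address array feeds the GART.
 */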
struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
                                    unsigned long size, uint32_t page_flags,
                                    struct page *dummy_read_page)
{
        struct radeon_device *rdev;
        struct radeon_ttm_tt *gtt;

        rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
        if (rdev->flags & RADEON_IS_AGP) {
                return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
                                         size, page_flags, dummy_read_page);
        }
#endif

        gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
        if (gtt == NULL) {
                return NULL;
        }
        gtt->ttm.ttm.func = &radeon_backend_func;
        gtt->rdev = rdev;
        if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
                kfree(gtt);
                return NULL;
        }
        return &gtt->ttm.ttm;
}

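/* Allocate backing pages and DMA-map each one so the GART can point at
 * them; when SWIOTLB is active the TTM DMA pool takes care of both
 * steps instead.
 */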
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
        struct radeon_device *rdev;
        struct radeon_ttm_tt *gtt = (void *)ttm;
        unsigned i;
        int r;

        if (ttm->state != tt_unpopulated)
                return 0;

        rdev = radeon_get_rdev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
                return ttm_dma_populate(&gtt->ttm, rdev->dev);
        }
#endif

        r = ttm_pool_populate(ttm);
        if (r) {
                return r;
        }

        for (i = 0; i < ttm->num_pages; i++) {
                gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
                                                       0, PAGE_SIZE,
                                                       PCI_DMA_BIDIRECTIONAL);
                if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
                        /* unwind every mapping made so far; post-decrement
                         * so that page 0 is unmapped too */
                        while (i--) {
                                pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                                gtt->ttm.dma_address[i] = 0;
                        }
                        ttm_pool_unpopulate(ttm);
                        return -EFAULT;
                }
        }
        return 0;
}

static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        struct radeon_device *rdev;
        struct radeon_ttm_tt *gtt = (void *)ttm;
        unsigned i;

        rdev = radeon_get_rdev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
                ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
                return;
        }
#endif

        for (i = 0; i < ttm->num_pages; i++) {
                if (gtt->ttm.dma_address[i]) {
                        pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
                                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }
        }

        ttm_pool_unpopulate(ttm);
}

static struct ttm_bo_driver radeon_bo_driver = {
        .ttm_tt_create = &radeon_ttm_tt_create,
        .ttm_tt_populate = &radeon_ttm_tt_populate,
        .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
        .invalidate_caches = &radeon_invalidate_caches,
        .init_mem_type = &radeon_init_mem_type,
        .evict_flags = &radeon_evict_flags,
        .move = &radeon_bo_move,
        .verify_access = &radeon_verify_access,
        .sync_obj_signaled = &radeon_sync_obj_signaled,
        .sync_obj_wait = &radeon_sync_obj_wait,
        .sync_obj_flush = &radeon_sync_obj_flush,
        .sync_obj_unref = &radeon_sync_obj_unref,
        .sync_obj_ref = &radeon_sync_obj_ref,
        .move_notify = &radeon_bo_move_notify,
        .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
        .io_mem_reserve = &radeon_ttm_io_mem_reserve,
        .io_mem_free = &radeon_ttm_io_mem_free,
};

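/* Bring up TTM for this device: global state, the BO driver, the VRAM
 * and GTT heaps, the pinned stolen-VGA buffer and the debugfs files.
 */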
int radeon_ttm_init(struct radeon_device *rdev)
{
        int r;

        r = radeon_ttm_global_init(rdev);
        if (r) {
                return r;
        }
        /* No other user of the address space, so set the offset to 0 */
        r = ttm_bo_device_init(&rdev->mman.bdev,
                               rdev->mman.bo_global_ref.ref.object,
                               &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
                               rdev->need_dma32);
        if (r) {
                DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
                return r;
        }
        rdev->mman.initialized = true;
        r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
                           rdev->mc.real_vram_size >> PAGE_SHIFT);
        if (r) {
                DRM_ERROR("Failed initializing VRAM heap.\n");
                return r;
        }
        r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
                             RADEON_GEM_DOMAIN_VRAM,
                             &rdev->stollen_vga_memory);
        if (r) {
                return r;
        }
        r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
        if (r)
                return r;
        r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
        radeon_bo_unreserve(rdev->stollen_vga_memory);
        if (r) {
                radeon_bo_unref(&rdev->stollen_vga_memory);
                return r;
        }
        DRM_INFO("radeon: %uM of VRAM memory ready\n",
                 (unsigned)(rdev->mc.real_vram_size / (1024 * 1024)));
        r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
                           rdev->mc.gtt_size >> PAGE_SHIFT);
        if (r) {
                DRM_ERROR("Failed initializing GTT heap.\n");
                return r;
        }
        DRM_INFO("radeon: %uM of GTT memory ready.\n",
                 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
        if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
                rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
        }

        r = radeon_ttm_debugfs_init(rdev);
        if (r) {
                DRM_ERROR("Failed to init debugfs\n");
                return r;
        }
        return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
        int r;

        if (!rdev->mman.initialized)
                return;
        if (rdev->stollen_vga_memory) {
                r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
                if (r == 0) {
                        radeon_bo_unpin(rdev->stollen_vga_memory);
                        radeon_bo_unreserve(rdev->stollen_vga_memory);
                }
                radeon_bo_unref(&rdev->stollen_vga_memory);
        }
        ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
        ttm_bo_device_release(&rdev->mman.bdev);
        radeon_gart_fini(rdev);
        radeon_ttm_global_fini(rdev);
        rdev->mman.initialized = false;
        DRM_INFO("radeon: ttm finalized\n");
}

/* This should only be called at bootup or when userspace
 * isn't running. */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
        struct ttm_mem_type_manager *man;

        if (!rdev->mman.initialized)
                return;

        man = &rdev->mman.bdev.man[TTM_PL_VRAM];
        /* this just adjusts TTM's idea of the VRAM size, which sets lpfn
         * to the correct value */
        man->size = size >> PAGE_SHIFT;
}

static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

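/* Wrapper around TTM's fault handler that serializes page faults
 * against eviction via vram_mutex.
 */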
static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo;
        struct radeon_device *rdev;
        int r;

        bo = (struct ttm_buffer_object *)vma->vm_private_data;
        if (bo == NULL) {
                return VM_FAULT_NOPAGE;
        }
        rdev = radeon_get_rdev(bo->bdev);
        mutex_lock(&rdev->vram_mutex);
        r = ttm_vm_ops->fault(vma, vmf);
        mutex_unlock(&rdev->vram_mutex);
        return r;
}

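/* mmap entry point: offsets below DRM_FILE_PAGE_OFFSET are legacy DRM
 * maps, everything else is handed to TTM with the fault handler
 * swapped for the wrapper above.
 */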
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct radeon_device *rdev;
        int r;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
                return drm_mmap(filp, vma);
        }

        file_priv = filp->private_data;
        rdev = file_priv->minor->dev->dev_private;
        if (rdev == NULL) {
                return -EINVAL;
        }
        r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
        if (unlikely(r != 0)) {
                return r;
        }
        if (unlikely(ttm_vm_ops == NULL)) {
                ttm_vm_ops = vma->vm_ops;
                radeon_ttm_vm_ops = *ttm_vm_ops;
                radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
        }
        vma->vm_ops = &radeon_ttm_vm_ops;
        return 0;
}

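/* debugfs: one drm_mm dump per heap (VRAM and GTT) plus the TTM page
 * pool statistics.
 */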
#define RADEON_DEBUGFS_MEM_TYPES 2

#if defined(CONFIG_DEBUG_FS)
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        int ret;
        struct ttm_bo_global *glob = rdev->mman.bdev.glob;

        spin_lock(&glob->lru_lock);
        ret = drm_mm_dump_table(m, mm);
        spin_unlock(&glob->lru_lock);
        return ret;
}
#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
        static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
        unsigned i;

        for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
                if (i == 0)
                        sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
                else
                        sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
                radeon_mem_types_list[i].name = radeon_mem_types_names[i];
                radeon_mem_types_list[i].show = &radeon_mm_dump_table;
                radeon_mem_types_list[i].driver_features = 0;
                if (i == 0)
                        radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
                else
                        radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
        }
        /* Add ttm page pool to debugfs */
        sprintf(radeon_mem_types_names[i], "ttm_page_pool");
        radeon_mem_types_list[i].name = radeon_mem_types_names[i];
        radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
        radeon_mem_types_list[i].driver_features = 0;
        radeon_mem_types_list[i++].data = NULL;
#ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
                sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
                radeon_mem_types_list[i].name = radeon_mem_types_names[i];
                radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
                radeon_mem_types_list[i].driver_features = 0;
                radeon_mem_types_list[i++].data = NULL;
        }
#endif
        return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
#endif
        return 0;
}