/* drivers/gpu/drm/nouveau/nouveau_bo.c */
/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>

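/*
 * TTM "destroy" callback, invoked once the last reference to a buffer
 * object is gone.  Releases any tile region and VM allocation the bo
 * still owns, then frees the wrapper structure itself.
 */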
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        if (unlikely(nvbo->gem))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);

        nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
        nouveau_vm_put(&nvbo->vma);
        kfree(nvbo);
}

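/*
 * Adjust a bo's size and alignment to what the hardware needs.  Before
 * NV50, tiled buffers must be aligned to a chipset-dependent boundary
 * and sized in multiples of the tile pitch; from NV50 on, everything is
 * rounded to the VM page size instead (large pages for buffers bigger
 * than 256KiB).
 */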
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
                       int *page_shift)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

        if (dev_priv->card_type < NV_50) {
                if (nvbo->tile_mode) {
                        if (dev_priv->chipset >= 0x40) {
                                *align = 65536;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (dev_priv->chipset >= 0x30) {
                                *align = 32768;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (dev_priv->chipset >= 0x20) {
                                *align = 16384;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (dev_priv->chipset >= 0x10) {
                                *align = 16384;
                                *size = roundup(*size, 32 * nvbo->tile_mode);
                        }
                }
        } else {
                if (likely(dev_priv->chan_vm)) {
                        if (*size > 256 * 1024)
                                *page_shift = dev_priv->chan_vm->lpg_shift;
                        else
                                *page_shift = dev_priv->chan_vm->spg_shift;
                } else {
                        *page_shift = 12;
                }

                *size = roundup(*size, (1 << *page_shift));
                *align = max((1 << *page_shift), *align);
        }

        *size = roundup(*size, PAGE_SIZE);
}

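/*
 * Create a new buffer object.  Fixes up size/alignment for the chipset,
 * grabs a channel VM allocation when one is in use, and hands the object
 * to TTM for backing-store management; TTM calls nouveau_bo_del_ttm()
 * itself on failure, so the error path only needs to return.
 */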
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
               int size, int align, uint32_t flags, uint32_t tile_mode,
               uint32_t tile_flags, bool no_vm, bool mappable,
               struct nouveau_bo **pnvbo)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;
        int ret = 0, page_shift = 0;

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return -ENOMEM;
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        nvbo->mappable = mappable;
        nvbo->no_vm = no_vm;
        nvbo->tile_mode = tile_mode;
        nvbo->tile_flags = tile_flags;
        nvbo->bo.bdev = &dev_priv->ttm.bdev;

        nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
        align >>= PAGE_SHIFT;

        if (!nvbo->no_vm && dev_priv->chan_vm) {
                ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
                                     NV_MEM_ACCESS_RW, &nvbo->vma);
                if (ret) {
                        kfree(nvbo);
                        return ret;
                }
        }

        nouveau_bo_placement_set(nvbo, flags, 0);

        nvbo->channel = chan;
        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                          ttm_bo_type_device, &nvbo->placement, align, 0,
                          false, NULL, size, nouveau_bo_del_ttm);
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails. */
                return ret;
        }
        nvbo->channel = NULL;

        if (nvbo->vma.node) {
                if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
                        nvbo->bo.offset = nvbo->vma.offset;
        }

        *pnvbo = nvbo;
        return 0;
}

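/*
 * Typical caller pattern (illustrative sketch only, not code from this
 * file): allocate a VRAM bo, pin it so it cannot be evicted, then map
 * it into kernel space:
 *
 *      struct nouveau_bo *nvbo = NULL;
 *      int ret;
 *
 *      ret = nouveau_bo_new(dev, NULL, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *                           0, 0x0000, false, true, &nvbo);
 *      if (ret == 0)
 *              ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *      if (ret == 0)
 *              ret = nouveau_bo_map(nvbo);
 */
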
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
        *n = 0;

        if (type & TTM_PL_FLAG_VRAM)
                pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
        if (type & TTM_PL_FLAG_TT)
                pl[(*n)++] = TTM_PL_FLAG_TT | flags;
        if (type & TTM_PL_FLAG_SYSTEM)
                pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

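/*
 * Worked example of the split below: on a hypothetical NV1x board with
 * 64MiB of VRAM (16384 pages), depth (ZETA) buffers are restricted to
 * pages 8192 and up while everything else stays in pages 0-8191, so
 * colour and depth land on different memory controller units.
 */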
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

        if (dev_priv->card_type == NV_10 &&
            nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
                /*
                 * Make sure that the color and depth buffers are handled
                 * by independent memory controller units. Up to a 9x
                 * speed up when alpha-blending and depth-test are enabled
                 * at the same time.
                 */
                int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

                if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
                        nvbo->placement.fpfn = vram_pages / 2;
                        nvbo->placement.lpfn = ~0;
                } else {
                        nvbo->placement.fpfn = 0;
                        nvbo->placement.lpfn = vram_pages / 2;
                }
        }
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
        struct ttm_placement *pl = &nvbo->placement;
        uint32_t flags = TTM_PL_MASK_CACHING |
                (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

        pl->placement = nvbo->placements;
        set_placement_list(nvbo->placements, &pl->num_placement,
                           type, flags);

        pl->busy_placement = nvbo->busy_placements;
        set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
                           type | busy, flags);

        set_placement_range(nvbo, type);
}

int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret;

        if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
                NV_ERROR(nouveau_bdev(bo->bdev)->dev,
                         "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
                         1 << bo->mem.mem_type, memtype);
                return -EINVAL;
        }

        if (nvbo->pin_refcnt++)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                goto out;

        nouveau_bo_placement_set(nvbo, memtype, 0);

        ret = nouveau_bo_validate(nvbo, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free -= bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free -= bo->mem.size;
                        break;
                default:
                        break;
                }
        }
        ttm_bo_unreserve(bo);
out:
        if (unlikely(ret))
                nvbo->pin_refcnt--;
        return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret;

        if (--nvbo->pin_refcnt)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                return ret;

        nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

        ret = nouveau_bo_validate(nvbo, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free += bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free += bo->mem.size;
                        break;
                default:
                        break;
                }
        }

        ttm_bo_unreserve(bo);
        return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
        if (nvbo)
                ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
                    bool no_wait_reserve, bool no_wait_gpu)
{
        int ret;

        ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
                              no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        if (nvbo->vma.node) {
                if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
                        nvbo->bo.offset = nvbo->vma.offset;
        }

        return 0;
}

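/*
 * Accessors for the contents of a kmapped bo.  ttm_kmap_obj_virtual()
 * reports whether the mapping is I/O memory or ordinary kernel memory,
 * so reads and writes can use the matching access primitives.
 */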
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread16_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
        case NOUVEAU_GART_AGP:
                return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
        case NOUVEAU_GART_SGDMA:
                return nouveau_sgdma_init_ttm(dev);
        default:
                NV_ERROR(dev, "Unknown GART type %d\n",
                         dev_priv->gart_info.type);
                break;
        }

        return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        /* We'll do this from user space. */
        return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                if (dev_priv->card_type == NV_50) {
                        man->func = &nouveau_vram_manager;
                        man->io_reserve_fastpath = false;
                        man->use_io_reserve_lru = true;
                } else {
                        man->func = &ttm_bo_manager_func;
                }
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        case TTM_PL_TT:
                man->func = &ttm_bo_manager_func;
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
                        break;
                case NOUVEAU_GART_SGDMA:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_CMA;
                        man->available_caching = TTM_PL_MASK_CACHING;
                        man->default_caching = TTM_PL_FLAG_CACHED;
                        man->gpu_offset = dev_priv->gart_info.aper_base;
                        break;
                default:
                        NV_ERROR(dev, "Unknown GART type: %d\n",
                                 dev_priv->gart_info.type);
                        return -EINVAL;
                }
                break;
        default:
                NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
                                         TTM_PL_FLAG_SYSTEM);
                break;
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
                break;
        }

        *pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

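/*
 * Fence the copy just submitted on @chan and hand the fence to TTM, so
 * the old backing store is not released before the blit completes.
 */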
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                              struct nouveau_bo *nvbo, bool evict,
                              bool no_wait_reserve, bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(chan, &fence, true);
        if (ret)
                return ret;

        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
                                        no_wait_reserve, no_wait_gpu, new_mem);
        nouveau_fence_unref(&fence);
        return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
                      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        if (nvbo->no_vm) {
                if (mem->mem_type == TTM_PL_TT)
                        return NvDmaGART;
                return NvDmaVRAM;
        }

        if (mem->mem_type == TTM_PL_TT)
                return chan->gart_handle;
        return chan->vram_handle;
}

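/*
 * NV50 M2MF copy.  Moves the buffer in chunks of up to 4MiB, expressed
 * as "height" lines of a 64-byte stride.  For tiled VRAM surfaces the
 * copier is switched into tiled addressing (the 0x0200 method group for
 * the source, 0x021c for the destination); linear surfaces just set the
 * LINEAR flag instead.
 */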
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u64 length = (new_mem->num_pages << PAGE_SHIFT);
        u64 src_offset, dst_offset;
        int ret;

        src_offset = old_mem->start << PAGE_SHIFT;
        dst_offset = new_mem->start << PAGE_SHIFT;
        if (!nvbo->no_vm) {
                if (old_mem->mem_type == TTM_PL_VRAM)
                        src_offset  = nvbo->vma.offset;
                else
                        src_offset += dev_priv->gart_info.aper_base;

                if (new_mem->mem_type == TTM_PL_VRAM)
                        dst_offset  = nvbo->vma.offset;
                else
                        dst_offset += dev_priv->gart_info.aper_base;
        }

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;

        BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

        while (length) {
                u32 amount, stride, height;

                amount  = min(length, (u64)(4 * 1024 * 1024));
                stride  = 16 * 4;
                height  = amount / stride;

                /* source (IN) tiling state */
                if (old_mem->mem_type == TTM_PL_VRAM &&
                    nouveau_bo_tile_layout(nvbo)) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
                        OUT_RING  (chan, 1);
                }
                /* destination (OUT) tiling state */
                if (new_mem->mem_type == TTM_PL_VRAM &&
                    nouveau_bo_tile_layout(nvbo)) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
                        OUT_RING  (chan, 1);
                }

                ret = RING_SPACE(chan, 14);
                if (ret)
                        return ret;

                BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, upper_32_bits(dst_offset));
                BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, height);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                length -= amount;
                src_offset += amount;
                dst_offset += amount;
        }

        return 0;
}

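/*
 * Pre-NV50 M2MF copy: treats the buffer as PAGE_SIZE-wide lines and
 * copies up to 2047 of them per burst (2047 appears to be the hardware's
 * per-submission line-count limit), advancing the offsets in between.
 */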
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        u32 src_offset = old_mem->start << PAGE_SHIFT;
        u32 dst_offset = new_mem->start << PAGE_SHIFT;
        u32 page_count = new_mem->num_pages;
        int ret;

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;

        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_RING(chan, NvSubM2MF,
                                 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING  (chan, src_offset);
                OUT_RING  (chan, dst_offset);
                OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* line_length */
                OUT_RING  (chan, line_count);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

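/*
 * Copy a bo between placements using whichever channel is available:
 * the channel the bo was created against, or the kernel channel (taken
 * with NOUVEAU_KCHANNEL_MUTEX) when there is none or VM is not in use.
 */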
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                     bool no_wait_reserve, bool no_wait_gpu,
                     struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_channel *chan;
        int ret;

        chan = nvbo->channel;
        if (!chan || nvbo->no_vm) {
                chan = dev_priv->channel;
                mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
        }

        if (dev_priv->card_type < NV_50)
                ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        else
                ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        if (ret == 0) {
                ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
                                                    no_wait_reserve,
                                                    no_wait_gpu, new_mem);
        }

        if (chan == dev_priv->channel)
                mutex_unlock(&chan->mutex);
        return ret;
}

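/*
 * The copy engine cannot reach TTM_PL_SYSTEM directly, so moves to or
 * from system memory bounce through a temporary TT placement: "flipd"
 * blits into the bounce buffer and lets TTM finish the move, while
 * "flips" (below) performs the two steps in the opposite order.
 */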
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
                   struct nouveau_tile_reg **new_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        uint64_t offset;

        if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
                /* Nothing to do. */
                *new_tile = NULL;
                return 0;
        }

        offset = new_mem->start << PAGE_SHIFT;

        if (dev_priv->chan_vm) {
                nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
        } else if (dev_priv->card_type >= NV_10) {
                *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
                                                nvbo->tile_mode,
                                                nvbo->tile_flags);
        }

        return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
                      struct nouveau_tile_reg *new_tile,
                      struct nouveau_tile_reg **old_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;

        if (dev_priv->card_type >= NV_10 &&
            dev_priv->card_type < NV_50) {
                nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
                *old_tile = new_tile;
        }
}

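/*
 * Main TTM move callback.  Cheap cases first: a brand-new bo with no
 * backing pages simply adopts the new placement, and a software memcpy
 * is used until the channel infrastructure is up.  Otherwise the move
 * is done with the GPU, falling back to memcpy if that fails.
 */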
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                bool no_wait_reserve, bool no_wait_gpu,
                struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct nouveau_tile_reg *new_tile = NULL;
        int ret = 0;

        ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
        if (ret)
                return ret;

        /* Fake bo copy. */
        if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                BUG_ON(bo->mem.mm_node != NULL);
                bo->mem = *new_mem;
                new_mem->mm_node = NULL;
                goto out;
        }

        /* Software copy if the card isn't up and running yet. */
        if (!dev_priv->channel) {
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
                goto out;
        }

        /* Hardware assisted copy. */
        if (new_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else if (old_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else
                ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

        if (!ret)
                goto out;

        /* Fallback to software copy. */
        ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
        if (ret)
                nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
        else
                nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

        return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

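/*
 * Tell TTM how to reach a placement through the CPU.  System memory
 * needs no setup, AGP apertures map straight through, and VRAM is
 * exposed via PCI BAR1; on cards with a BAR1 VM, the region is first
 * mapped into that VM.
 */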
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;
        int ret;

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                return 0;
        case TTM_PL_TT:
#if __OS_HAS_AGP
                if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = dev_priv->gart_info.aper_base;
                        mem->bus.is_iomem = true;
                }
#endif
                break;
        case TTM_PL_VRAM:
        {
                struct nouveau_vram *vram = mem->mm_node;

                if (!dev_priv->bar1_vm) {
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = pci_resource_start(dev->pdev, 1);
                        mem->bus.is_iomem = true;
                        break;
                }

                ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size, 12,
                                     NV_MEM_ACCESS_RW, &vram->bar_vma);
                if (ret)
                        return ret;

                nouveau_vm_map(&vram->bar_vma, vram);

                mem->bus.offset  = vram->bar_vma.offset;
                mem->bus.offset -= 0x0020000000ULL;
                mem->bus.base = pci_resource_start(dev->pdev, 1);
                mem->bus.is_iomem = true;
        }
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct nouveau_vram *vram = mem->mm_node;

        if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
                return;

        if (!vram->bar_vma.node)
                return;

        nouveau_vm_unmap(&vram->bar_vma);
        nouveau_vm_put(&vram->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        /* as long as the bo isn't in vram, and isn't tiled, we've got
         * nothing to do here.
         */
        if (bo->mem.mem_type != TTM_PL_VRAM) {
                if (dev_priv->card_type < NV_50 ||
                    !nouveau_bo_tile_layout(nvbo))
                        return 0;
        }

        /* make sure bo is in mappable vram */
        if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
                return 0;

        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
        nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
        return nouveau_bo_validate(nvbo, false, true, false);
}

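/*
 * Attach a new fence to a bo, taking a reference on the new fence and
 * dropping the reference on whatever it replaces; the swap itself is
 * done under the bdev fence_lock.
 */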
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
        struct nouveau_fence *old_fence;

        if (likely(fence))
                nouveau_fence_ref(fence);

        spin_lock(&nvbo->bo.bdev->fence_lock);
        old_fence = nvbo->bo.sync_obj;
        nvbo->bo.sync_obj = fence;
        spin_unlock(&nvbo->bo.bdev->fence_lock);

        nouveau_fence_unref(&old_fence);
}

struct ttm_bo_driver nouveau_bo_driver = {
        .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
        .sync_obj_signaled = __nouveau_fence_signalled,
        .sync_obj_wait = __nouveau_fence_wait,
        .sync_obj_flush = __nouveau_fence_flush,
        .sync_obj_unref = __nouveau_fence_unref,
        .sync_obj_ref = __nouveau_fence_ref,
        .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
        .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
        .io_mem_free = &nouveau_ttm_io_mem_free,
};