drm/amdgpu: move adjust_mc_addr into amdgpu_gart_funcs
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <drm/drmP.h>
31 #include <drm/amdgpu_drm.h>
32 #include "amdgpu.h"
33 #include "amdgpu_trace.h"
34
35 /*
36  * GPUVM
37  * GPUVM is similar to the legacy gart on older asics, however
38  * rather than there being a single global gart table
39  * for the entire GPU, there are multiple VM page tables active
40  * at any given time.  The VM page tables can contain a mix of
41  * vram pages and system memory pages, and system memory pages
42  * can be mapped as snooped (cached system pages) or unsnooped
43  * (uncached system pages).
44  * Each VM has an ID associated with it and there is a page table
45  * associated with each VMID.  When executing a command buffer,
46  * the kernel tells the ring what VMID to use for that command
47  * buffer.  VMIDs are allocated dynamically as commands are submitted.
48  * The userspace drivers maintain their own address space and the kernel
49  * sets up their page tables accordingly when they submit their
50  * command buffers and a VMID is assigned.
51  * Cayman/Trinity support up to 8 active VMs at any given time;
52  * SI supports 16.
53  */
54
55 #define START(node) ((node)->start)
56 #define LAST(node) ((node)->last)
57
58 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
59                      START, LAST, static, amdgpu_vm_it)
60
61 #undef START
62 #undef LAST
63
64 /* Local structure. Encapsulate some VM table update parameters to reduce
65  * the number of function parameters
66  */
67 struct amdgpu_pte_update_params {
68         /* amdgpu device we do this update for */
69         struct amdgpu_device *adev;
70         /* optional amdgpu_vm we do this update for */
71         struct amdgpu_vm *vm;
72         /* address where to copy page table entries from */
73         uint64_t src;
74         /* indirect buffer to fill with commands */
75         struct amdgpu_ib *ib;
76         /* Function which actually does the update */
77         void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
78                      uint64_t addr, unsigned count, uint32_t incr,
79                      uint64_t flags);
80         /* indicate update pt or its shadow */
81         bool shadow;
82 };
83
84 /* Helper to disable partial resident texture feature from a fence callback */
85 struct amdgpu_prt_cb {
86         struct amdgpu_device *adev;
87         struct dma_fence_cb cb;
88 };
89
90 /**
91  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
92  *
93  * @adev: amdgpu_device pointer
94  *
95  * Calculate the number of entries in a page directory or page table.
96  */
97 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
98                                       unsigned level)
99 {
100         if (level == 0)
101                 /* For the root directory */
102                 return adev->vm_manager.max_pfn >>
103                         (amdgpu_vm_block_size * adev->vm_manager.num_level);
104         else if (level == adev->vm_manager.num_level)
105                 /* For the page tables on the leaves */
106                 return AMDGPU_VM_PTE_COUNT;
107         else
108                 /* Everything in between */
109                 return 1 << amdgpu_vm_block_size;
110 }
111
112 /**
113  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
114  *
115  * @adev: amdgpu_device pointer
116  *
117  * Calculate the size of the BO for a page directory or page table in bytes.
118  */
119 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
120 {
121         return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
122 }
123
124 /**
125  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
126  *
127  * @vm: vm providing the BOs
128  * @validated: head of validation list
129  * @entry: entry to add
130  *
131  * Add the page directory to the list of BOs to
132  * validate for command submission.
133  */
134 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
135                          struct list_head *validated,
136                          struct amdgpu_bo_list_entry *entry)
137 {
138         entry->robj = vm->root.bo;
139         entry->priority = 0;
140         entry->tv.bo = &entry->robj->tbo;
141         entry->tv.shared = true;
142         entry->user_pages = NULL;
143         list_add(&entry->tv.head, validated);
144 }
145
146 /**
147  * amdgpu_vm_validate_level - validate a single page table level
148  *
149  * @parent: parent page table level
150  * @validate: callback to do the validation
151  * @param: parameter for the validation callback
152  *
153  * Validate the page table BOs on command submission if necessary.
154  */
155 static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
156                                     int (*validate)(void *, struct amdgpu_bo *),
157                                     void *param)
158 {
159         unsigned i;
160         int r;
161
162         if (!parent->entries)
163                 return 0;
164
165         for (i = 0; i <= parent->last_entry_used; ++i) {
166                 struct amdgpu_vm_pt *entry = &parent->entries[i];
167
168                 if (!entry->bo)
169                         continue;
170
171                 r = validate(param, entry->bo);
172                 if (r)
173                         return r;
174
175                 /*
176                  * Recurse into the sub directory. This is harmless because we
177                  * have only a maximum of 5 layers.
178                  */
179                 r = amdgpu_vm_validate_level(entry, validate, param);
180                 if (r)
181                         return r;
182         }
183
184         return 0;
185 }
186
187 /**
188  * amdgpu_vm_validate_pt_bos - validate the page table BOs
189  *
190  * @adev: amdgpu device pointer
191  * @vm: vm providing the BOs
192  * @validate: callback to do the validation
193  * @param: parameter for the validation callback
194  *
195  * Validate the page table BOs on command submission if necessary.
196  */
197 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
198                               int (*validate)(void *p, struct amdgpu_bo *bo),
199                               void *param)
200 {
201         uint64_t num_evictions;
202
203         /* We only need to validate the page tables
204          * if they aren't already valid.
205          */
206         num_evictions = atomic64_read(&adev->num_evictions);
207         if (num_evictions == vm->last_eviction_counter)
208                 return 0;
209
210         return amdgpu_vm_validate_level(&vm->root, validate, param);
211 }
212
213 /**
214  * amdgpu_vm_move_level_in_lru - move one level of PT BOs to the LRU tail
215  *
216  * @parent: parent page table level
218  *
219  * Move the PT BOs to the tail of the LRU.
220  */
221 static void amdgpu_vm_move_level_in_lru(struct amdgpu_vm_pt *parent)
222 {
223         unsigned i;
224
225         if (!parent->entries)
226                 return;
227
228         for (i = 0; i <= parent->last_entry_used; ++i) {
229                 struct amdgpu_vm_pt *entry = &parent->entries[i];
230
231                 if (!entry->bo)
232                         continue;
233
234                 ttm_bo_move_to_lru_tail(&entry->bo->tbo);
235                 amdgpu_vm_move_level_in_lru(entry);
236         }
237 }
238
239 /**
240  * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
241  *
242  * @adev: amdgpu device instance
243  * @vm: vm providing the BOs
244  *
245  * Move the PT BOs to the tail of the LRU.
246  */
247 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
248                                   struct amdgpu_vm *vm)
249 {
250         struct ttm_bo_global *glob = adev->mman.bdev.glob;
251
252         spin_lock(&glob->lru_lock);
253         amdgpu_vm_move_level_in_lru(&vm->root);
254         spin_unlock(&glob->lru_lock);
255 }
256
257 /**
258  * amdgpu_vm_alloc_levels - allocate the PD/PT levels
259  *
260  * @adev: amdgpu_device pointer
261  * @vm: requested vm
262  * @saddr: start of the address range
263  * @eaddr: end of the address range
264  *
265  * Make sure the page directories and page tables are allocated
266  */
267 static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
268                                   struct amdgpu_vm *vm,
269                                   struct amdgpu_vm_pt *parent,
270                                   uint64_t saddr, uint64_t eaddr,
271                                   unsigned level)
272 {
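            /* Each entry at this level covers (1 << shift) GPU pages of address space */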
273         unsigned shift = (adev->vm_manager.num_level - level) *
274                 amdgpu_vm_block_size;
275         unsigned pt_idx, from, to;
276         int r;
277
278         if (!parent->entries) {
279                 unsigned num_entries = amdgpu_vm_num_entries(adev, level);
280
281                 parent->entries = drm_calloc_large(num_entries,
282                                                    sizeof(struct amdgpu_vm_pt));
283                 if (!parent->entries)
284                         return -ENOMEM;
285                 memset(parent->entries, 0, num_entries * sizeof(*parent->entries));
286         }
287
288         from = saddr >> shift;
289         to = eaddr >> shift;
290         if (from >= amdgpu_vm_num_entries(adev, level) ||
291             to >= amdgpu_vm_num_entries(adev, level))
292                 return -EINVAL;
293
294         if (to > parent->last_entry_used)
295                 parent->last_entry_used = to;
296
297         ++level;
298         saddr = saddr & ((1 << shift) - 1);
299         eaddr = eaddr & ((1 << shift) - 1);
300
301         /* walk over the address space and allocate the page tables */
302         for (pt_idx = from; pt_idx <= to; ++pt_idx) {
303                 struct reservation_object *resv = vm->root.bo->tbo.resv;
304                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
305                 struct amdgpu_bo *pt;
306
307                 if (!entry->bo) {
308                         r = amdgpu_bo_create(adev,
309                                              amdgpu_vm_bo_size(adev, level),
310                                              AMDGPU_GPU_PAGE_SIZE, true,
311                                              AMDGPU_GEM_DOMAIN_VRAM,
312                                              AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
313                                              AMDGPU_GEM_CREATE_SHADOW |
314                                              AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
315                                              AMDGPU_GEM_CREATE_VRAM_CLEARED,
316                                              NULL, resv, &pt);
317                         if (r)
318                                 return r;
319
320                         /* Keep a reference to the root directory to avoid
321                          * freeing the page tables in the wrong order.
322                          */
323                         pt->parent = amdgpu_bo_ref(vm->root.bo);
324
325                         entry->bo = pt;
326                         entry->addr = 0;
327                 }
328
329                 if (level < adev->vm_manager.num_level) {
330                         uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
331                         uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
332                                 ((1 << shift) - 1);
333                         r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
334                                                    sub_eaddr, level);
335                         if (r)
336                                 return r;
337                 }
338         }
339
340         return 0;
341 }
342
343 /**
344  * amdgpu_vm_alloc_pts - Allocate page tables.
345  *
346  * @adev: amdgpu_device pointer
347  * @vm: VM to allocate page tables for
348  * @saddr: Start address which needs to be allocated
349  * @size: Size from start address we need.
350  *
351  * Make sure the page tables are allocated.
352  */
353 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
354                         struct amdgpu_vm *vm,
355                         uint64_t saddr, uint64_t size)
356 {
357         uint64_t last_pfn;
358         uint64_t eaddr;
359
360         /* validate the parameters */
361         if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
362                 return -EINVAL;
363
364         eaddr = saddr + size - 1;
365         last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
366         if (last_pfn >= adev->vm_manager.max_pfn) {
367                 dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
368                         last_pfn, adev->vm_manager.max_pfn);
369                 return -EINVAL;
370         }
371
372         saddr /= AMDGPU_GPU_PAGE_SIZE;
373         eaddr /= AMDGPU_GPU_PAGE_SIZE;
374
375         return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
376 }
377
378 /**
379  * amdgpu_vm_had_gpu_reset - check if reset occurred since last use
380  *
381  * @adev: amdgpu_device pointer
382  * @id: VMID structure
383  *
384  * Check if GPU reset occurred since last use of the VMID.
385  */
386 static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
387                                     struct amdgpu_vm_id *id)
388 {
389         return id->current_gpu_reset_count !=
390                 atomic_read(&adev->gpu_reset_counter);
391 }
392
393 /**
394  * amdgpu_vm_grab_id - allocate the next free VMID
395  *
396  * @vm: vm to allocate id for
397  * @ring: ring we want to submit job to
398  * @sync: sync object where we add dependencies
399  * @fence: fence protecting ID from reuse
400  *
401  * Allocate an id for the vm, adding fences to the sync obj as necessary.
402  */
403 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
404                       struct amdgpu_sync *sync, struct dma_fence *fence,
405                       struct amdgpu_job *job)
406 {
407         struct amdgpu_device *adev = ring->adev;
408         uint64_t fence_context = adev->fence_context + ring->idx;
409         struct dma_fence *updates = sync->last_vm_update;
410         struct amdgpu_vm_id *id, *idle;
411         struct dma_fence **fences;
412         unsigned i;
413         int r = 0;
414
415         fences = kmalloc_array(adev->vm_manager.num_ids, sizeof(void *),
416                                GFP_KERNEL);
417         if (!fences)
418                 return -ENOMEM;
419
420         mutex_lock(&adev->vm_manager.lock);
421
422         /* Check if we have an idle VMID */
423         i = 0;
424         list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
425                 fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
426                 if (!fences[i])
427                         break;
428                 ++i;
429         }
430
431         /* If we can't find an idle VMID to use, wait till one becomes available */
432         if (&idle->list == &adev->vm_manager.ids_lru) {
433                 u64 fence_context = adev->vm_manager.fence_context + ring->idx;
434                 unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
435                 struct dma_fence_array *array;
436                 unsigned j;
437
438                 for (j = 0; j < i; ++j)
439                         dma_fence_get(fences[j]);
440
441                 array = dma_fence_array_create(i, fences, fence_context,
442                                            seqno, true);
443                 if (!array) {
444                         for (j = 0; j < i; ++j)
445                                 dma_fence_put(fences[j]);
446                         kfree(fences);
447                         r = -ENOMEM;
448                         goto error;
449                 }
450
452                 r = amdgpu_sync_fence(ring->adev, sync, &array->base);
453                 dma_fence_put(&array->base);
454                 if (r)
455                         goto error;
456
457                 mutex_unlock(&adev->vm_manager.lock);
458                 return 0;
459
460         }
461         kfree(fences);
462
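            /* Assume a VM flush is needed; cleared below if a matching VMID can be reused */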
463         job->vm_needs_flush = true;
464         /* Check if we can use a VMID already assigned to this VM */
465         i = ring->idx;
466         do {
467                 struct dma_fence *flushed;
468
469                 id = vm->ids[i++];
470                 if (i == AMDGPU_MAX_RINGS)
471                         i = 0;
472
473                 /* Check all the prerequisites to using this VMID */
474                 if (!id)
475                         continue;
476                 if (amdgpu_vm_had_gpu_reset(adev, id))
477                         continue;
478
479                 if (atomic64_read(&id->owner) != vm->client_id)
480                         continue;
481
482                 if (job->vm_pd_addr != id->pd_gpu_addr)
483                         continue;
484
485                 if (!id->last_flush)
486                         continue;
487
488                 if (id->last_flush->context != fence_context &&
489                     !dma_fence_is_signaled(id->last_flush))
490                         continue;
491
492                 flushed = id->flushed_updates;
493                 if (updates &&
494                     (!flushed || dma_fence_is_later(updates, flushed)))
495                         continue;
496
497                 /* Good, we can use this VMID. Remember this submission as
498                  * user of the VMID.
499                  */
500                 r = amdgpu_sync_fence(ring->adev, &id->active, fence);
501                 if (r)
502                         goto error;
503
504                 list_move_tail(&id->list, &adev->vm_manager.ids_lru);
505                 vm->ids[ring->idx] = id;
506
507                 job->vm_id = id - adev->vm_manager.ids;
508                 job->vm_needs_flush = false;
509                 trace_amdgpu_vm_grab_id(vm, ring->idx, job);
510
511                 mutex_unlock(&adev->vm_manager.lock);
512                 return 0;
513
514         } while (i != ring->idx);
515
516         /* Still no ID to use? Then use the idle one found earlier */
517         id = idle;
518
519         /* Remember this submission as user of the VMID */
520         r = amdgpu_sync_fence(ring->adev, &id->active, fence);
521         if (r)
522                 goto error;
523
524         dma_fence_put(id->last_flush);
525         id->last_flush = NULL;
526
527         dma_fence_put(id->flushed_updates);
528         id->flushed_updates = dma_fence_get(updates);
529
530         id->pd_gpu_addr = job->vm_pd_addr;
531         id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
532         list_move_tail(&id->list, &adev->vm_manager.ids_lru);
533         atomic64_set(&id->owner, vm->client_id);
534         vm->ids[ring->idx] = id;
535
536         job->vm_id = id - adev->vm_manager.ids;
537         trace_amdgpu_vm_grab_id(vm, ring->idx, job);
538
539 error:
540         mutex_unlock(&adev->vm_manager.lock);
541         return r;
542 }
543
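    /* Returns true when the ring is a compute ring that still needs the compute
     * VM bug workaround: gfx7 always, gfx8 only with MEC firmware older than 673.
     */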
544 static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
545 {
546         struct amdgpu_device *adev = ring->adev;
547         const struct amdgpu_ip_block *ip_block;
548
549         if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
550                 /* only compute rings */
551                 return false;
552
553         ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
554         if (!ip_block)
555                 return false;
556
557         if (ip_block->version->major <= 7) {
558                 /* gfx7 has no workaround */
559                 return true;
560         } else if (ip_block->version->major == 8) {
561                 if (adev->gfx.mec_fw_version >= 673)
562                         /* gfx8 is fixed in MEC firmware 673 */
563                         return false;
564                 else
565                         return true;
566         }
567         return false;
568 }
569
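    /* Let the GART backend adjust an MC address (via adjust_mc_addr, if provided)
     * before it is used as a page directory or page table address.
     */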
570 static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
571 {
572         u64 addr = mc_addr;
573
574         if (adev->gart.gart_funcs->adjust_mc_addr)
575                 addr = adev->gart.gart_funcs->adjust_mc_addr(adev, addr);
576
577         return addr;
578 }
579
580 /**
581  * amdgpu_vm_flush - hardware flush the vm
582  *
583  * @ring: ring to use for flush
584  * @job: job carrying the VMID and the page directory address
586  *
587  * Emit a VM flush when it is necessary.
588  */
589 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
590 {
591         struct amdgpu_device *adev = ring->adev;
592         struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
593         bool gds_switch_needed = ring->funcs->emit_gds_switch && (
594                 id->gds_base != job->gds_base ||
595                 id->gds_size != job->gds_size ||
596                 id->gws_base != job->gws_base ||
597                 id->gws_size != job->gws_size ||
598                 id->oa_base != job->oa_base ||
599                 id->oa_size != job->oa_size);
600         bool vm_flush_needed = job->vm_needs_flush ||
601                 amdgpu_vm_ring_has_compute_vm_bug(ring);
602         unsigned patch_offset = 0;
603         int r;
604
605         if (amdgpu_vm_had_gpu_reset(adev, id)) {
606                 gds_switch_needed = true;
607                 vm_flush_needed = true;
608         }
609
610         if (!vm_flush_needed && !gds_switch_needed)
611                 return 0;
612
613         if (ring->funcs->init_cond_exec)
614                 patch_offset = amdgpu_ring_init_cond_exec(ring);
615
616         if (ring->funcs->emit_pipeline_sync)
617                 amdgpu_ring_emit_pipeline_sync(ring);
618
619         if (ring->funcs->emit_vm_flush && vm_flush_needed) {
620                 u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
621                 struct dma_fence *fence;
622
623                 trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
624                 amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
625
626                 r = amdgpu_fence_emit(ring, &fence);
627                 if (r)
628                         return r;
629
630                 mutex_lock(&adev->vm_manager.lock);
631                 dma_fence_put(id->last_flush);
632                 id->last_flush = fence;
633                 mutex_unlock(&adev->vm_manager.lock);
634         }
635
636         if (gds_switch_needed) {
637                 id->gds_base = job->gds_base;
638                 id->gds_size = job->gds_size;
639                 id->gws_base = job->gws_base;
640                 id->gws_size = job->gws_size;
641                 id->oa_base = job->oa_base;
642                 id->oa_size = job->oa_size;
643                 amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
644                                             job->gds_size, job->gws_base,
645                                             job->gws_size, job->oa_base,
646                                             job->oa_size);
647         }
648
649         if (ring->funcs->patch_cond_exec)
650                 amdgpu_ring_patch_cond_exec(ring, patch_offset);
651
652         /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
653         if (ring->funcs->emit_switch_buffer) {
654                 amdgpu_ring_emit_switch_buffer(ring);
655                 amdgpu_ring_emit_switch_buffer(ring);
656         }
657         return 0;
658 }
659
660 /**
661  * amdgpu_vm_reset_id - reset VMID to zero
662  *
663  * @adev: amdgpu device structure
664  * @vm_id: vmid number to use
665  *
666  * Reset saved GDS, GWS and OA to force switch on next flush.
667  */
668 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
669 {
670         struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
671
672         id->gds_base = 0;
673         id->gds_size = 0;
674         id->gws_base = 0;
675         id->gws_size = 0;
676         id->oa_base = 0;
677         id->oa_size = 0;
678 }
679
680 /**
681  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
682  *
683  * @vm: requested vm
684  * @bo: requested buffer object
685  *
686  * Find @bo inside the requested vm.
687  * Search inside the @bo's vm list for the requested vm
688  * Returns the found bo_va or NULL if none is found
689  *
690  * Object has to be reserved!
691  */
692 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
693                                        struct amdgpu_bo *bo)
694 {
695         struct amdgpu_bo_va *bo_va;
696
697         list_for_each_entry(bo_va, &bo->va, bo_list) {
698                 if (bo_va->vm == vm) {
699                         return bo_va;
700                 }
701         }
702         return NULL;
703 }
704
705 /**
706  * amdgpu_vm_do_set_ptes - helper to call the right asic function
707  *
708  * @params: see amdgpu_pte_update_params definition
709  * @pe: addr of the page entry
710  * @addr: dst addr to write into pe
711  * @count: number of page entries to update
712  * @incr: increase next addr by incr bytes
713  * @flags: hw access flags
714  *
715  * Traces the parameters and calls the right asic functions
716  * to setup the page table using the DMA.
717  */
718 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
719                                   uint64_t pe, uint64_t addr,
720                                   unsigned count, uint32_t incr,
721                                   uint64_t flags)
722 {
723         trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
724
725         if (count < 3) {
726                 amdgpu_vm_write_pte(params->adev, params->ib, pe,
727                                     addr | flags, count, incr);
728
729         } else {
730                 amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
731                                       count, incr, flags);
732         }
733 }
734
735 /**
736  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
737  *
738  * @params: see amdgpu_pte_update_params definition
739  * @pe: addr of the page entry
740  * @addr: dst addr to write into pe
741  * @count: number of page entries to update
742  * @incr: increase next addr by incr bytes
743  * @flags: hw access flags
744  *
745  * Traces the parameters and calls the DMA function to copy the PTEs.
746  */
747 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
748                                    uint64_t pe, uint64_t addr,
749                                    unsigned count, uint32_t incr,
750                                    uint64_t flags)
751 {
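            /* 8 bytes per PTE; addr >> 12 is the page index into the source PTE array */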
752         uint64_t src = (params->src + (addr >> 12) * 8);
753
755         trace_amdgpu_vm_copy_ptes(pe, src, count);
756
757         amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
758 }
759
760 /**
761  * amdgpu_vm_map_gart - Resolve gart mapping of addr
762  *
763  * @pages_addr: optional DMA address to use for lookup
764  * @addr: the unmapped addr
765  *
766  * Look up the physical address of the page that the pte resolves
767  * to and return the pointer for the page table entry.
768  */
769 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
770 {
771         uint64_t result;
772
773         /* page table offset */
774         result = pages_addr[addr >> PAGE_SHIFT];
775
776         /* in case cpu page size != gpu page size */
777         result |= addr & (~PAGE_MASK);
778
779         result &= 0xFFFFFFFFFFFFF000ULL;
780
781         return result;
782 }
783
784 /**
785  * amdgpu_vm_update_level - update a single level in the hierarchy
786  *
787  * @adev: amdgpu_device pointer
788  * @vm: requested vm
789  * @parent: parent directory
790  *
791  * Makes sure all entries in @parent are up to date.
792  * Returns 0 for success, error for failure.
793  */
794 static int amdgpu_vm_update_level(struct amdgpu_device *adev,
795                                   struct amdgpu_vm *vm,
796                                   struct amdgpu_vm_pt *parent,
797                                   unsigned level)
798 {
799         struct amdgpu_bo *shadow;
800         struct amdgpu_ring *ring;
801         uint64_t pd_addr, shadow_addr;
802         uint32_t incr = amdgpu_vm_bo_size(adev, level + 1);
803         uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
804         unsigned count = 0, pt_idx, ndw;
805         struct amdgpu_job *job;
806         struct amdgpu_pte_update_params params;
807         struct dma_fence *fence = NULL;
808
809         int r;
810
811         if (!parent->entries)
812                 return 0;
813         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
814
815         /* padding, etc. */
816         ndw = 64;
817
818         /* assume the worst case */
819         ndw += parent->last_entry_used * 6;
820
821         pd_addr = amdgpu_bo_gpu_offset(parent->bo);
822
823         shadow = parent->bo->shadow;
824         if (shadow) {
825                 r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
826                 if (r)
827                         return r;
828                 shadow_addr = amdgpu_bo_gpu_offset(shadow);
829                 ndw *= 2;
830         } else {
831                 shadow_addr = 0;
832         }
833
834         r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
835         if (r)
836                 return r;
837
838         memset(&params, 0, sizeof(params));
839         params.adev = adev;
840         params.ib = &job->ibs[0];
841
842         /* walk over the address space and update the directory */
843         for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
844                 struct amdgpu_bo *bo = parent->entries[pt_idx].bo;
845                 uint64_t pde, pt;
846
847                 if (bo == NULL)
848                         continue;
849
850                 if (bo->shadow) {
851                         struct amdgpu_bo *pt_shadow = bo->shadow;
852
853                         r = amdgpu_ttm_bind(&pt_shadow->tbo,
854                                             &pt_shadow->tbo.mem);
855                         if (r)
856                                 return r;
857                 }
858
859                 pt = amdgpu_bo_gpu_offset(bo);
860                 if (parent->entries[pt_idx].addr == pt)
861                         continue;
862
863                 parent->entries[pt_idx].addr = pt;
864
865                 pde = pd_addr + pt_idx * 8;
866                 if (((last_pde + 8 * count) != pde) ||
867                     ((last_pt + incr * count) != pt) ||
868                     (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
869
870                         if (count) {
871                                 uint64_t pt_addr =
872                                         amdgpu_vm_adjust_mc_addr(adev, last_pt);
873
874                                 if (shadow)
875                                         amdgpu_vm_do_set_ptes(&params,
876                                                               last_shadow,
877                                                               pt_addr, count,
878                                                               incr,
879                                                               AMDGPU_PTE_VALID);
880
881                                 amdgpu_vm_do_set_ptes(&params, last_pde,
882                                                       pt_addr, count, incr,
883                                                       AMDGPU_PTE_VALID);
884                         }
885
886                         count = 1;
887                         last_pde = pde;
888                         last_shadow = shadow_addr + pt_idx * 8;
889                         last_pt = pt;
890                 } else {
891                         ++count;
892                 }
893         }
894
895         if (count) {
896                 uint64_t pt_addr = amdgpu_vm_adjust_mc_addr(adev, last_pt);
897
898                 if (shadow)
899                         amdgpu_vm_do_set_ptes(&params, last_shadow, pt_addr,
900                                               count, incr, AMDGPU_PTE_VALID);
901
902                 amdgpu_vm_do_set_ptes(&params, last_pde, pt_addr,
903                                       count, incr, AMDGPU_PTE_VALID);
904         }
905
906         if (params.ib->length_dw == 0) {
907                 amdgpu_job_free(job);
908         } else {
909                 amdgpu_ring_pad_ib(ring, params.ib);
910                 amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
911                                  AMDGPU_FENCE_OWNER_VM);
912                 if (shadow)
913                         amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
914                                          AMDGPU_FENCE_OWNER_VM);
915
916                 WARN_ON(params.ib->length_dw > ndw);
917                 r = amdgpu_job_submit(job, ring, &vm->entity,
918                                 AMDGPU_FENCE_OWNER_VM, &fence);
919                 if (r)
920                         goto error_free;
921
922                 amdgpu_bo_fence(parent->bo, fence, true);
923                 dma_fence_put(vm->last_dir_update);
924                 vm->last_dir_update = dma_fence_get(fence);
925                 dma_fence_put(fence);
926         }
927         /*
928          * Recurse into the subdirectories. This recursion is harmless because
929          * we only have a maximum of 5 layers.
930          */
931         for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
932                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
933
934                 if (!entry->bo)
935                         continue;
936
937                 r = amdgpu_vm_update_level(adev, vm, entry, level + 1);
938                 if (r)
939                         return r;
940         }
941
942         return 0;
943
944 error_free:
945         amdgpu_job_free(job);
946         return r;
947 }
948
949 /**
950  * amdgpu_vm_update_directories - make sure that all directories are valid
951  *
952  * @adev: amdgpu_device pointer
953  * @vm: requested vm
954  *
955  * Makes sure all directories are up to date.
956  * Returns 0 for success, error for failure.
957  */
958 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
959                                  struct amdgpu_vm *vm)
960 {
961         return amdgpu_vm_update_level(adev, vm, &vm->root, 0);
962 }
963
964 /**
965  * amdgpu_vm_get_pt - find the page table for an address
966  *
967  * @p: see amdgpu_pte_update_params definition
968  * @addr: virtual address in question
969  *
970  * Find the page table BO for a virtual address, return NULL when none found.
971  */
972 static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p,
973                                           uint64_t addr)
974 {
975         struct amdgpu_vm_pt *entry = &p->vm->root;
976         unsigned idx, level = p->adev->vm_manager.num_level;
977
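            /* walk from the root down, each level consumes amdgpu_vm_block_size address bits */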
978         while (entry->entries) {
979                 idx = addr >> (amdgpu_vm_block_size * level--);
980                 idx %= amdgpu_bo_size(entry->bo) / 8;
981                 entry = &entry->entries[idx];
982         }
983
984         if (level)
985                 return NULL;
986
987         return entry->bo;
988 }
989
990 /**
991  * amdgpu_vm_update_ptes - make sure that page tables are valid
992  *
993  * @params: see amdgpu_pte_update_params definition
995  * @start: start of GPU address range
996  * @end: end of GPU address range
997  * @dst: destination address to map to, the next dst inside the function
998  * @flags: mapping flags
999  *
1000  * Update the page tables in the range @start - @end.
1001  */
1002 static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1003                                   uint64_t start, uint64_t end,
1004                                   uint64_t dst, uint64_t flags)
1005 {
1006         const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
1007
1008         uint64_t cur_pe_start, cur_nptes, cur_dst;
1009         uint64_t addr; /* next GPU address to be updated */
1010         struct amdgpu_bo *pt;
1011         unsigned nptes; /* next number of ptes to be updated */
1012         uint64_t next_pe_start;
1013
1014         /* initialize the variables */
1015         addr = start;
1016         pt = amdgpu_vm_get_pt(params, addr);
1017         if (!pt) {
1018                 pr_err("PT not found, aborting update_ptes\n");
1019                 return;
1020         }
1021
1022         if (params->shadow) {
1023                 if (!pt->shadow)
1024                         return;
1025                 pt = pt->shadow;
1026         }
1027         if ((addr & ~mask) == (end & ~mask))
1028                 nptes = end - addr;
1029         else
1030                 nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
1031
1032         cur_pe_start = amdgpu_bo_gpu_offset(pt);
1033         cur_pe_start += (addr & mask) * 8;
1034         cur_nptes = nptes;
1035         cur_dst = dst;
1036
1037         /* for next ptb */
1038         addr += nptes;
1039         dst += nptes * AMDGPU_GPU_PAGE_SIZE;
1040
1041         /* walk over the address space and update the page tables */
1042         while (addr < end) {
1043                 pt = amdgpu_vm_get_pt(params, addr);
1044                 if (!pt) {
1045                         pr_err("PT not found, aborting update_ptes\n");
1046                         return;
1047                 }
1048
1049                 if (params->shadow) {
1050                         if (!pt->shadow)
1051                                 return;
1052                         pt = pt->shadow;
1053                 }
1054
1055                 if ((addr & ~mask) == (end & ~mask))
1056                         nptes = end - addr;
1057                 else
1058                         nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
1059
1060                 next_pe_start = amdgpu_bo_gpu_offset(pt);
1061                 next_pe_start += (addr & mask) * 8;
1062
1063                 if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
1064                     ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
1065                         /* The next ptb is consecutive to current ptb.
1066                          * Don't call the update function now.
1067                          * Will update two ptbs together in future.
1068                          */
1069                         cur_nptes += nptes;
1070                 } else {
1071                         params->func(params, cur_pe_start, cur_dst, cur_nptes,
1072                                      AMDGPU_GPU_PAGE_SIZE, flags);
1073
1074                         cur_pe_start = next_pe_start;
1075                         cur_nptes = nptes;
1076                         cur_dst = dst;
1077                 }
1078
1079                 /* for next ptb */
1080                 addr += nptes;
1081                 dst += nptes * AMDGPU_GPU_PAGE_SIZE;
1082         }
1083
1084         params->func(params, cur_pe_start, cur_dst, cur_nptes,
1085                      AMDGPU_GPU_PAGE_SIZE, flags);
1086 }
1087
1088 /**
1089  * amdgpu_vm_frag_ptes - add fragment information to PTEs
1090  *
1091  * @params: see amdgpu_pte_update_params definition
1093  * @start: first PTE to handle
1094  * @end: last PTE to handle
1095  * @dst: addr those PTEs should point to
1096  * @flags: hw mapping flags
1097  */
1098 static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
1099                                 uint64_t start, uint64_t end,
1100                                 uint64_t dst, uint64_t flags)
1101 {
1102         /**
1103          * The MC L1 TLB supports variable sized pages, based on a fragment
1104          * field in the PTE. When this field is set to a non-zero value, page
1105          * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1106          * flags are considered valid for all PTEs within the fragment range
1107          * and corresponding mappings are assumed to be physically contiguous.
1108          *
1109          * The L1 TLB can store a single PTE for the whole fragment,
1110          * significantly increasing the space available for translation
1111          * caching. This leads to large improvements in throughput when the
1112          * TLB is under pressure.
1113          *
1114          * The L2 TLB distributes small and large fragments into two
1115          * asymmetric partitions. The large fragment cache is significantly
1116          * larger. Thus, we try to use large fragments wherever possible.
1117          * Userspace can support this by aligning virtual base address and
1118          * allocation size to the fragment size.
1119          */
1120
1121         /* SI and newer are optimized for 64KB */
1122         uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
1123         uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;
1124
1125         uint64_t frag_start = ALIGN(start, frag_align);
1126         uint64_t frag_end = end & ~(frag_align - 1);
1127
1128         /* system pages are not contiguous */
1129         if (params->src || !(flags & AMDGPU_PTE_VALID) ||
1130             (frag_start >= frag_end)) {
1131
1132                 amdgpu_vm_update_ptes(params, start, end, dst, flags);
1133                 return;
1134         }
1135
1136         /* handle the 4K area at the beginning */
1137         if (start != frag_start) {
1138                 amdgpu_vm_update_ptes(params, start, frag_start,
1139                                       dst, flags);
1140                 dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
1141         }
1142
1143         /* handle the area in the middle */
1144         amdgpu_vm_update_ptes(params, frag_start, frag_end, dst,
1145                               flags | frag_flags);
1146
1147         /* handle the 4K area at the end */
1148         if (frag_end != end) {
1149                 dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
1150                 amdgpu_vm_update_ptes(params, frag_end, end, dst, flags);
1151         }
1152 }
1153
1154 /**
1155  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1156  *
1157  * @adev: amdgpu_device pointer
1158  * @exclusive: fence we need to sync to
1159  * @src: address where to copy page table entries from
1160  * @pages_addr: DMA addresses to use for mapping
1161  * @vm: requested vm
1162  * @start: start of mapped range
1163  * @last: last mapped entry
1164  * @flags: flags for the entries
1165  * @addr: addr to set the area to
1166  * @fence: optional resulting fence
1167  *
1168  * Fill in the page table entries between @start and @last.
1169  * Returns 0 for success, -EINVAL for failure.
1170  */
1171 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1172                                        struct dma_fence *exclusive,
1173                                        uint64_t src,
1174                                        dma_addr_t *pages_addr,
1175                                        struct amdgpu_vm *vm,
1176                                        uint64_t start, uint64_t last,
1177                                        uint64_t flags, uint64_t addr,
1178                                        struct dma_fence **fence)
1179 {
1180         struct amdgpu_ring *ring;
1181         void *owner = AMDGPU_FENCE_OWNER_VM;
1182         unsigned nptes, ncmds, ndw;
1183         struct amdgpu_job *job;
1184         struct amdgpu_pte_update_params params;
1185         struct dma_fence *f = NULL;
1186         int r;
1187
1188         memset(&params, 0, sizeof(params));
1189         params.adev = adev;
1190         params.vm = vm;
1191         params.src = src;
1192
1193         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
1194
1195         /* sync to everything on unmapping */
1196         if (!(flags & AMDGPU_PTE_VALID))
1197                 owner = AMDGPU_FENCE_OWNER_UNDEFINED;
1198
1199         nptes = last - start + 1;
1200
1201         /*
1202          * reserve space for one command every (1 << BLOCK_SIZE)
1203          *  entries or 2k dwords (whatever is smaller)
1204          */
1205         ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
1206
1207         /* padding, etc. */
1208         ndw = 64;
1209
1210         if (src) {
1211                 /* only copy commands needed */
1212                 ndw += ncmds * 7;
1213
1214                 params.func = amdgpu_vm_do_copy_ptes;
1215
1216         } else if (pages_addr) {
1217                 /* copy commands needed */
1218                 ndw += ncmds * 7;
1219
1220                 /* and also PTEs */
1221                 ndw += nptes * 2;
1222
1223                 params.func = amdgpu_vm_do_copy_ptes;
1224
1225         } else {
1226                 /* set page commands needed */
1227                 ndw += ncmds * 10;
1228
1229                 /* two extra commands for begin/end of fragment */
1230                 ndw += 2 * 10;
1231
1232                 params.func = amdgpu_vm_do_set_ptes;
1233         }
1234
1235         r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1236         if (r)
1237                 return r;
1238
1239         params.ib = &job->ibs[0];
1240
1241         if (!src && pages_addr) {
1242                 uint64_t *pte;
1243                 unsigned i;
1244
1245                 /* Put the PTEs at the end of the IB. */
1246                 i = ndw - nptes * 2;
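                     /* ibs->ptr[] is a dword array, so entry i sits at byte offset i * 4 from gpu_addr */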
1247                 pte = (uint64_t *)&(job->ibs->ptr[i]);
1248                 params.src = job->ibs->gpu_addr + i * 4;
1249
1250                 for (i = 0; i < nptes; ++i) {
1251                         pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1252                                                     AMDGPU_GPU_PAGE_SIZE);
1253                         pte[i] |= flags;
1254                 }
1255                 addr = 0;
1256         }
1257
1258         r = amdgpu_sync_fence(adev, &job->sync, exclusive);
1259         if (r)
1260                 goto error_free;
1261
1262         r = amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv,
1263                              owner);
1264         if (r)
1265                 goto error_free;
1266
1267         r = reservation_object_reserve_shared(vm->root.bo->tbo.resv);
1268         if (r)
1269                 goto error_free;
1270
1271         params.shadow = true;
1272         amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
1273         params.shadow = false;
1274         amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
1275
1276         amdgpu_ring_pad_ib(ring, params.ib);
1277         WARN_ON(params.ib->length_dw > ndw);
1278         r = amdgpu_job_submit(job, ring, &vm->entity,
1279                               AMDGPU_FENCE_OWNER_VM, &f);
1280         if (r)
1281                 goto error_free;
1282
1283         amdgpu_bo_fence(vm->root.bo, f, true);
1284         dma_fence_put(*fence);
1285         *fence = f;
1286         return 0;
1287
1288 error_free:
1289         amdgpu_job_free(job);
1290         return r;
1291 }
1292
1293 /**
1294  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1295  *
1296  * @adev: amdgpu_device pointer
1297  * @exclusive: fence we need to sync to
1298  * @gtt_flags: flags as they are used for GTT
1299  * @pages_addr: DMA addresses to use for mapping
1300  * @vm: requested vm
1301  * @mapping: mapped range and flags to use for the update
1302  * @flags: HW flags for the mapping
1303  * @nodes: array of drm_mm_nodes with the MC addresses
1304  * @fence: optional resulting fence
1305  *
1306  * Split the mapping into smaller chunks so that each update fits
1307  * into a SDMA IB.
1308  * Returns 0 for success, -EINVAL for failure.
1309  */
1310 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1311                                       struct dma_fence *exclusive,
1312                                       uint64_t gtt_flags,
1313                                       dma_addr_t *pages_addr,
1314                                       struct amdgpu_vm *vm,
1315                                       struct amdgpu_bo_va_mapping *mapping,
1316                                       uint64_t flags,
1317                                       struct drm_mm_node *nodes,
1318                                       struct dma_fence **fence)
1319 {
1320         uint64_t pfn, src = 0, start = mapping->start;
1321         int r;
1322
1323         /* Normally bo_va->flags only contains the READABLE and WRITEABLE bits,
1324          * but just in case we filter the flags here first.
1325          */
1326         if (!(mapping->flags & AMDGPU_PTE_READABLE))
1327                 flags &= ~AMDGPU_PTE_READABLE;
1328         if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1329                 flags &= ~AMDGPU_PTE_WRITEABLE;
1330
1331         flags &= ~AMDGPU_PTE_EXECUTABLE;
1332         flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1333
1334         flags &= ~AMDGPU_PTE_MTYPE_MASK;
1335         flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1336
1337         trace_amdgpu_vm_bo_update(mapping);
1338
1339         pfn = mapping->offset >> PAGE_SHIFT;
1340         if (nodes) {
1341                 while (pfn >= nodes->size) {
1342                         pfn -= nodes->size;
1343                         ++nodes;
1344                 }
1345         }
1346
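             /* Walk the drm_mm nodes and emit one mapping update per physically contiguous chunk */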
1347         do {
1348                 uint64_t max_entries;
1349                 uint64_t addr, last;
1350
1351                 if (nodes) {
1352                         addr = nodes->start << PAGE_SHIFT;
1353                         max_entries = (nodes->size - pfn) *
1354                                 (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
1355                 } else {
1356                         addr = 0;
1357                         max_entries = S64_MAX;
1358                 }
1359
1360                 if (pages_addr) {
1361                         if (flags == gtt_flags)
1362                                 src = adev->gart.table_addr +
1363                                         (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
1364                         else
1365                                 max_entries = min(max_entries, 16ull * 1024ull);
1366                         addr = 0;
1367                 } else if (flags & AMDGPU_PTE_VALID) {
1368                         addr += adev->vm_manager.vram_base_offset;
1369                 }
1370                 addr += pfn << PAGE_SHIFT;
1371
1372                 last = min((uint64_t)mapping->last, start + max_entries - 1);
1373                 r = amdgpu_vm_bo_update_mapping(adev, exclusive,
1374                                                 src, pages_addr, vm,
1375                                                 start, last, flags, addr,
1376                                                 fence);
1377                 if (r)
1378                         return r;
1379
1380                 pfn += last - start + 1;
1381                 if (nodes && nodes->size == pfn) {
1382                         pfn = 0;
1383                         ++nodes;
1384                 }
1385                 start = last + 1;
1386
1387         } while (unlikely(start != mapping->last + 1));
1388
1389         return 0;
1390 }
1391
1392 /**
1393  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1394  *
1395  * @adev: amdgpu_device pointer
1396  * @bo_va: requested BO and VM object
1397  * @clear: if true clear the entries
1398  *
1399  * Fill in the page table entries for @bo_va.
1400  * Returns 0 for success, -EINVAL for failure.
1401  */
1402 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1403                         struct amdgpu_bo_va *bo_va,
1404                         bool clear)
1405 {
1406         struct amdgpu_vm *vm = bo_va->vm;
1407         struct amdgpu_bo_va_mapping *mapping;
1408         dma_addr_t *pages_addr = NULL;
1409         uint64_t gtt_flags, flags;
1410         struct ttm_mem_reg *mem;
1411         struct drm_mm_node *nodes;
1412         struct dma_fence *exclusive;
1413         int r;
1414
1415         if (clear || !bo_va->bo) {
1416                 mem = NULL;
1417                 nodes = NULL;
1418                 exclusive = NULL;
1419         } else {
1420                 struct ttm_dma_tt *ttm;
1421
1422                 mem = &bo_va->bo->tbo.mem;
1423                 nodes = mem->mm_node;
1424                 if (mem->mem_type == TTM_PL_TT) {
1425                         ttm = container_of(bo_va->bo->tbo.ttm, struct
1426                                            ttm_dma_tt, ttm);
1427                         pages_addr = ttm->dma_address;
1428                 }
1429                 exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
1430         }
1431
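             /* gtt_flags == flags means the PTEs can later be copied straight from
              * our own GART table (see amdgpu_vm_bo_split_mapping).
              */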
1432         if (bo_va->bo) {
1433                 flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
1434                 gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
1435                         adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
1436                         flags : 0;
1437         } else {
1438                 flags = 0x0;
1439                 gtt_flags = ~0x0;
1440         }
1441
1442         spin_lock(&vm->status_lock);
1443         if (!list_empty(&bo_va->vm_status))
1444                 list_splice_init(&bo_va->valids, &bo_va->invalids);
1445         spin_unlock(&vm->status_lock);
1446
1447         list_for_each_entry(mapping, &bo_va->invalids, list) {
1448                 r = amdgpu_vm_bo_split_mapping(adev, exclusive,
1449                                                gtt_flags, pages_addr, vm,
1450                                                mapping, flags, nodes,
1451                                                &bo_va->last_pt_update);
1452                 if (r)
1453                         return r;
1454         }
1455
1456         if (trace_amdgpu_vm_bo_mapping_enabled()) {
1457                 list_for_each_entry(mapping, &bo_va->valids, list)
1458                         trace_amdgpu_vm_bo_mapping(mapping);
1459
1460                 list_for_each_entry(mapping, &bo_va->invalids, list)
1461                         trace_amdgpu_vm_bo_mapping(mapping);
1462         }
1463
1464         spin_lock(&vm->status_lock);
1465         list_splice_init(&bo_va->invalids, &bo_va->valids);
1466         list_del_init(&bo_va->vm_status);
1467         if (clear)
1468                 list_add(&bo_va->vm_status, &vm->cleared);
1469         spin_unlock(&vm->status_lock);
1470
1471         return 0;
1472 }
1473
1474 /**
1475  * amdgpu_vm_update_prt_state - update the global PRT state
1476  */
1477 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1478 {
1479         unsigned long flags;
1480         bool enable;
1481
1482         spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1483         enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1484         adev->gart.gart_funcs->set_prt(adev, enable);
1485         spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1486 }
1487
1488 /**
1489  * amdgpu_vm_prt_get - add a PRT user
1490  */
1491 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1492 {
1493         if (!adev->gart.gart_funcs->set_prt)
1494                 return;
1495
1496         if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1497                 amdgpu_vm_update_prt_state(adev);
1498 }
1499
1500 /**
1501  * amdgpu_vm_prt_put - drop a PRT user
1502  */
1503 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1504 {
1505         if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1506                 amdgpu_vm_update_prt_state(adev);
1507 }
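/*
 * Intended pairing (illustrative sketch; "unmap_fence" is an assumed fence
 * for the unmap operation): each PRT mapping holds one reference, taken when
 * the mapping is created and dropped once the unmap has completed, so the
 * global PRT bit stays enabled exactly as long as PRT mappings exist.
 *
 *      if (flags & AMDGPU_PTE_PRT)
 *              amdgpu_vm_prt_get(adev);
 *      ...
 *      amdgpu_vm_add_prt_cb(adev, unmap_fence);
 *
 * where the callback added by amdgpu_vm_add_prt_cb() performs the matching
 * amdgpu_vm_prt_put() once unmap_fence signals.
 */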
1508
1509 /**
1510  * amdgpu_vm_prt_cb - callback for updating the PRT status
1511  */
1512 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1513 {
1514         struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1515
1516         amdgpu_vm_prt_put(cb->adev);
1517         kfree(cb);
1518 }
1519
1520 /**
1521  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1522  */
1523 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1524                                  struct dma_fence *fence)
1525 {
1526         struct amdgpu_prt_cb *cb;
1527
1528         if (!adev->gart.gart_funcs->set_prt)
1529                 return;
1530
1531         cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1532         if (!cb) {
1533                 /* Last resort when we are OOM */
1534                 if (fence)
1535                         dma_fence_wait(fence, false);
1536
1537                 amdgpu_vm_prt_put(adev);
1538         } else {
1539                 cb->adev = adev;
1540                 if (!fence || dma_fence_add_callback(fence, &cb->cb,
1541                                                      amdgpu_vm_prt_cb))
1542                         amdgpu_vm_prt_cb(fence, &cb->cb);
1543         }
1544 }
1545
1546 /**
1547  * amdgpu_vm_free_mapping - free a mapping
1548  *
1549  * @adev: amdgpu_device pointer
1550  * @vm: requested vm
1551  * @mapping: mapping to be freed
1552  * @fence: fence of the unmap operation
1553  *
1554  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1555  */
1556 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1557                                    struct amdgpu_vm *vm,
1558                                    struct amdgpu_bo_va_mapping *mapping,
1559                                    struct dma_fence *fence)
1560 {
1561         if (mapping->flags & AMDGPU_PTE_PRT)
1562                 amdgpu_vm_add_prt_cb(adev, fence);
1563         kfree(mapping);
1564 }
1565
1566 /**
1567  * amdgpu_vm_prt_fini - finish all prt mappings
1568  *
1569  * @adev: amdgpu_device pointer
1570  * @vm: requested vm
1571  *
1572  * Register a cleanup callback to disable PRT support after the VM dies.
1573  */
1574 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1575 {
1576         struct reservation_object *resv = vm->root.bo->tbo.resv;
1577         struct dma_fence *excl, **shared;
1578         unsigned i, shared_count;
1579         int r;
1580
1581         r = reservation_object_get_fences_rcu(resv, &excl,
1582                                               &shared_count, &shared);
1583         if (r) {
1584                 /* Not enough memory to grab the fence list, as last resort
1585                  * block for all the fences to complete.
1586                  */
1587                 reservation_object_wait_timeout_rcu(resv, true, false,
1588                                                     MAX_SCHEDULE_TIMEOUT);
1589                 return;
1590         }
1591
1592         /* Add a callback for each fence in the reservation object */
1593         amdgpu_vm_prt_get(adev);
1594         amdgpu_vm_add_prt_cb(adev, excl);
1595
1596         for (i = 0; i < shared_count; ++i) {
1597                 amdgpu_vm_prt_get(adev);
1598                 amdgpu_vm_add_prt_cb(adev, shared[i]);
1599         }
1600
1601         kfree(shared);
1602 }
1603
1604 /**
1605  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1606  *
1607  * @adev: amdgpu_device pointer
1608  * @vm: requested vm
1609  * @fence: optional resulting fence (unchanged if no work needed to be done
1610  * or if an error occurred)
1611  *
1612  * Make sure all freed BOs are cleared in the PT.
1613  * Returns 0 for success.
1614  *
1615  * PTs have to be reserved and the mutex must be locked!
1616  */
1617 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1618                           struct amdgpu_vm *vm,
1619                           struct dma_fence **fence)
1620 {
1621         struct amdgpu_bo_va_mapping *mapping;
1622         struct dma_fence *f = NULL;
1623         int r;
1624
1625         while (!list_empty(&vm->freed)) {
1626                 mapping = list_first_entry(&vm->freed,
1627                         struct amdgpu_bo_va_mapping, list);
1628                 list_del(&mapping->list);
1629
1630                 r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
1631                                                0, 0, &f);
1632                 amdgpu_vm_free_mapping(adev, vm, mapping, f);
1633                 if (r) {
1634                         dma_fence_put(f);
1635                         return r;
1636                 }
1637         }
1638
1639         if (fence && f) {
1640                 dma_fence_put(*fence);
1641                 *fence = f;
1642         } else {
1643                 dma_fence_put(f);
1644         }
1645
1646         return 0;
1647
1648 }
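/*
 * Illustrative sketch (hypothetical submission path; "sync" is an assumed
 * amdgpu_sync object): freed mappings are flushed out of the page tables
 * before new work is queued, and the optional fence orders that work behind
 * the clears.
 *
 *      struct dma_fence *fence = NULL;
 *
 *      r = amdgpu_vm_clear_freed(adev, vm, &fence);
 *      if (r)
 *              return r;
 *      if (fence) {
 *              r = amdgpu_sync_fence(adev, sync, fence);
 *              dma_fence_put(fence);
 *      }
 */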
1649
1650 /**
1651  * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
1652  *
1653  * @adev: amdgpu_device pointer
1654  * @vm: requested vm
1655  *
1656  * Make sure all invalidated BOs are cleared in the PT.
1657  * Returns 0 for success.
1658  *
1659  * PTs have to be reserved and the mutex must be locked!
1660  */
1661 int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
1662                              struct amdgpu_vm *vm, struct amdgpu_sync *sync)
1663 {
1664         struct amdgpu_bo_va *bo_va = NULL;
1665         int r = 0;
1666
1667         spin_lock(&vm->status_lock);
1668         while (!list_empty(&vm->invalidated)) {
1669                 bo_va = list_first_entry(&vm->invalidated,
1670                         struct amdgpu_bo_va, vm_status);
1671                 spin_unlock(&vm->status_lock);
1672
1673                 r = amdgpu_vm_bo_update(adev, bo_va, true);
1674                 if (r)
1675                         return r;
1676
1677                 spin_lock(&vm->status_lock);
1678         }
1679         spin_unlock(&vm->status_lock);
1680
1681         if (bo_va)
1682                 r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
1683
1684         return r;
1685 }
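/*
 * A possible ordering in a submission path (illustrative sketch; error
 * handling shortened): freed mappings are cleared first, then every BO still
 * on the invalidated list is updated with clear == true and its last PT
 * update is added to the caller's sync object.
 *
 *      r = amdgpu_vm_clear_freed(adev, vm, NULL);
 *      if (!r)
 *              r = amdgpu_vm_clear_invalids(adev, vm, sync);
 */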
1686
1687 /**
1688  * amdgpu_vm_bo_add - add a bo to a specific vm
1689  *
1690  * @adev: amdgpu_device pointer
1691  * @vm: requested vm
1692  * @bo: amdgpu buffer object
1693  *
1694  * Add @bo into the requested vm.
1695  * Add @bo to the list of bos associated with the vm
1696  * Returns newly added bo_va or NULL for failure
1697  *
1698  * Object has to be reserved!
1699  */
1700 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1701                                       struct amdgpu_vm *vm,
1702                                       struct amdgpu_bo *bo)
1703 {
1704         struct amdgpu_bo_va *bo_va;
1705
1706         bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1707         if (bo_va == NULL) {
1708                 return NULL;
1709         }
1710         bo_va->vm = vm;
1711         bo_va->bo = bo;
1712         bo_va->ref_count = 1;
1713         INIT_LIST_HEAD(&bo_va->bo_list);
1714         INIT_LIST_HEAD(&bo_va->valids);
1715         INIT_LIST_HEAD(&bo_va->invalids);
1716         INIT_LIST_HEAD(&bo_va->vm_status);
1717
1718         if (bo)
1719                 list_add_tail(&bo_va->bo_list, &bo->va);
1720
1721         return bo_va;
1722 }
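/*
 * Illustrative sketch (hypothetical caller; va_addr is an assumed, GPU page
 * aligned virtual address and the flag choice is only an example): a BO is
 * first attached to the VM and can then be mapped; both calls require the BO
 * to be reserved.
 *
 *      struct amdgpu_bo_va *bo_va;
 *
 *      bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *      if (!bo_va)
 *              return -ENOMEM;
 *      r = amdgpu_vm_bo_map(adev, bo_va, va_addr, 0, amdgpu_bo_size(bo),
 *                           AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 */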
1723
1724 /**
1725  * amdgpu_vm_bo_map - map bo inside a vm
1726  *
1727  * @adev: amdgpu_device pointer
1728  * @bo_va: bo_va to store the address
1729  * @saddr: where to map the BO
1730  * @offset: requested offset in the BO
1731  * @flags: attributes of pages (read/write/valid/etc.)
1732  *
1733  * Add a mapping of the BO at the specified address into the VM.
1734  * Returns 0 for success, error for failure.
1735  *
1736  * Object has to be reserved and unreserved outside!
1737  */
1738 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1739                      struct amdgpu_bo_va *bo_va,
1740                      uint64_t saddr, uint64_t offset,
1741                      uint64_t size, uint64_t flags)
1742 {
1743         struct amdgpu_bo_va_mapping *mapping, *tmp;
1744         struct amdgpu_vm *vm = bo_va->vm;
1745         uint64_t eaddr;
1746
1747         /* validate the parameters */
1748         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1749             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1750                 return -EINVAL;
1751
1752         /* make sure object fit at this offset */
1753         eaddr = saddr + size - 1;
1754         if (saddr >= eaddr ||
1755             (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
1756                 return -EINVAL;
1757
1758         saddr /= AMDGPU_GPU_PAGE_SIZE;
1759         eaddr /= AMDGPU_GPU_PAGE_SIZE;
1760
1761         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1762         if (tmp) {
1763                 /* bo and tmp overlap, invalid addr */
1764                 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1765                         "0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
1766                         tmp->start, tmp->last + 1);
1767                 return -EINVAL;
1768         }
1769
1770         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1771         if (!mapping)
1772                 return -ENOMEM;
1773
1774         INIT_LIST_HEAD(&mapping->list);
1775         mapping->start = saddr;
1776         mapping->last = eaddr;
1777         mapping->offset = offset;
1778         mapping->flags = flags;
1779
1780         list_add(&mapping->list, &bo_va->invalids);
1781         amdgpu_vm_it_insert(mapping, &vm->va);
1782
1783         if (flags & AMDGPU_PTE_PRT)
1784                 amdgpu_vm_prt_get(adev);
1785
1786         return 0;
1787 }
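/*
 * Worked example of the validation above (illustrative numbers): with
 * AMDGPU_GPU_PAGE_SIZE == 4096, a mapping of size 0x2000 at saddr == 0x1000
 * gives eaddr == 0x2fff before the division, so it occupies GPU pages 1..2
 * in the interval tree; a saddr, offset or size that is not a multiple of
 * 4096 is rejected with -EINVAL.
 */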
1788
1789 /**
1790  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1791  *
1792  * @adev: amdgpu_device pointer
1793  * @bo_va: bo_va to store the address
1794  * @saddr: where to map the BO
1795  * @offset: requested offset in the BO
1796  * @flags: attributes of pages (read/write/valid/etc.)
1797  *
1798  * Add a mapping of the BO at the specified address into the VM. Replace existing
1799  * mappings as we do so.
1800  * Returns 0 for success, error for failure.
1801  *
1802  * Object has to be reserved and unreserved outside!
1803  */
1804 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1805                              struct amdgpu_bo_va *bo_va,
1806                              uint64_t saddr, uint64_t offset,
1807                              uint64_t size, uint64_t flags)
1808 {
1809         struct amdgpu_bo_va_mapping *mapping;
1810         struct amdgpu_vm *vm = bo_va->vm;
1811         uint64_t eaddr;
1812         int r;
1813
1814         /* validate the parameters */
1815         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1816             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1817                 return -EINVAL;
1818
1819         /* make sure object fit at this offset */
1820         eaddr = saddr + size - 1;
1821         if (saddr >= eaddr ||
1822             (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
1823                 return -EINVAL;
1824
1825         /* Allocate all the needed memory */
1826         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1827         if (!mapping)
1828                 return -ENOMEM;
1829
1830         r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size);
1831         if (r) {
1832                 kfree(mapping);
1833                 return r;
1834         }
1835
1836         saddr /= AMDGPU_GPU_PAGE_SIZE;
1837         eaddr /= AMDGPU_GPU_PAGE_SIZE;
1838
1839         mapping->start = saddr;
1840         mapping->last = eaddr;
1841         mapping->offset = offset;
1842         mapping->flags = flags;
1843
1844         list_add(&mapping->list, &bo_va->invalids);
1845         amdgpu_vm_it_insert(mapping, &vm->va);
1846
1847         if (flags & AMDGPU_PTE_PRT)
1848                 amdgpu_vm_prt_get(adev);
1849
1850         return 0;
1851 }
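/*
 * Illustrative contrast with amdgpu_vm_bo_map() (sketch; va_addr and flags
 * as in the earlier example): where amdgpu_vm_bo_map() returns -EINVAL on an
 * overlapping range, this variant first removes whatever overlaps via
 * amdgpu_vm_bo_clear_mappings() and then inserts the new mapping, so
 * remapping an already used range simply succeeds.
 *
 *      r = amdgpu_vm_bo_replace_map(adev, bo_va, va_addr, 0,
 *                                   amdgpu_bo_size(bo), flags);
 */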
1852
1853 /**
1854  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1855  *
1856  * @adev: amdgpu_device pointer
1857  * @bo_va: bo_va to remove the address from
1858  * @saddr: where the BO is mapped
1859  *
1860  * Remove a mapping of the BO at the specified address from the VM.
1861  * Returns 0 for success, error for failure.
1862  *
1863  * Object has to be reserved and unreserved outside!
1864  */
1865 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1866                        struct amdgpu_bo_va *bo_va,
1867                        uint64_t saddr)
1868 {
1869         struct amdgpu_bo_va_mapping *mapping;
1870         struct amdgpu_vm *vm = bo_va->vm;
1871         bool valid = true;
1872
1873         saddr /= AMDGPU_GPU_PAGE_SIZE;
1874
1875         list_for_each_entry(mapping, &bo_va->valids, list) {
1876                 if (mapping->start == saddr)
1877                         break;
1878         }
1879
1880         if (&mapping->list == &bo_va->valids) {
1881                 valid = false;
1882
1883                 list_for_each_entry(mapping, &bo_va->invalids, list) {
1884                         if (mapping->start == saddr)
1885                                 break;
1886                 }
1887
1888                 if (&mapping->list == &bo_va->invalids)
1889                         return -ENOENT;
1890         }
1891
1892         list_del(&mapping->list);
1893         amdgpu_vm_it_remove(mapping, &vm->va);
1894         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1895
1896         if (valid)
1897                 list_add(&mapping->list, &vm->freed);
1898         else
1899                 amdgpu_vm_free_mapping(adev, vm, mapping,
1900                                        bo_va->last_pt_update);
1901
1902         return 0;
1903 }
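/*
 * Illustrative sketch (va_addr as passed to the earlier map call): the unmap
 * is keyed on the mapping's start address; -ENOENT means no mapping started
 * there. Valid mappings are only queued on vm->freed here and actually
 * cleared later by amdgpu_vm_clear_freed().
 *
 *      r = amdgpu_vm_bo_unmap(adev, bo_va, va_addr);
 */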
1904
1905 /**
1906  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1907  *
1908  * @adev: amdgpu_device pointer
1909  * @vm: VM structure to use
1910  * @saddr: start of the range
1911  * @size: size of the range
1912  *
1913  * Remove all mappings in a range, split them as appropriate.
1914  * Returns 0 for success, error for failure.
1915  */
1916 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1917                                 struct amdgpu_vm *vm,
1918                                 uint64_t saddr, uint64_t size)
1919 {
1920         struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1921         LIST_HEAD(removed);
1922         uint64_t eaddr;
1923
1924         eaddr = saddr + size - 1;
1925         saddr /= AMDGPU_GPU_PAGE_SIZE;
1926         eaddr /= AMDGPU_GPU_PAGE_SIZE;
1927
1928         /* Allocate all the needed memory */
1929         before = kzalloc(sizeof(*before), GFP_KERNEL);
1930         if (!before)
1931                 return -ENOMEM;
1932         INIT_LIST_HEAD(&before->list);
1933
1934         after = kzalloc(sizeof(*after), GFP_KERNEL);
1935         if (!after) {
1936                 kfree(before);
1937                 return -ENOMEM;
1938         }
1939         INIT_LIST_HEAD(&after->list);
1940
1941         /* Now gather all removed mappings */
1942         tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1943         while (tmp) {
1944                 /* Remember mapping split at the start */
1945                 if (tmp->start < saddr) {
1946                         before->start = tmp->start;
1947                         before->last = saddr - 1;
1948                         before->offset = tmp->offset;
1949                         before->flags = tmp->flags;
1950                         list_add(&before->list, &tmp->list);
1951                 }
1952
1953                 /* Remember mapping split at the end */
1954                 if (tmp->last > eaddr) {
1955                         after->start = eaddr + 1;
1956                         after->last = tmp->last;
1957                         after->offset = tmp->offset;
1958                         after->offset += after->start - tmp->start;
1959                         after->flags = tmp->flags;
1960                         list_add(&after->list, &tmp->list);
1961                 }
1962
1963                 list_del(&tmp->list);
1964                 list_add(&tmp->list, &removed);
1965
1966                 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
1967         }
1968
1969         /* And free them up */
1970         list_for_each_entry_safe(tmp, next, &removed, list) {
1971                 amdgpu_vm_it_remove(tmp, &vm->va);
1972                 list_del(&tmp->list);
1973
1974                 if (tmp->start < saddr)
1975                         tmp->start = saddr;
1976                 if (tmp->last > eaddr)
1977                         tmp->last = eaddr;
1978
1979                 list_add(&tmp->list, &vm->freed);
1980                 trace_amdgpu_vm_bo_unmap(NULL, tmp);
1981         }
1982
1983         /* Insert partial mapping before the range */
1984         if (!list_empty(&before->list)) {
1985                 amdgpu_vm_it_insert(before, &vm->va);
1986                 if (before->flags & AMDGPU_PTE_PRT)
1987                         amdgpu_vm_prt_get(adev);
1988         } else {
1989                 kfree(before);
1990         }
1991
1992         /* Insert partial mapping after the range */
1993         if (!list_empty(&after->list)) {
1994                 amdgpu_vm_it_insert(after, &vm->va);
1995                 if (after->flags & AMDGPU_PTE_PRT)
1996                         amdgpu_vm_prt_get(adev);
1997         } else {
1998                 kfree(after);
1999         }
2000
2001         return 0;
2002 }
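/*
 * Worked example (illustrative ranges, in GPU pages): clearing pages 8..11
 * out of a mapping covering 4..15 produces a "before" remainder for 4..7
 * with the original offset, an "after" remainder for 12..15 with the offset
 * advanced past the removed part, and queues the clamped middle piece 8..11
 * on vm->freed for clearing.
 */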
2003
2004 /**
2005  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2006  *
2007  * @adev: amdgpu_device pointer
2008  * @bo_va: requested bo_va
2009  *
2010  * Remove @bo_va->bo from the requested vm.
2011  *
2012  * Object has to be reserved!
2013  */
2014 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2015                       struct amdgpu_bo_va *bo_va)
2016 {
2017         struct amdgpu_bo_va_mapping *mapping, *next;
2018         struct amdgpu_vm *vm = bo_va->vm;
2019
2020         list_del(&bo_va->bo_list);
2021
2022         spin_lock(&vm->status_lock);
2023         list_del(&bo_va->vm_status);
2024         spin_unlock(&vm->status_lock);
2025
2026         list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2027                 list_del(&mapping->list);
2028                 amdgpu_vm_it_remove(mapping, &vm->va);
2029                 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2030                 list_add(&mapping->list, &vm->freed);
2031         }
2032         list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2033                 list_del(&mapping->list);
2034                 amdgpu_vm_it_remove(mapping, &vm->va);
2035                 amdgpu_vm_free_mapping(adev, vm, mapping,
2036                                        bo_va->last_pt_update);
2037         }
2038
2039         dma_fence_put(bo_va->last_pt_update);
2040         kfree(bo_va);
2041 }
2042
2043 /**
2044  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2045  *
2046  * @adev: amdgpu_device pointer
2048  * @bo: amdgpu buffer object
2049  *
2050  * Mark @bo as invalid.
2051  */
2052 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2053                              struct amdgpu_bo *bo)
2054 {
2055         struct amdgpu_bo_va *bo_va;
2056
2057         list_for_each_entry(bo_va, &bo->va, bo_list) {
2058                 spin_lock(&bo_va->vm->status_lock);
2059                 if (list_empty(&bo_va->vm_status))
2060                         list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
2061                 spin_unlock(&bo_va->vm->status_lock);
2062         }
2063 }
2064
2065 /**
2066  * amdgpu_vm_init - initialize a vm instance
2067  *
2068  * @adev: amdgpu_device pointer
2069  * @vm: requested vm
2070  *
2071  * Init @vm fields.
2072  */
2073 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2074 {
2075         const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2076                 AMDGPU_VM_PTE_COUNT * 8);
2077         unsigned ring_instance;
2078         struct amdgpu_ring *ring;
2079         struct amd_sched_rq *rq;
2080         int i, r;
2081
2082         for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2083                 vm->ids[i] = NULL;
2084         vm->va = RB_ROOT;
2085         vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
2086         spin_lock_init(&vm->status_lock);
2087         INIT_LIST_HEAD(&vm->invalidated);
2088         INIT_LIST_HEAD(&vm->cleared);
2089         INIT_LIST_HEAD(&vm->freed);
2090
2091         /* create scheduler entity for page table updates */
2092
2093         ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
2094         ring_instance %= adev->vm_manager.vm_pte_num_rings;
2095         ring = adev->vm_manager.vm_pte_rings[ring_instance];
2096         rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
2097         r = amd_sched_entity_init(&ring->sched, &vm->entity,
2098                                   rq, amdgpu_sched_jobs);
2099         if (r)
2100                 return r;
2101
2102         vm->last_dir_update = NULL;
2103
2104         r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
2105                              AMDGPU_GEM_DOMAIN_VRAM,
2106                              AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
2107                              AMDGPU_GEM_CREATE_SHADOW |
2108                              AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
2109                              AMDGPU_GEM_CREATE_VRAM_CLEARED,
2110                              NULL, NULL, &vm->root.bo);
2111         if (r)
2112                 goto error_free_sched_entity;
2113
2114         r = amdgpu_bo_reserve(vm->root.bo, false);
2115         if (r)
2116                 goto error_free_root;
2117
2118         vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
2119         amdgpu_bo_unreserve(vm->root.bo);
2120
2121         return 0;
2122
2123 error_free_root:
2124         amdgpu_bo_unref(&vm->root.bo->shadow);
2125         amdgpu_bo_unref(&vm->root.bo);
2126         vm->root.bo = NULL;
2127
2128 error_free_sched_entity:
2129         amd_sched_entity_fini(&ring->sched, &vm->entity);
2130
2131         return r;
2132 }
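/*
 * Illustrative lifecycle (sketch; fpriv stands for an assumed per-file
 * private structure embedding the VM): the VM is created when userspace
 * opens the device and torn down on release, after all bo_vas have been
 * removed.
 *
 *      r = amdgpu_vm_init(adev, &fpriv->vm);
 *      if (r)
 *              return r;
 *      ...
 *      amdgpu_vm_fini(adev, &fpriv->vm);
 */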
2133
2134 /**
2135  * amdgpu_vm_free_levels - free PD/PT levels
2136  *
2137  * @level: PD/PT starting level to free
2138  *
2139  * Free the page directory or page table level and all sub levels.
2140  */
2141 static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
2142 {
2143         unsigned i;
2144
2145         if (level->bo) {
2146                 amdgpu_bo_unref(&level->bo->shadow);
2147                 amdgpu_bo_unref(&level->bo);
2148         }
2149
2150         if (level->entries)
2151                 for (i = 0; i <= level->last_entry_used; i++)
2152                         amdgpu_vm_free_levels(&level->entries[i]);
2153
2154         drm_free_large(level->entries);
2155 }
2156
2157 /**
2158  * amdgpu_vm_fini - tear down a vm instance
2159  *
2160  * @adev: amdgpu_device pointer
2161  * @vm: requested vm
2162  *
2163  * Tear down @vm.
2164  * Unbind the VM and remove all bos from the vm bo list
2165  */
2166 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2167 {
2168         struct amdgpu_bo_va_mapping *mapping, *tmp;
2169         bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
2170
2171         amd_sched_entity_fini(vm->entity.sched, &vm->entity);
2172
2173         if (!RB_EMPTY_ROOT(&vm->va)) {
2174                 dev_err(adev->dev, "still active bo inside vm\n");
2175         }
2176         rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
2177                 list_del(&mapping->list);
2178                 amdgpu_vm_it_remove(mapping, &vm->va);
2179                 kfree(mapping);
2180         }
2181         list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2182                 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2183                         amdgpu_vm_prt_fini(adev, vm);
2184                         prt_fini_needed = false;
2185                 }
2186
2187                 list_del(&mapping->list);
2188                 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2189         }
2190
2191         amdgpu_vm_free_levels(&vm->root);
2192         dma_fence_put(vm->last_dir_update);
2193 }
2194
2195 /**
2196  * amdgpu_vm_manager_init - init the VM manager
2197  *
2198  * @adev: amdgpu_device pointer
2199  *
2200  * Initialize the VM manager structures
2201  */
2202 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2203 {
2204         unsigned i;
2205
2206         INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
2207
2208         /* skip over VMID 0, since it is the system VM */
2209         for (i = 1; i < adev->vm_manager.num_ids; ++i) {
2210                 amdgpu_vm_reset_id(adev, i);
2211                 amdgpu_sync_create(&adev->vm_manager.ids[i].active);
2212                 list_add_tail(&adev->vm_manager.ids[i].list,
2213                               &adev->vm_manager.ids_lru);
2214         }
2215
2216         adev->vm_manager.fence_context =
2217                 dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2218         for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2219                 adev->vm_manager.seqno[i] = 0;
2220
2221         atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
2222         atomic64_set(&adev->vm_manager.client_counter, 0);
2223         spin_lock_init(&adev->vm_manager.prt_lock);
2224         atomic_set(&adev->vm_manager.num_prt_users, 0);
2225 }
2226
2227 /**
2228  * amdgpu_vm_manager_fini - cleanup VM manager
2229  *
2230  * @adev: amdgpu_device pointer
2231  *
2232  * Cleanup the VM manager and free resources.
2233  */
2234 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2235 {
2236         unsigned i;
2237
2238         for (i = 0; i < AMDGPU_NUM_VM; ++i) {
2239                 struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
2240
2241                 amdgpu_sync_free(&adev->vm_manager.ids[i].active);
2242                 dma_fence_put(id->flushed_updates);
2243                 dma_fence_put(id->last_flush);
2244         }
2245 }