drm/amdgpu: stop using a bo list entry for the VM PTs
author    Christian König <christian.koenig@amd.com>
          Wed, 28 Sep 2016 10:27:37 +0000 (12:27 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
          Tue, 25 Oct 2016 18:38:28 +0000 (14:38 -0400)
Saves us a bit of memory.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
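
For context, a rough sketch of the data-structure change and the per-entry saving. The stand-in definitions below (list_head, ttm_validate_buffer, amdgpu_bo_list_entry) and the struct names amdgpu_vm_pt_old/amdgpu_vm_pt_new are assumptions inferred from the fields the removed code initialized (robj, priority, tv.bo, tv.shared, user_pages); they are not the real kernel definitions, which are larger, so the printed sizes only illustrate the direction of the saving per page table:

#include <stdio.h>
#include <stdint.h>

/* Stand-in types, not the real kernel structs (assumption). */
struct list_head { void *next, *prev; };
struct ttm_validate_buffer { struct list_head head; void *bo; _Bool shared; };
struct amdgpu_bo_list_entry {
	void *robj;                     /* BO pointer */
	struct ttm_validate_buffer tv;  /* list node + BO + shared flag */
	unsigned priority;
	void **user_pages;
};

/* Before: every page table embedded a full bo_list_entry. */
struct amdgpu_vm_pt_old { struct amdgpu_bo_list_entry entry; uint64_t addr; };

/* After: only the BO pointer is kept per page table. */
struct amdgpu_vm_pt_new { void *bo; uint64_t addr; };

int main(void)
{
	/* The difference is paid once per PDE in every VM. */
	printf("old: %zu bytes, new: %zu bytes per page table entry\n",
	       sizeof(struct amdgpu_vm_pt_old),
	       sizeof(struct amdgpu_vm_pt_new));
	return 0;
}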

drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 848e4ed7e32aeef2929fa0da60e8e8275f6af7b0..9d9831f2e2d00a73672f33ecda6ea18764d9cf7d 100644
@@ -820,8 +820,8 @@ struct amdgpu_ring {
 #define AMDGPU_VM_FAULT_STOP_ALWAYS    2
 
 struct amdgpu_vm_pt {
-       struct amdgpu_bo_list_entry     entry;
-       uint64_t                        addr;
+       struct amdgpu_bo        *bo;
+       uint64_t                addr;
 };
 
 struct amdgpu_vm {
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c171b16cf0f11409567b3103eab7b6e5db35ab39..b8620d3dd61ef375888dc3edba37ea9e46fd44b5 100644
@@ -142,12 +142,12 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
        /* add the vm page table to the list */
        for (i = 0; i <= vm->max_pde_used; ++i) {
-               struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+               struct amdgpu_bo *bo = vm->page_tables[i].bo;
 
-               if (!entry->robj)
+               if (!bo)
                        continue;
 
-               r = validate(param, entry->robj);
+               r = validate(param, bo);
                if (r)
                        return r;
        }
@@ -171,12 +171,12 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 
        spin_lock(&glob->lru_lock);
        for (i = 0; i <= vm->max_pde_used; ++i) {
-               struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+               struct amdgpu_bo *bo = vm->page_tables[i].bo;
 
-               if (!entry->robj)
+               if (!bo)
                        continue;
 
-               ttm_bo_move_to_lru_tail(&entry->robj->tbo);
+               ttm_bo_move_to_lru_tail(&bo->tbo);
        }
        spin_unlock(&glob->lru_lock);
 }
@@ -674,7 +674,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 
        /* walk over the address space and update the page directory */
        for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
-               struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
+               struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
                uint64_t pde, pt;
 
                if (bo == NULL)
@@ -790,11 +790,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
        /* initialize the variables */
        addr = start;
        pt_idx = addr >> amdgpu_vm_block_size;
-       pt = vm->page_tables[pt_idx].entry.robj;
+       pt = vm->page_tables[pt_idx].bo;
        if (params->shadow) {
                if (!pt->shadow)
                        return;
-               pt = vm->page_tables[pt_idx].entry.robj->shadow;
+               pt = pt->shadow;
        }
        if ((addr & ~mask) == (end & ~mask))
                nptes = end - addr;
@@ -813,11 +813,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
        /* walk over the address space and update the page tables */
        while (addr < end) {
                pt_idx = addr >> amdgpu_vm_block_size;
-               pt = vm->page_tables[pt_idx].entry.robj;
+               pt = vm->page_tables[pt_idx].bo;
                if (params->shadow) {
                        if (!pt->shadow)
                                return;
-                       pt = vm->page_tables[pt_idx].entry.robj->shadow;
+                       pt = pt->shadow;
                }
 
                if ((addr & ~mask) == (end & ~mask))
@@ -1425,11 +1425,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        /* walk over the address space and allocate the page tables */
        for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
                struct reservation_object *resv = vm->page_directory->tbo.resv;
-               struct amdgpu_bo_list_entry *entry;
                struct amdgpu_bo *pt;
 
-               entry = &vm->page_tables[pt_idx].entry;
-               if (entry->robj)
+               if (vm->page_tables[pt_idx].bo)
                        continue;
 
                r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
@@ -1463,11 +1461,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                        }
                }
 
-               entry->robj = pt;
-               entry->priority = 0;
-               entry->tv.bo = &entry->robj->tbo;
-               entry->tv.shared = true;
-               entry->user_pages = NULL;
+               vm->page_tables[pt_idx].bo = pt;
                vm->page_tables[pt_idx].addr = 0;
        }
 
@@ -1719,7 +1713,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        }
 
        for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
-               struct amdgpu_bo *pt = vm->page_tables[i].entry.robj;
+               struct amdgpu_bo *pt = vm->page_tables[i].bo;
 
                if (!pt)
                        continue;