Makes it easier to move the page table updates into the rings.
Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
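
The interface change in a nutshell: instead of translating the flags once
in the generic code and then calling the ASIC hook for every single page,
the generic code now hands the whole ttm_mem_reg plus a page count to the
hook and lets the chip code do the address and flag translation itself.
Roughly:

	/* before */
	flags = radeon_asic_vm_page_flags(rdev, vm, bo_va->flags);
	for (i = 0; i < ngpu_pages; i++)
		radeon_asic_vm_set_page(rdev, vm, pfn + i, addr, flags);

	/* after */
	radeon_asic_vm_set_page(rdev, vm, pfn, mem, ngpu_pages,
				bo_va->flags);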
#define R600_PTE_READABLE (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)
-uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
- struct radeon_vm *vm,
- uint32_t flags)
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
{
uint32_t r600_flags = 0;
}
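(The body elided above is not touched by this patch, only the now unused
vm parameter is dropped from the signature. The function just translates
the generic RADEON_VM_PAGE_* flags into R600_PTE_* hardware bits, along
these lines:

	if (flags & RADEON_VM_PAGE_VALID)
		r600_flags |= R600_PTE_VALID;
	if (flags & RADEON_VM_PAGE_READABLE)
		r600_flags |= R600_PTE_READABLE;
	if (flags & RADEON_VM_PAGE_WRITEABLE)
		r600_flags |= R600_PTE_WRITEABLE;
	return r600_flags;
)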
void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
- unsigned pfn, uint64_t addr, uint32_t flags)
+ unsigned pfn, struct ttm_mem_reg *mem,
+ unsigned npages, uint32_t flags)
{
void __iomem *ptr = (void *)vm->pt;
+ uint64_t addr;
+ int i;
+
- addr = addr & 0xFFFFFFFFFFFFF000ULL;
- addr |= flags;
- writeq(addr, ptr + (pfn * 8));
+ addr = flags = cayman_vm_page_flags(rdev, flags);
+
+ for (i = 0; i < npages; ++i, ++pfn) {
+ if (mem) {
+ addr = radeon_vm_get_addr(rdev, mem, i);
+ addr = addr & 0xFFFFFFFFFFFFF000ULL;
+ addr |= flags;
+ }
+ writeq(addr, ptr + (pfn * 8));
+ }
}
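Note the addr = flags assignment above: for entries without backing
memory (mem == NULL) the loop body never overwrites addr, so the PTE
written is just the flag bits with a zero address, the same value the
old code produced when the caller passed addr == 0. Each iteration is
effectively:

	uint64_t pte = flags;	/* mem == NULL: flags only, address 0 */
	if (mem)
		pte = (radeon_vm_get_addr(rdev, mem, i) &
		       0xFFFFFFFFFFFFF000ULL) | flags;
	writeq(pte, ptr + (pfn * 8));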
void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ib *ib)
struct {
int (*init)(struct radeon_device *rdev);
void (*fini)(struct radeon_device *rdev);
- uint32_t (*page_flags)(struct radeon_device *rdev,
- struct radeon_vm *vm,
- uint32_t flags);
void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
- unsigned pfn, uint64_t addr, uint32_t flags);
+ unsigned pfn, struct ttm_mem_reg *mem,
+ unsigned npages, uint32_t flags);
} vm;
/* ring specific callbacks */
struct {
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
-#define radeon_asic_vm_page_flags(rdev, v, flags) (rdev)->asic->vm.page_flags((rdev), (v), (flags))
-#define radeon_asic_vm_set_page(rdev, v, pfn, addr, flags) (rdev)->asic->vm.set_page((rdev), (v), (pfn), (addr), (flags))
+#define radeon_asic_vm_set_page(rdev, v, pfn, mem, npages, flags) (rdev)->asic->vm.set_page((rdev), (v), (pfn), (mem), (npages), (flags))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_fence *fence);
+u64 radeon_vm_get_addr(struct radeon_device *rdev,
+ struct ttm_mem_reg *mem,
+ unsigned pfn);
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo,
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .page_flags = &cayman_vm_page_flags,
.set_page = &cayman_vm_set_page,
},
.ring = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .page_flags = &cayman_vm_page_flags,
.set_page = &cayman_vm_set_page,
},
.ring = {
.vm = {
.init = &si_vm_init,
.fini = &si_vm_fini,
- .page_flags = &cayman_vm_page_flags,
.set_page = &cayman_vm_set_page,
},
.ring = {
void cayman_vm_fini(struct radeon_device *rdev);
void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ib *ib);
-uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
- struct radeon_vm *vm,
- uint32_t flags);
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
- unsigned pfn, uint64_t addr, uint32_t flags);
+ unsigned pfn, struct ttm_mem_reg *mem,
+ unsigned npages, uint32_t flags);
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
/* DCE6 - SI */
r = radeon_asic_vm_init(rdev);
if (r)
return r;
-
+
rdev->vm_manager.enabled = true;
r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
* to (cayman+).
* Returns the physical address of the page.
*/
-static u64 radeon_vm_get_addr(struct radeon_device *rdev,
- struct ttm_mem_reg *mem,
- unsigned pfn)
+u64 radeon_vm_get_addr(struct radeon_device *rdev,
+ struct ttm_mem_reg *mem,
+ unsigned pfn)
{
u64 addr = 0;
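(The body, elided here, is otherwise unchanged; dropping the static
qualifier is what lets cayman_vm_set_page call it. For VRAM placements
the lookup boils down to something like the following, assuming the
usual vram_base_offset handling:

	case TTM_PL_VRAM:
		addr = (mem->start << PAGE_SHIFT) +
		       pfn * RADEON_GPU_PAGE_SIZE +
		       rdev->vm_manager.vram_base_offset;
		break;
)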
struct ttm_mem_reg *mem)
{
struct radeon_bo_va *bo_va;
- unsigned ngpu_pages, i;
- uint64_t addr = 0, pfn;
- uint32_t flags;
+ unsigned ngpu_pages;
+ uint64_t pfn;
/* nothing to do if vm isn't bound */
if (vm->sa_bo == NULL)
bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
}
}
- pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
- flags = radeon_asic_vm_page_flags(rdev, bo_va->vm, bo_va->flags);
- for (i = 0, addr = 0; i < ngpu_pages; i++) {
- if (mem && bo_va->valid) {
- addr = radeon_vm_get_addr(rdev, mem, i);
- }
- radeon_asic_vm_set_page(rdev, bo_va->vm, i + pfn, addr, flags);
+ if (!bo_va->valid) {
+ mem = NULL;
}
+ pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
+ radeon_asic_vm_set_page(rdev, bo_va->vm, pfn, mem, ngpu_pages, bo_va->flags);
radeon_fence_unref(&vm->last_flush);
return 0;
}
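
With that, radeon_vm_bo_update_pte no longer loops over the pages
itself: an invalid mapping is expressed by passing mem = NULL, and the
ASIC hook gets a single call covering the whole ngpu_pages range.
Handing the complete range to the callback in one go is what should
make it possible to emit these updates as ring commands later on.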