 	ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
 	if (ret == 0) {
-		nv_wo32(*pgpuobj, 0x00, dmaobj->flags0 | nv_mclass(dmaobj));
-		nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->base.limit));
-		nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start));
-		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->base.limit) << 24 |
-					upper_32_bits(dmaobj->base.start));
-		nv_wo32(*pgpuobj, 0x10, 0x00000000);
-		nv_wo32(*pgpuobj, 0x14, dmaobj->flags5);
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | nv_mclass(dmaobj));
+		nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->base.limit));
+		nvkm_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start));
+		nvkm_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->base.limit) << 24 |
+					  upper_32_bits(dmaobj->base.start));
+		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x14, dmaobj->flags5);
+		nvkm_done(*pgpuobj);
 	}
 	return ret;
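
For reference: the conversion above brackets the object update with
nvkm_kmap()/nvkm_done() and switches the individual writes from nv_wo32()
to nvkm_wo32(), so CPU access to the object is acquired once around the
whole update rather than per write.  A minimal sketch of that write-side
pattern, assuming the in-tree <core/gpuobj.h> accessors; the helper name
and its parameter list are invented for illustration and are not part of
the patch:

#include <core/gpuobj.h>

/* Hypothetical helper mirroring the descriptor layout written above. */
static void
example_write_dma_desc(struct nvkm_gpuobj *gpuobj, u32 flags0, u32 flags5,
		       u64 start, u64 limit)
{
	nvkm_kmap(gpuobj);		/* acquire CPU access once */
	nvkm_wo32(gpuobj, 0x00, flags0);
	nvkm_wo32(gpuobj, 0x04, lower_32_bits(limit));
	nvkm_wo32(gpuobj, 0x08, lower_32_bits(start));
	nvkm_wo32(gpuobj, 0x0c, upper_32_bits(limit) << 24 |
				upper_32_bits(start));
	nvkm_wo32(gpuobj, 0x10, 0x00000000);
	nvkm_wo32(gpuobj, 0x14, flags5);
	nvkm_done(gpuobj);		/* release the mapping again */
}
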
 	ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
 	if (ret == 0) {
-		nv_wo32(*pgpuobj, 0x00, dmaobj->flags0);
-		nv_wo32(*pgpuobj, 0x04, dmaobj->base.start >> 8);
-		nv_wo32(*pgpuobj, 0x08, dmaobj->base.limit >> 8);
-		nv_wo32(*pgpuobj, 0x0c, 0x00000000);
-		nv_wo32(*pgpuobj, 0x10, 0x00000000);
-		nv_wo32(*pgpuobj, 0x14, 0x00000000);
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
+		nvkm_wo32(*pgpuobj, 0x04, dmaobj->base.start >> 8);
+		nvkm_wo32(*pgpuobj, 0x08, dmaobj->base.limit >> 8);
+		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x14, 0x00000000);
+		nvkm_done(*pgpuobj);
 	}
 	return ret;
 		struct nvkm_gpuobj *pgt = mmu->vm->pgt[0].obj[0];
 		if (!dmaobj->base.start)
 			return nvkm_gpuobj_dup(parent, pgt, pgpuobj);
-		offset = nv_ro32(pgt, 8 + (offset >> 10));
+		nvkm_kmap(pgt);
+		offset = nvkm_ro32(pgt, 8 + (offset >> 10));
 		offset &= 0xfffff000;
+		nvkm_done(pgt);
 	}
 	ret = nvkm_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
 	*pgpuobj = gpuobj;
 	if (ret == 0) {
-		nv_wo32(*pgpuobj, 0x00, dmaobj->flags0 | (adjust << 20));
-		nv_wo32(*pgpuobj, 0x04, length);
-		nv_wo32(*pgpuobj, 0x08, dmaobj->flags2 | offset);
-		nv_wo32(*pgpuobj, 0x0c, dmaobj->flags2 | offset);
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | (adjust << 20));
+		nvkm_wo32(*pgpuobj, 0x04, length);
+		nvkm_wo32(*pgpuobj, 0x08, dmaobj->flags2 | offset);
+		nvkm_wo32(*pgpuobj, 0x0c, dmaobj->flags2 | offset);
+		nvkm_done(*pgpuobj);
 	}
 	return ret;
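
The nv04 hunk above additionally converts a read: the PCI(E)GART page-table
lookup is now bracketed by nvkm_kmap(pgt)/nvkm_done(pgt), with nvkm_ro32()
replacing nv_ro32().  A small sketch of that read side, again assuming the
in-tree accessors; the helper name is invented, and the 8-byte table offset,
the >> 10 shift (presumably 4-byte PTEs covering 4 KiB pages) and the
0xfffff000 mask are taken straight from the hunk rather than from any other
source:

#include <core/gpuobj.h>

/* Hypothetical helper: look up the page address for 'addr' in the GART
 * page table, bracketed by nvkm_kmap()/nvkm_done() as in the hunk above. */
static u32
example_lookup_pte(struct nvkm_gpuobj *pgt, u64 addr)
{
	u32 pte;

	nvkm_kmap(pgt);
	pte = nvkm_ro32(pgt, 8 + (addr >> 10));
	nvkm_done(pgt);

	return pte & 0xfffff000;	/* keep the page-address bits, as the hunk does */
}
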
 	ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
 	if (ret == 0) {
-		nv_wo32(*pgpuobj, 0x00, dmaobj->flags0 | nv_mclass(dmaobj));
-		nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->base.limit));
-		nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start));
-		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->base.limit) << 24 |
-					upper_32_bits(dmaobj->base.start));
-		nv_wo32(*pgpuobj, 0x10, 0x00000000);
-		nv_wo32(*pgpuobj, 0x14, dmaobj->flags5);
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | nv_mclass(dmaobj));
+		nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->base.limit));
+		nvkm_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start));
+		nvkm_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->base.limit) << 24 |
+					  upper_32_bits(dmaobj->base.start));
+		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x14, dmaobj->flags5);
+		nvkm_done(*pgpuobj);
 	}
 	return ret;