/* Number of contiguous pages speculatively mapped per fault to cut down on fault count */
#define TTM_BO_VM_NUM_PREFAULT 16
static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
- struct vm_area_struct *vma,
struct vm_fault *vmf)
{
	int ret = 0;

	if (likely(!bo->moving))
		goto out_unlock;

	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;
ttm_bo_reference(bo);
- up_read(&vma->vm_mm->mmap_sem);
+ up_read(&vmf->vma->vm_mm->mmap_sem);
(void) dma_fence_wait(bo->moving, true);
ttm_bo_unreserve(bo);
ttm_bo_unref(&bo);
		goto out_unlock;
	}

out_unlock:
	return ret;
}
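
The helper above follows the general "take a reference, drop mmap_sem before sleeping, and let the core re-run the fault" pattern; it is not TTM-specific. A minimal driver-agnostic sketch of the same pattern, assuming a hypothetical my_obj object with my_obj_get/my_obj_put/my_obj_is_busy/my_obj_wait helpers (none of these are TTM or core-mm APIs):

/*
 * Illustrative only: my_obj and its helpers are hypothetical
 * stand-ins, not TTM or core-mm APIs.
 */
static int my_fault(struct vm_fault *vmf)
{
	struct my_obj *obj = vmf->vma->vm_private_data;

	if (my_obj_is_busy(obj) && (vmf->flags & FAULT_FLAG_ALLOW_RETRY)) {
		if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/* Pin the object so it survives dropping mmap_sem. */
			my_obj_get(obj);
			up_read(&vmf->vma->vm_mm->mmap_sem);
			my_obj_wait(obj);	/* may sleep */
			my_obj_put(obj);
		}
		/* The core re-takes mmap_sem and re-runs the fault. */
		return VM_FAULT_RETRY;
	}

	/* ... install the PTE here ... */
	return VM_FAULT_NOPAGE;
}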
-static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ttm_bo_vm_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;
	int retval = VM_FAULT_NOPAGE;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY)
			return VM_FAULT_NOPAGE;

if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
ttm_bo_reference(bo);
- up_read(&vma->vm_mm->mmap_sem);
+ up_read(&vmf->vma->vm_mm->mmap_sem);
(void) ttm_bo_wait_unreserved(bo);
ttm_bo_unref(&bo);
}
			return VM_FAULT_RETRY;
		}

		return VM_FAULT_NOPAGE;
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
* move.
*/
- ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
+ ret = ttm_bo_vm_fault_idle(bo, vmf);
if (unlikely(ret != 0)) {
retval = ret;
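
For readers tracking the interface change this hunk adapts to: ->fault no longer receives the VMA as a separate argument, so handlers recover it from vmf->vma, exactly as ttm_bo_vm_fault now does in its first line. A minimal sketch of the wiring under that signature, with my_fault, my_vm_ops and my_mmap as hypothetical names (my_fault as sketched after ttm_bo_vm_fault_idle above):

static const struct vm_operations_struct my_vm_ops = {
	.fault = my_fault,	/* int (*fault)(struct vm_fault *) after this change */
};

static int my_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* Stash the object; the fault handler finds it via vmf->vma. */
	vma->vm_private_data = filp->private_data;
	vma->vm_ops = &my_vm_ops;
	return 0;
}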