Though no issue has been exposed yet, it is possible for another
thread to release the entry while we are dereferencing it outside the
lock. Fix it by moving the dereference inside the lock.
Signed-off-by: Jike Song <jike.song@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
 static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
 	struct gvt_dma *entry;
+	kvm_pfn_t pfn;

 	mutex_lock(&vgpu->vdev.cache_lock);
+
 	entry = __gvt_cache_find(vgpu, gfn);
-	mutex_unlock(&vgpu->vdev.cache_lock);
+	pfn = (entry == NULL) ? 0 : entry->pfn;
-	return entry == NULL ? 0 : entry->pfn;
+	mutex_unlock(&vgpu->vdev.cache_lock);
+	return pfn;
 }

 static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
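
For illustration only (not part of this patch): below is a minimal
standalone sketch of the same copy-under-the-lock pattern, written with
pthreads and hypothetical names (cache_find_pfn, struct cache_entry)
rather than the GVT cache_lock and gvt_dma types above. The point is
that the entry's field is copied into a local while the lock is still
held, so a concurrent release of the entry cannot race with the
dereference.

/*
 * Illustration only: copy the value out of the entry while the lock
 * is held, then return the copy after unlocking.
 */
#include <pthread.h>
#include <stddef.h>

struct cache_entry {
	unsigned long gfn;
	unsigned long pfn;
	struct cache_entry *next;
};

static struct cache_entry *cache_head;
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold cache_lock. */
static struct cache_entry *__cache_find(unsigned long gfn)
{
	struct cache_entry *e;

	for (e = cache_head; e; e = e->next)
		if (e->gfn == gfn)
			return e;
	return NULL;
}

/* Safe lookup: dereference the entry before dropping the lock. */
static unsigned long cache_find_pfn(unsigned long gfn)
{
	struct cache_entry *e;
	unsigned long pfn;

	pthread_mutex_lock(&cache_lock);
	e = __cache_find(gfn);
	pfn = e ? e->pfn : 0;	/* entry cannot be freed while locked */
	pthread_mutex_unlock(&cache_lock);

	return pfn;
}

Returning the entry pointer and dereferencing it after the unlock would
reintroduce the window the patch closes: another thread could free the
entry between the unlock and the read.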