struct fasync_struct *async_queue;
struct work_struct work;
struct workqueue_struct *workqueue;
+ struct mutex lock;
};
/* To track the allocated memory buffer */
u32 end;
};
/*
 * Driver-global state.
 *
 * NOTE(review): the old global vpu_lock spinlock has been removed; all
 * serialization now goes through vpu_data.lock (a mutex, initialized in
 * probe), which allows the ioctl paths to sleep (copy_{to,from}_user,
 * DMA allocation) while holding the lock.
 */
static LIST_HEAD(head);		/* allocated DMA buffer records; guarded by vpu_data.lock */
static int vpu_major;		/* dynamically assigned char-device major */
*/
/*
 * vpu_open - open() handler for the VPU char device.
 *
 * Bumps the open reference count under vpu_data.lock (mutex; this path may
 * sleep) and stashes the driver-global state in filp->private_data for the
 * other file operations.
 *
 * Returns 0 always.
 */
static int vpu_open(struct inode *inode, struct file *filp)
{
	mutex_lock(&vpu_data.lock);
	open_count++;
	/* Implicit void* conversion; the old (void *) cast was redundant. */
	filp->private_data = &vpu_data;
	mutex_unlock(&vpu_data.lock);
	return 0;
}
break;
}
- spin_lock(&vpu_lock);
+ mutex_lock(&vpu_data.lock);
list_add(&rec->list, &head);
- spin_unlock(&vpu_lock);
+ mutex_unlock(&vpu_data.lock);
break;
}
vpu_free_dma_buffer(&vpu_mem);
}
- spin_lock(&vpu_lock);
+ mutex_lock(&vpu_data.lock);
list_for_each_entry_safe(rec, n, &head, list) {
if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
/* delete from list */
break;
}
}
- spin_unlock(&vpu_lock);
+ mutex_unlock(&vpu_data.lock);
break;
}
}
case VPU_IOC_GET_SHARE_MEM:
{
- spin_lock(&vpu_lock);
+ mutex_lock(&vpu_data.lock);
if (share_mem.cpu_addr != 0) {
ret = copy_to_user((void __user *)arg,
&share_mem,
sizeof(struct vpu_mem_desc));
- spin_unlock(&vpu_lock);
+ mutex_unlock(&vpu_data.lock);
break;
} else {
if (copy_from_user(&share_mem,
(struct vpu_mem_desc *)arg,
sizeof(struct vpu_mem_desc))) {
- spin_unlock(&vpu_lock);
+ mutex_unlock(&vpu_data.lock);
return -EFAULT;
}
if (vpu_alloc_dma_buffer(&share_mem) == -1)
ret = -EFAULT;
}
}
- spin_unlock(&vpu_lock);
+ mutex_unlock(&vpu_data.lock);
break;
}
case VPU_IOC_REQ_VSHARE_MEM:
{
- spin_lock(&vpu_lock);
+ mutex_lock(&vpu_data.lock);
if (vshare_mem.cpu_addr != 0) {
ret = copy_to_user((void __user *)arg,
&vshare_mem,
sizeof(struct vpu_mem_desc));
- spin_unlock(&vpu_lock);
+ mutex_unlock(&vpu_data.lock);
break;
} else {
if (copy_from_user(&vshare_mem,
(struct vpu_mem_desc *)arg,
sizeof(struct
vpu_mem_desc))) {
- spin_unlock(&vpu_lock);
+ mutex_unlock(&vpu_data.lock);
return -EFAULT;
}
/* vmalloc shared memory if not allocated */
sizeof(struct vpu_mem_desc)))
ret = -EFAULT;
}
- spin_unlock(&vpu_lock);
+ mutex_unlock(&vpu_data.lock);
break;
}
case VPU_IOC_GET_WORK_ADDR:
*/
/*
 * vpu_release - release() handler for the VPU char device.
 *
 * Drops one open reference under vpu_data.lock. When the last reference
 * goes away, frees all outstanding DMA buffers and the vmalloc'ed shared
 * region, and clears vshare_mem.cpu_addr so a later open can reallocate it.
 * Taking a mutex (rather than the old spinlock) is required here because
 * vpu_free_buffers()/vfree() may sleep.
 *
 * Returns 0 always.
 */
static int vpu_release(struct inode *inode, struct file *filp)
{
	mutex_lock(&vpu_data.lock);
	if (open_count > 0 && !(--open_count)) {
		/* Last close: tear down everything allocated on behalf of users. */
		vpu_free_buffers();
		vfree((void *)vshare_mem.cpu_addr);
		vshare_mem.cpu_addr = 0;
	}
	mutex_unlock(&vpu_data.lock);
	return 0;
}
{
int ret = -EINVAL;
- spin_lock(&vpu_lock);
ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
vm->vm_flags |= VM_IO;
- spin_unlock(&vpu_lock);
return ret;
}
vpu_data.workqueue = create_workqueue("vpu_wq");
INIT_WORK(&vpu_data.work, vpu_worker_callback);
+ mutex_init(&vpu_data.lock);
printk(KERN_INFO "VPU initialized\n");
goto out;