@@ ... @@
 static DEFINE_MUTEX(binder_lock);
 static DEFINE_MUTEX(binder_deferred_lock);
+static DEFINE_MUTEX(binder_mmap_lock);

 static HLIST_HEAD(binder_procs);
 static HLIST_HEAD(binder_deferred_list);
@@ ... @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 	if (mm) {
 		down_write(&mm->mmap_sem);
 		vma = proc->vma;
+		if (vma && mm != vma->vm_mm) {
+			pr_err("binder: %d: vma mm and task mm mismatch\n",
+				proc->pid);
+			vma = NULL;
+		}
 	}

 	if (allocate == 0)
@@ ... @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	}
 	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

+	mutex_lock(&binder_mmap_lock);
 	if (proc->buffer) {
 		ret = -EBUSY;
 		failure_string = "already mapped";
@@ ... @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	}
 	proc->buffer = area->addr;
 	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
+	mutex_unlock(&binder_mmap_lock);

 #ifdef CONFIG_CPU_CACHE_VIPT
 	if (cache_is_vipt_aliasing()) {
@@ ... @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	binder_insert_free_buffer(proc, buffer);
 	proc->free_async_space = proc->buffer_size / 2;
 	barrier();
-	proc->files = get_files_struct(current);
+	proc->files = get_files_struct(proc->tsk);
 	proc->vma = vma;

 	/*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
@@ ... @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	kfree(proc->pages);
 	proc->pages = NULL;
 err_alloc_pages_failed:
+	mutex_lock(&binder_mmap_lock);
 	vfree(proc->buffer);
 	proc->buffer = NULL;
 err_get_vm_area_failed:
 err_already_mapped:
+	mutex_unlock(&binder_mmap_lock);
 err_bad_arg:
 	printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n",
 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
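
The lock added above turns the existing proc->buffer check and its later assignment in
binder_mmap() into one critical section: without binder_mmap_lock, two tasks mmap()ing
the same binder fd could both observe proc->buffer == NULL and both try to set up a
mapping. The sketch below shows only that check-then-publish pattern under a mutex;
example_proc, example_map() and example_mmap_lock are made-up stand-ins for
binder_proc, binder_mmap() and binder_mmap_lock, not the driver's actual code.

/* Minimal sketch of the locking pattern, not the binder driver itself. */
#include <linux/mutex.h>
#include <linux/errno.h>

struct example_proc {
	void *buffer;			/* NULL until the first successful map */
};

static DEFINE_MUTEX(example_mmap_lock);	/* stand-in for binder_mmap_lock */

static int example_map(struct example_proc *proc, void *addr)
{
	int ret = 0;

	mutex_lock(&example_mmap_lock);
	if (proc->buffer) {
		/* a second mmap() on the same fd now fails cleanly */
		ret = -EBUSY;
		goto out_unlock;
	}
	proc->buffer = addr;		/* publish while still holding the lock */
out_unlock:
	mutex_unlock(&example_mmap_lock);
	return ret;
}

The same reasoning applies to the error-path hunk: vfree(proc->buffer) and
proc->buffer = NULL are redone under binder_mmap_lock so a concurrent mapper never
sees a half-torn-down buffer.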