if (mem_idx >= fbdev->num_fbs)
return NULL;
- return &fbdev->regions[mem_idx];
+ return omapfb_get_mem_region(&fbdev->regions[mem_idx]);
}
static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
/* XXX uses only the first overlay */
ovl = ofbi->overlays[0];
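+ /* Hold references to both the current and the requested memory region. */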
- old_rg = ofbi->region;
+ old_rg = omapfb_get_mem_region(ofbi->region);
new_rg = get_mem_region(ofbi, pi->mem_idx);
if (!new_rg) {
r = -EINVAL;
- goto out;
+ goto put_old;
}
if (pi->enabled && !new_rg->size) {
* until it's reallocated.
*/
r = -EINVAL;
- goto out;
+ goto put_new;
}
ovl->get_overlay_info(ovl, &old_info);
if (ovl->manager)
ovl->manager->apply(ovl->manager);
+ omapfb_put_mem_region(new_rg);
+ omapfb_put_mem_region(old_rg);
+
return 0;
undo:
}
ovl->set_overlay_info(ovl, &old_info);
+ put_new:
+ omapfb_put_mem_region(new_rg);
+ put_old:
+ omapfb_put_mem_region(old_rg);
out:
dev_err(fbdev->dev, "setup_plane failed\n");
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_device *fbdev = ofbi->fbdev;
struct omapfb2_mem_region *rg;
- int r, i;
+ int r = 0, i;
size_t size;
if (mi->type > OMAPFB_MEMTYPE_MAX)
rg = ofbi->region;
- if (atomic_read(&rg->map_count))
- return -EBUSY;
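+ /* Wait for all users of the region to drop their references. */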
+ /* FIXME probably should be a rwsem ... */
+ mutex_lock(&rg->mtx);
+ while (rg->ref) {
+ mutex_unlock(&rg->mtx);
+ schedule();
+ mutex_lock(&rg->mtx);
+ }
+
+ if (atomic_read(&rg->map_count)) {
+ r = -EBUSY;
+ goto out;
+ }
for (i = 0; i < fbdev->num_fbs; i++) {
struct omapfb_info *ofbi2 = FB2OFB(fbdev->fbs[i]);
for (j = 0; j < ofbi2->num_overlays; j++) {
if (ofbi2->overlays[j]->info.enabled) {
r = -EBUSY;
- return r;
+ goto out;
}
}
}
r = omapfb_realloc_fbmem(fbi, size, mi->type);
if (r) {
dev_err(fbdev->dev, "realloc fbmem failed\n");
- return r;
+ goto out;
}
}
- return 0;
+ out:
+ mutex_unlock(&rg->mtx);
+
+ return r;
}
static int omapfb_query_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_mem_region *rg;
- rg = ofbi->region;
+ rg = omapfb_get_mem_region(ofbi->region);
memset(mi, 0, sizeof(*mi));
mi->size = rg->size;
mi->type = rg->type;
+ omapfb_put_mem_region(rg);
+
return 0;
}
* DO NOT MODIFY PAR */
static int omapfb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi)
{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
int r;
DBG("check_var(%d)\n", FB2OFB(fbi)->id);
+ omapfb_get_mem_region(ofbi->region);
+
r = check_fb_var(fbi, var);
+ omapfb_put_mem_region(ofbi->region);
+
return r;
}
/* set the video mode according to info->var */
static int omapfb_set_par(struct fb_info *fbi)
{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
int r;
DBG("set_par(%d)\n", FB2OFB(fbi)->id);
+ omapfb_get_mem_region(ofbi->region);
+
set_fb_fix(fbi);
r = setup_vrfb_rotation(fbi);
if (r)
- return r;
+ goto out;
r = omapfb_apply_changes(fbi, 0);
+ out:
+ omapfb_put_mem_region(ofbi->region);
+
return r;
}
static int omapfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *fbi)
{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
struct fb_var_screeninfo new_var;
int r;
fbi->var = new_var;
+ omapfb_get_mem_region(ofbi->region);
+
r = omapfb_apply_changes(fbi, 0);
+ omapfb_put_mem_region(ofbi->region);
+
return r;
}
{
struct omapfb2_mem_region *rg = vma->vm_private_data;
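+ /* Pin the region while its map count is updated. */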
+ omapfb_get_mem_region(rg);
atomic_inc(&rg->map_count);
+ omapfb_put_mem_region(rg);
}
static void mmap_user_close(struct vm_area_struct *vma)
{
struct omapfb2_mem_region *rg = vma->vm_private_data;
+ omapfb_get_mem_region(rg);
atomic_dec(&rg->map_count);
+ omapfb_put_mem_region(rg);
}
static struct vm_operations_struct mmap_user_ops = {
unsigned long off;
unsigned long start;
u32 len;
+ int r = -EINVAL;
if (vma->vm_end - vma->vm_start == 0)
return 0;
return -EINVAL;
off = vma->vm_pgoff << PAGE_SHIFT;
- rg = ofbi->region;
+ rg = omapfb_get_mem_region(ofbi->region);
start = omapfb_get_region_paddr(ofbi);
len = fix->smem_len;
if (off >= len)
- return -EINVAL;
+ goto error;
if ((vma->vm_end - vma->vm_start + off) > len)
- return -EINVAL;
+ goto error;
off += start;
vma->vm_ops = &mmap_user_ops;
vma->vm_private_data = rg;
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) {
+ r = -EAGAIN;
+ goto error;
+ }
+
/* vm_ops.open won't be called for mmap itself. */
atomic_inc(&rg->map_count);
+
+ omapfb_put_mem_region(rg);
+
return 0;
+
+ error:
+ omapfb_put_mem_region(ofbi->region);
+
+ return r;
}
/* Store a single color palette entry into a pseudo palette or the hardware
ofbi->region = &fbdev->regions[i];
ofbi->region->id = i;
+ mutex_init(&ofbi->region->mtx);
/* assign these early, so that fb alloc can use them */
ofbi->rotation_type = def_vrfb ? OMAP_DSS_ROT_VRFB :
/* setup fb_infos */
for (i = 0; i < fbdev->num_fbs; i++) {
- r = omapfb_fb_init(fbdev, fbdev->fbs[i]);
+ struct fb_info *fbi = fbdev->fbs[i];
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+
+ omapfb_get_mem_region(ofbi->region);
+ r = omapfb_fb_init(fbdev, fbi);
+ omapfb_put_mem_region(ofbi->region);
+
if (r) {
dev_err(fbdev->dev, "failed to setup fb_info\n");
return r;
DBG("framebuffers registered\n");
for (i = 0; i < fbdev->num_fbs; i++) {
- r = omapfb_apply_changes(fbdev->fbs[i], 1);
+ struct fb_info *fbi = fbdev->fbs[i];
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+
+ omapfb_get_mem_region(ofbi->region);
+ r = omapfb_apply_changes(fbi, 1);
+ omapfb_put_mem_region(ofbi->region);
+
if (r) {
dev_err(fbdev->dev, "failed to change mode\n");
return r;
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_mem_region *rg;
enum omap_dss_rotation_type rot_type;
int r;
if (rot_type == ofbi->rotation_type)
goto out;
- if (ofbi->region->size) {
+ rg = omapfb_get_mem_region(ofbi->region);
+
+ if (rg->size) {
r = -EBUSY;
- goto out;
+ goto put_region;
}
ofbi->rotation_type = rot_type;
* Since the VRAM for this FB is not allocated at the moment we don't
* need to do any further parameter checking at this point.
*/
+put_region:
+ omapfb_put_mem_region(rg);
out:
unlock_fb_info(fbi);
ofbi->mirror = mirror;
+ omapfb_get_mem_region(ofbi->region);
+
memcpy(&new_var, &fbi->var, sizeof(new_var));
r = check_fb_var(fbi, &new_var);
if (r)
r = count;
out:
+ omapfb_put_mem_region(ofbi->region);
+
unlock_fb_info(fbi);
return r;
DBG("detaching %d\n", ofbi->overlays[i]->id);
+ omapfb_get_mem_region(ofbi->region);
+
omapfb_overlay_enable(ovl, 0);
if (ovl->manager)
ovl->manager->apply(ovl->manager);
+ omapfb_put_mem_region(ofbi->region);
+
for (t = i + 1; t < ofbi->num_overlays; t++) {
ofbi->rotation[t-1] = ofbi->rotation[t];
ofbi->overlays[t-1] = ofbi->overlays[t];
}
if (added) {
+ omapfb_get_mem_region(ofbi->region);
+
r = omapfb_apply_changes(fbi, 0);
+
+ omapfb_put_mem_region(ofbi->region);
+
if (r)
goto out;
}
for (i = 0; i < num_ovls; ++i)
ofbi->rotation[i] = rotation[i];
+ omapfb_get_mem_region(ofbi->region);
+
r = omapfb_apply_changes(fbi, 0);
+
+ omapfb_put_mem_region(ofbi->region);
+
if (r)
goto out;
rg = ofbi->region;
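+ /* Wait for all users of the region to drop their references. */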
+ /* FIXME probably should be a rwsem ... */
+ mutex_lock(&rg->mtx);
+ while (rg->ref) {
+ mutex_unlock(&rg->mtx);
+ schedule();
+ mutex_lock(&rg->mtx);
+ }
+
if (atomic_read(&rg->map_count)) {
r = -EBUSY;
goto out;
r = count;
out:
+ mutex_unlock(&rg->mtx);
+
unlock_fb_info(fbi);
return r;
u8 type; /* OMAPFB_PLANE_MEM_* */
bool alloc; /* allocated by the driver */
bool map; /* kernel mapped by the driver */
+ struct mutex mtx; /* protects the use count below */
+ unsigned int ref; /* use count; the region is not resized while nonzero */
atomic_t map_count;
};
return ovl->set_overlay_info(ovl, &info);
}
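+/*
+ * Take a reference on a memory region. While the use count is nonzero
+ * the region's memory will not be reallocated.
+ */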
+static inline struct omapfb2_mem_region *
+omapfb_get_mem_region(struct omapfb2_mem_region *rg)
+{
+ mutex_lock(&rg->mtx);
+ rg->ref++;
+ mutex_unlock(&rg->mtx);
+ return rg;
+}
+
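+/* Release a reference taken with omapfb_get_mem_region(). */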
+static inline void omapfb_put_mem_region(struct omapfb2_mem_region *rg)
+{
+ mutex_lock(&rg->mtx);
+ rg->ref--;
+ mutex_unlock(&rg->mtx);
+}
+
#endif