2 * Copyright (C) 2008-2009 QUALCOMM Incorporated.
5 /* FIXME: most allocations need not be GFP_ATOMIC */
6 /* FIXME: management of mutexes */
7 /* FIXME: msm_pmem_region_lookup return values */
8 /* FIXME: way too many copy to/from user */
9 /* FIXME: does region->active mean free */
10 /* FIXME: check limits on command lengths passed from userspace */
11 /* FIXME: __msm_release: which queues should we flush when opencnt != 0 */
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <mach/board.h>
20 #include <linux/list.h>
21 #include <linux/uaccess.h>
22 #include <linux/android_pmem.h>
23 #include <linux/poll.h>
24 #include <media/msm_camera.h>
25 #include <mach/camera.h>
/* Maximum number of camera sensors this driver will register. */
27 #define MSM_MAX_CAMERA_SENSORS 5
/* Log a failed copy to/from user space with calling function and line.
 * to != 0 means the failing direction was copy_to_user(). */
29 #define ERR_USER_COPY(to) pr_err("%s(%d): copy %s user\n", \
30 __func__, __LINE__, ((to) ? "to" : "from"))
31 #define ERR_COPY_FROM_USER() ERR_USER_COPY(0)
32 #define ERR_COPY_TO_USER() ERR_USER_COPY(1)
/* Device class and base dev_t for the msm camera character devices. */
34 static struct class *msm_class;
35 static dev_t msm_devno;
/* List head for registered sensors (bounded by MSM_MAX_CAMERA_SENSORS). */
36 static LIST_HEAD(msm_sensors);
/* Pmem region range-checking helpers, used by check_overlap() below.
 *
 * __CONTAINS(r, v, l, field): nonzero if the span [v, v + l) lies entirely
 * within region r's [r->field, r->field + r->len).
 * CONTAINS(r1, r2, field):    r2's span is fully contained in r1's.
 * IN_RANGE(r, v, field):      single address v falls inside region r.
 * OVERLAPS(r1, r2, field):    either endpoint of r2 lands inside r1, i.e.
 *                             the two regions partially overlap.
 *
 * NOTE(review): several interior/closing lines of these statement-expression
 * macros are not visible in this view (e.g. the `typeof(r) __r = r;`
 * declarations and the trailing `res;`) — confirm against the full file.
 */
38 #define __CONTAINS(r, v, l, field) ({ \
41 typeof(v) __e = __v + l; \
42 int res = __v >= __r->field && \
43 __e <= __r->field + __r->len; \
47 #define CONTAINS(r1, r2, field) ({ \
48 typeof(r2) __r2 = r2; \
49 __CONTAINS(r1, __r2->field, __r2->len, field); \
52 #define IN_RANGE(r, v, field) ({ \
55 int res = ((__vv >= __r->field) && \
56 (__vv < (__r->field + __r->len))); \
60 #define OVERLAPS(r1, r2, field) ({ \
61 typeof(r1) __r1 = r1; \
62 typeof(r2) __r2 = r2; \
63 typeof(__r2->field) __v = __r2->field; \
64 typeof(__v) __e = __v + __r2->len - 1; \
65 int res = (IN_RANGE(__r1, __v, field) || \
66 IN_RANGE(__r1, __e, field)); \
/* Remove every queued command from list (sync)->name.
 * The NOSYNC variant assumes the caller already holds the queue's lock;
 * MSM_DRAIN_QUEUE takes (sync)->name##_lock itself.
 * NOTE(review): the loop body's tail (presumably freeing qcmd) and the
 * closing `} while (0)` lines are not visible in this view.
 */
70 #define MSM_DRAIN_QUEUE_NOSYNC(sync, name) do { \
71 struct msm_queue_cmd *qcmd = NULL; \
72 CDBG("%s: draining queue "#name"\n", __func__); \
73 while (!list_empty(&(sync)->name)) { \
74 qcmd = list_first_entry(&(sync)->name, \
75 struct msm_queue_cmd, list); \
76 list_del_init(&qcmd->list); \
/* Locked wrapper around MSM_DRAIN_QUEUE_NOSYNC. */
81 #define MSM_DRAIN_QUEUE(sync, name) do { \
82 unsigned long flags; \
83 spin_lock_irqsave(&(sync)->name##_lock, flags); \
84 MSM_DRAIN_QUEUE_NOSYNC(sync, name); \
85 spin_unlock_irqrestore(&(sync)->name##_lock, flags); \
/* Verify that a candidate pmem span [paddr, paddr + len) does not clash
 * with any region already registered on the ptype hash list; called from
 * msm_pmem_table_add() before inserting a new region.
 * NOTE(review): the return statements and the pr_err() opening line are
 * not visible in this view — presumably returns <0 on clash, 0 otherwise.
 */
88 static int check_overlap(struct hlist_head *ptype,
92 struct msm_pmem_region *region;
/* Temporary region wrapping the candidate span for the macro checks. */
93 struct msm_pmem_region t = { .paddr = paddr, .len = len };
94 struct hlist_node *node;
96 hlist_for_each_entry(region, node, ptype, list) {
97 if (CONTAINS(region, &t, paddr) ||
98 CONTAINS(&t, region, paddr) ||
99 OVERLAPS(region, &t, paddr)) {
101 " region (PHYS %p len %ld)"
102 " clashes with registered region"
103 " (paddr %p len %ld)\n",
104 (void *)t.paddr, t.len,
105 (void *)region->paddr, region->len);
/* Pin the userspace pmem buffer described by info (via its fd), check it
 * does not overlap an already-registered region, then allocate a
 * msm_pmem_region, fill it from info, and add it to the ptype hash list.
 * NOTE(review): error-path returns and kmalloc NULL check are not visible
 * in this view.
 */
113 static int msm_pmem_table_add(struct hlist_head *ptype,
114 struct msm_pmem_info *info)
118 unsigned long vstart;
121 struct msm_pmem_region *region;
/* Resolves fd to physical address/length and takes a file reference;
 * released later via put_pmem_file() in __msm_pmem_table_del(). */
123 rc = get_pmem_file(info->fd, &paddr, &vstart, &len, &file);
125 pr_err("msm_pmem_table_add: get_pmem_file fd %d error %d\n",
130 if (check_overlap(ptype, paddr, len) < 0)
133 CDBG("%s: type = %d, paddr = 0x%lx, vaddr = 0x%lx\n",
135 info->type, paddr, (unsigned long)info->vaddr);
137 region = kmalloc(sizeof(*region), GFP_KERNEL);
/* NOTE(review): "®ion" below is mojibake for "&region" (HTML &reg;
 * entity corruption) — restore the original encoding. */
141 INIT_HLIST_NODE(®ion->list);
143 region->type = info->type;
144 region->vaddr = info->vaddr;
145 region->paddr = paddr;
148 region->y_off = info->y_off;
149 region->cbcr_off = info->cbcr_off;
150 region->fd = info->fd;
151 region->active = info->active;
153 hlist_add_head(&(region->list), ptype);
158 /* return of 0 means failure */
/* Copy up to maxcount active regions of the given pmem_type from the
 * ptype hash list into the caller-supplied reg array; returns the number
 * found (0 on failure — see FIXME at top of file about return values).
 * NOTE(review): the copy loop body and return are not visible here.
 */
159 static uint8_t msm_pmem_region_lookup(struct hlist_head *ptype,
160 int pmem_type, struct msm_pmem_region *reg, uint8_t maxcount)
162 struct msm_pmem_region *region;
163 struct msm_pmem_region *regptr;
164 struct hlist_node *node, *n;
170 hlist_for_each_entry_safe(region, node, n, ptype, list) {
/* Only regions marked active and matching the requested type count. */
171 if (region->type == pmem_type && region->active) {
/* Physical-to-virtual lookup for frame buffers: given the Y and CbCr
 * physical addresses reported by the VFE, find the registered frame
 * region they belong to and return its userspace vaddr, also reporting
 * the Y/CbCr offsets and originating fd through the out parameters.
 * NOTE(review): the region->active handling and the not-found return are
 * not visible in this view.
 */
183 static unsigned long msm_pmem_frame_ptov_lookup(struct msm_sync *sync,
184 unsigned long pyaddr,
185 unsigned long pcbcraddr,
186 uint32_t *yoff, uint32_t *cbcroff, int *fd)
188 struct msm_pmem_region *region;
189 struct hlist_node *node, *n;
191 hlist_for_each_entry_safe(region, node, n, &sync->frame, list) {
192 if (pyaddr == (region->paddr + region->y_off) &&
193 pcbcraddr == (region->paddr +
196 /* offset since we could pass vaddr inside
197 * a registered pmem buffer
199 *yoff = region->y_off;
200 *cbcroff = region->cbcr_off;
203 return (unsigned long)(region->vaddr);
/* Physical-to-virtual lookup for stats buffers: find the active stats
 * region whose physical base equals addr, return its userspace vaddr and
 * report the originating fd. NOTE(review): not-found return not visible.
 */
210 static unsigned long msm_pmem_stats_ptov_lookup(struct msm_sync *sync,
211 unsigned long addr, int *fd)
213 struct msm_pmem_region *region;
214 struct hlist_node *node, *n;
216 hlist_for_each_entry_safe(region, node, n, &sync->stats, list) {
217 if (addr == region->paddr && region->active) {
218 /* offset since we could pass vaddr inside a
219 * registered pmem buffer */
222 return (unsigned long)(region->vaddr);
/* Virtual-to-physical lookup for frame buffers: match an inactive frame
 * region by userspace vaddr, offsets and fd, and return its physical
 * address. Matching only inactive regions (active == 0) presumably marks
 * the buffer as being handed back to the VFE — confirm against full file.
 */
229 static unsigned long msm_pmem_frame_vtop_lookup(struct msm_sync *sync,
230 unsigned long buffer,
231 uint32_t yoff, uint32_t cbcroff, int fd)
233 struct msm_pmem_region *region;
234 struct hlist_node *node, *n;
236 hlist_for_each_entry_safe(region,
237 node, n, &sync->frame, list) {
238 if (((unsigned long)(region->vaddr) == buffer) &&
239 (region->y_off == yoff) &&
240 (region->cbcr_off == cbcroff) &&
241 (region->fd == fd) &&
242 (region->active == 0)) {
245 return region->paddr;
/* Virtual-to-physical lookup for stats buffers: match an inactive stats
 * region by userspace vaddr and fd, returning its physical address.
 * NOTE(review): not-found return is not visible in this view.
 */
252 static unsigned long msm_pmem_stats_vtop_lookup(
253 struct msm_sync *sync,
254 unsigned long buffer,
257 struct msm_pmem_region *region;
258 struct hlist_node *node, *n;
260 hlist_for_each_entry_safe(region, node, n, &sync->stats, list) {
261 if (((unsigned long)(region->vaddr) == buffer) &&
262 (region->fd == fd) && region->active == 0) {
264 return region->paddr;
/* Unregister pmem regions matching pinfo (type + vaddr + fd): frame-type
 * regions are removed from sync->frame, stats-type regions from
 * sync->stats; each match drops the pmem file reference taken in
 * msm_pmem_table_add(). NOTE(review): hlist_del()/kfree() of the region
 * and the default case are not visible in this view.
 */
271 static int __msm_pmem_table_del(struct msm_sync *sync,
272 struct msm_pmem_info *pinfo)
275 struct msm_pmem_region *region;
276 struct hlist_node *node, *n;
278 switch (pinfo->type) {
279 case MSM_PMEM_OUTPUT1:
280 case MSM_PMEM_OUTPUT2:
281 case MSM_PMEM_THUMBAIL:
282 case MSM_PMEM_MAINIMG:
283 case MSM_PMEM_RAW_MAINIMG:
284 hlist_for_each_entry_safe(region, node, n,
285 &sync->frame, list) {
287 if (pinfo->type == region->type &&
288 pinfo->vaddr == region->vaddr &&
289 pinfo->fd == region->fd) {
291 put_pmem_file(region->file);
297 case MSM_PMEM_AEC_AWB:
299 hlist_for_each_entry_safe(region, node, n,
300 &sync->stats, list) {
302 if (pinfo->type == region->type &&
303 pinfo->vaddr == region->vaddr &&
304 pinfo->fd == region->fd) {
306 put_pmem_file(region->file);
/* ioctl entry: copy a msm_pmem_info from userspace and delegate to
 * __msm_pmem_table_del(). Returns -EFAULT on a bad user pointer
 * (presumably — the error return line is not visible here). */
320 static int msm_pmem_table_del(struct msm_sync *sync, void __user *arg)
322 struct msm_pmem_info info;
324 if (copy_from_user(&info, arg, sizeof(info))) {
325 ERR_COPY_FROM_USER();
329 return __msm_pmem_table_del(sync, &info);
/* Dequeue the oldest preview frame from sync->prev_frame_q, translate its
 * VFE physical addresses back to the registered userspace buffer via
 * msm_pmem_frame_ptov_lookup(), and fill in *frame for the caller.
 * Fails when the queue is empty or the lookup does not match a region.
 * NOTE(review): return statements and qcmd free are not visible here.
 */
332 static int __msm_get_frame(struct msm_sync *sync,
333 struct msm_frame *frame)
338 struct msm_queue_cmd *qcmd = NULL;
339 struct msm_vfe_phy_info *pphy;
341 spin_lock_irqsave(&sync->prev_frame_q_lock, flags);
342 if (!list_empty(&sync->prev_frame_q)) {
343 qcmd = list_first_entry(&sync->prev_frame_q,
344 struct msm_queue_cmd, list);
345 list_del_init(&qcmd->list);
347 spin_unlock_irqrestore(&sync->prev_frame_q_lock, flags);
350 pr_err("%s: no preview frame.\n", __func__);
/* Payload was stored immediately after the queue command header. */
354 pphy = (struct msm_vfe_phy_info *)(qcmd->command);
357 msm_pmem_frame_ptov_lookup(sync,
359 pphy->cbcr_phy, &(frame->y_off),
360 &(frame->cbcr_off), &(frame->fd));
361 if (!frame->buffer) {
362 pr_err("%s: cannot get frame, invalid lookup address "
363 "y=%x cbcr=%x offset=%d\n",
371 CDBG("__msm_get_frame: y=0x%x, cbcr=0x%x, qcmd=0x%x, virt_addr=0x%x\n",
372 pphy->y_phy, pphy->cbcr_phy, (int) qcmd, (int) frame->buffer);
/* ioctl entry for MSM_CAM_IOCTL_GETFRAME: copy the request in, fetch a
 * preview frame via __msm_get_frame(), optionally copy crop info out
 * (bounded by sync->croplen), then copy the filled msm_frame back.
 * NOTE(review): error returns between the visible lines are not shown.
 */
378 static int msm_get_frame(struct msm_sync *sync, void __user *arg)
381 struct msm_frame frame;
383 if (copy_from_user(&frame,
385 sizeof(struct msm_frame))) {
386 ERR_COPY_FROM_USER();
390 rc = __msm_get_frame(sync, &frame);
/* Reject user crop buffers larger than the cached crop info. */
395 if (frame.croplen > sync->croplen) {
396 pr_err("msm_get_frame: invalid frame croplen %d\n",
401 if (copy_to_user((void *)frame.cropinfo,
409 if (copy_to_user((void *)arg,
410 &frame, sizeof(struct msm_frame))) {
415 CDBG("Got frame!!!\n");
/* ioctl entry: copy a camera_enable_cmd from userspace and forward it to
 * the VFE enable hook, if the VFE driver registered one. */
420 static int msm_enable_vfe(struct msm_sync *sync, void __user *arg)
423 struct camera_enable_cmd cfg;
425 if (copy_from_user(&cfg,
427 sizeof(struct camera_enable_cmd))) {
428 ERR_COPY_FROM_USER();
432 if (sync->vfefn.vfe_enable)
433 rc = sync->vfefn.vfe_enable(&cfg);
435 CDBG("msm_enable_vfe: returned rc = %d\n", rc);
/* ioctl entry: copy a camera_enable_cmd from userspace and forward it to
 * the VFE disable hook (second argument unused/NULL), if registered. */
439 static int msm_disable_vfe(struct msm_sync *sync, void __user *arg)
442 struct camera_enable_cmd cfg;
444 if (copy_from_user(&cfg,
446 sizeof(struct camera_enable_cmd))) {
447 ERR_COPY_FROM_USER();
451 if (sync->vfefn.vfe_disable)
452 rc = sync->vfefn.vfe_disable(&cfg, NULL);
454 CDBG("msm_disable_vfe: returned rc = %d\n", rc);
/* Post qcmd on sync->msg_event_q for the config thread, then (when queue
 * is non-NULL) block up to the given timeout for the matching status on
 * queue->ctrl_status_q and return the dequeued status command. On
 * timeout/interrupt the original qcmd is pulled back off msg_event_q so
 * the config thread cannot touch freed memory (see comment below).
 * NOTE(review): the queue == NULL early return and error-pointer returns
 * are not visible in this view.
 */
458 static struct msm_queue_cmd *__msm_control(struct msm_sync *sync,
459 struct msm_control_device_queue *queue,
460 struct msm_queue_cmd *qcmd,
466 spin_lock_irqsave(&sync->msg_event_q_lock, flags);
467 list_add_tail(&qcmd->list, &sync->msg_event_q);
468 /* wake up config thread */
469 wake_up(&sync->msg_event_wait);
470 spin_unlock_irqrestore(&sync->msg_event_q_lock, flags);
475 /* wait for config status */
476 rc = wait_event_interruptible_timeout(
477 queue->ctrl_status_wait,
478 !list_empty_careful(&queue->ctrl_status_q),
480 if (list_empty_careful(&queue->ctrl_status_q)) {
484 pr_err("msm_control: wait_event error %d\n", rc);
486 /* This is a bit scary. If we time out too early, we
487 * will free qcmd at the end of this function, and the
488 * dsp may do the same when it does respond, so we
489 * remove the message from the source queue.
491 pr_err("%s: error waiting for ctrl_status_q: %d\n",
493 spin_lock_irqsave(&sync->msg_event_q_lock, flags);
494 list_del_init(&qcmd->list);
495 spin_unlock_irqrestore(&sync->msg_event_q_lock, flags);
501 /* control command status is ready */
502 spin_lock_irqsave(&queue->ctrl_status_q_lock, flags);
503 BUG_ON(list_empty(&queue->ctrl_status_q));
504 qcmd = list_first_entry(&queue->ctrl_status_q,
505 struct msm_queue_cmd, list);
506 list_del_init(&qcmd->list);
507 spin_unlock_irqrestore(&queue->ctrl_status_q_lock, flags);
/* ioctl entry for CTRL_COMMAND / CTRL_COMMAND_2: copy the control command
 * (header + variable-length payload) from userspace into one kmalloc'd
 * msm_queue_cmd, then either fire-and-forget (__msm_control with NULL
 * queue, timeout 0) or send and block for the config thread's response,
 * copying the response payload and header back to userspace.
 * NOTE(review): the `block` selector parameter and several error-path
 * lines are not visible in this view.
 */
512 static int msm_control(struct msm_control_device *ctrl_pmsm,
518 struct msm_sync *sync = ctrl_pmsm->pmsm->sync;
519 struct msm_ctrl_cmd udata, *ctrlcmd;
520 struct msm_queue_cmd *qcmd = NULL, *qcmd_temp;
522 if (copy_from_user(&udata, arg, sizeof(struct msm_ctrl_cmd))) {
523 ERR_COPY_FROM_USER();
/* Single allocation: queue header, ctrl header, then payload. */
528 qcmd = kmalloc(sizeof(struct msm_queue_cmd) +
529 sizeof(struct msm_ctrl_cmd) + udata.length,
532 pr_err("msm_control: cannot allocate buffer\n");
537 qcmd->type = MSM_CAM_Q_CTRL;
538 qcmd->command = ctrlcmd = (struct msm_ctrl_cmd *)(qcmd + 1);
540 ctrlcmd->value = ctrlcmd + 1;
543 if (copy_from_user(ctrlcmd->value,
544 udata.value, udata.length)) {
545 ERR_COPY_FROM_USER();
552 /* qcmd will be set to NULL */
553 qcmd = __msm_control(sync, NULL, qcmd, 0);
557 qcmd_temp = __msm_control(sync,
559 qcmd, MAX_SCHEDULE_TIMEOUT);
561 if (IS_ERR(qcmd_temp)) {
562 rc = PTR_ERR(qcmd_temp);
/* Response payload goes back to the caller's original value buffer. */
568 void __user *to = udata.value;
569 udata = *(struct msm_ctrl_cmd *)qcmd->command;
570 if (udata.length > 0) {
581 if (copy_to_user((void *)arg, &udata,
582 sizeof(struct msm_ctrl_cmd))) {
590 /* Note: if we get here as a result of an error, we will free the
591 * qcmd that we kmalloc() in this function. When we come here as
592 * a result of a successful completion, we are freeing the qcmd that
593 * we dequeued from queue->ctrl_status_q.
597 CDBG("msm_control: end rc = %d\n", rc);
/* ioctl entry for MSM_CAM_IOCTL_GET_STATS: the config thread blocks here
 * (up to se.timeout_ms) for the next event on sync->msg_event_q, then
 * translates it into a msm_stats_event_ctrl for userspace. Handled
 * sources: VFE events/messages (stats buffers, general messages, output
 * frames, snapshot post-processing), control-thread commands, and v4l2
 * client requests. NOTE(review): many error-path and free lines are not
 * visible in this view.
 */
601 static int msm_get_stats(struct msm_sync *sync, void __user *arg)
607 struct msm_stats_event_ctrl se;
609 struct msm_queue_cmd *qcmd = NULL;
610 struct msm_ctrl_cmd *ctrl = NULL;
611 struct msm_vfe_resp *data = NULL;
612 struct msm_stats_buf stats;
614 if (copy_from_user(&se, arg,
615 sizeof(struct msm_stats_event_ctrl))) {
616 ERR_COPY_FROM_USER();
620 timeout = (int)se.timeout_ms;
622 CDBG("msm_get_stats timeout %d\n", timeout);
623 rc = wait_event_interruptible_timeout(
624 sync->msg_event_wait,
625 !list_empty_careful(&sync->msg_event_q),
626 msecs_to_jiffies(timeout));
627 if (list_empty_careful(&sync->msg_event_q)) {
631 pr_err("msm_get_stats error %d\n", rc);
635 CDBG("msm_get_stats returned from wait: %d\n", rc);
637 spin_lock_irqsave(&sync->msg_event_q_lock, flags);
638 BUG_ON(list_empty(&sync->msg_event_q));
639 qcmd = list_first_entry(&sync->msg_event_q,
640 struct msm_queue_cmd, list);
641 list_del_init(&qcmd->list);
642 spin_unlock_irqrestore(&sync->msg_event_q_lock, flags);
644 CDBG("=== received from DSP === %d\n", qcmd->type);
646 switch (qcmd->type) {
647 case MSM_CAM_Q_VFE_EVT:
648 case MSM_CAM_Q_VFE_MSG:
649 data = (struct msm_vfe_resp *)(qcmd->command);
651 /* adsp event and message */
652 se.resptype = MSM_CAM_RESP_STAT_EVT_MSG;
654 /* 0 - msg from aDSP, 1 - event from mARM */
655 se.stats_event.type = data->evt_msg.type;
656 se.stats_event.msg_id = data->evt_msg.msg_id;
657 se.stats_event.len = data->evt_msg.len;
659 CDBG("msm_get_stats, qcmd->type = %d\n", qcmd->type);
660 CDBG("length = %d\n", se.stats_event.len);
661 CDBG("msg_id = %d\n", se.stats_event.msg_id);
/* AF/AWB-exposure stats: translate the buffer's physical address
 * back to the registered userspace stats buffer. */
663 if ((data->type == VFE_MSG_STATS_AF) ||
664 (data->type == VFE_MSG_STATS_WE)) {
667 msm_pmem_stats_ptov_lookup(sync,
671 pr_err("%s: msm_pmem_stats_ptov_lookup error\n",
677 if (copy_to_user((void *)(se.stats_event.data),
679 sizeof(struct msm_stats_buf))) {
684 } else if ((data->evt_msg.len > 0) &&
685 (data->type == VFE_MSG_GENERAL)) {
686 if (copy_to_user((void *)(se.stats_event.data),
688 data->evt_msg.len)) {
692 } else if (data->type == VFE_MSG_OUTPUT1 ||
693 data->type == VFE_MSG_OUTPUT2) {
694 if (copy_to_user((void *)(se.stats_event.data),
/* Snapshot with post-processing enabled: hand userspace the main
 * (or raw) image buffer found in the frame region table. */
700 } else if (data->type == VFE_MSG_SNAPSHOT && sync->pict_pp) {
701 struct msm_postproc buf;
702 struct msm_pmem_region region;
703 buf.fmnum = msm_pmem_region_lookup(&sync->frame,
706 if (buf.fmnum == 1) {
707 buf.fmain.buffer = (unsigned long)region.vaddr;
708 buf.fmain.y_off = region.y_off;
709 buf.fmain.cbcr_off = region.cbcr_off;
710 buf.fmain.fd = region.fd;
712 buf.fmnum = msm_pmem_region_lookup(&sync->frame,
713 MSM_PMEM_RAW_MAINIMG,
715 if (buf.fmnum == 1) {
716 buf.fmain.path = MSM_FRAME_PREV_2;
718 (unsigned long)region.vaddr;
719 buf.fmain.fd = region.fd;
721 pr_err("%s: pmem lookup failed\n",
727 if (copy_to_user((void *)(se.stats_event.data), &buf,
733 CDBG("snapshot copy_to_user!\n");
738 /* control command from control thread */
739 ctrl = (struct msm_ctrl_cmd *)(qcmd->command);
741 CDBG("msm_get_stats, qcmd->type = %d\n", qcmd->type);
742 CDBG("length = %d\n", ctrl->length);
744 if (ctrl->length > 0) {
745 if (copy_to_user((void *)(se.ctrl_cmd.value),
754 se.resptype = MSM_CAM_RESP_CTRL;
756 /* what to control */
757 se.ctrl_cmd.type = ctrl->type;
758 se.ctrl_cmd.length = ctrl->length;
759 se.ctrl_cmd.resp_fd = ctrl->resp_fd;
762 case MSM_CAM_Q_V4L2_REQ:
763 /* control command from v4l2 client */
764 ctrl = (struct msm_ctrl_cmd *)(qcmd->command);
766 CDBG("msm_get_stats, qcmd->type = %d\n", qcmd->type);
767 CDBG("length = %d\n", ctrl->length);
769 if (ctrl->length > 0) {
770 if (copy_to_user((void *)(se.ctrl_cmd.value),
771 ctrl->value, ctrl->length)) {
778 /* 2 tells config thread this is v4l2 request */
779 se.resptype = MSM_CAM_RESP_V4L2;
781 /* what to control */
782 se.ctrl_cmd.type = ctrl->type;
783 se.ctrl_cmd.length = ctrl->length;
789 } /* switch qcmd->type */
791 if (copy_to_user((void *)arg, &se, sizeof(se))) {
799 CDBG("msm_get_stats: %d\n", rc);
/* ioctl entry for MSM_CAM_IOCTL_CTRL_CMD_DONE: the config thread reports
 * the result of an earlier CTRL_COMMAND. Copies the result (header plus
 * optional payload) into a fresh msm_queue_cmd, queues it on the control
 * device's ctrl_status_q, and wakes the waiting control thread (which is
 * blocked inside __msm_control()). NOTE(review): kmalloc NULL check and
 * qcmd->type assignment are not visible in this view.
 */
803 static int msm_ctrl_cmd_done(struct msm_control_device *ctrl_pmsm,
809 struct msm_ctrl_cmd udata, *ctrlcmd;
810 struct msm_queue_cmd *qcmd = NULL;
812 if (copy_from_user(&udata, arg, sizeof(struct msm_ctrl_cmd))) {
813 ERR_COPY_FROM_USER();
818 qcmd = kmalloc(sizeof(struct msm_queue_cmd) +
819 sizeof(struct msm_ctrl_cmd) + udata.length,
826 qcmd->command = ctrlcmd = (struct msm_ctrl_cmd *)(qcmd + 1);
828 if (udata.length > 0) {
829 ctrlcmd->value = ctrlcmd + 1;
830 if (copy_from_user(ctrlcmd->value,
833 ERR_COPY_FROM_USER();
839 ctrlcmd->value = NULL;
842 CDBG("msm_ctrl_cmd_done: end rc = %d\n", rc);
844 /* wake up control thread */
845 spin_lock_irqsave(&ctrl_pmsm->ctrl_q.ctrl_status_q_lock, flags);
846 list_add_tail(&qcmd->list, &ctrl_pmsm->ctrl_q.ctrl_status_q);
847 wake_up(&ctrl_pmsm->ctrl_q.ctrl_status_wait);
848 spin_unlock_irqrestore(&ctrl_pmsm->ctrl_q.ctrl_status_q_lock, flags);
/* ioctl entry for MSM_CAM_IOCTL_CONFIG_VFE: copy a VFE config command
 * from userspace; for stats-enable commands, resolve the registered
 * stats pmem regions into an axidata descriptor; then pass everything to
 * the VFE driver's config hook. NOTE(review): the `data` pointer setup
 * and several return lines are not visible in this view.
 */
854 static int msm_config_vfe(struct msm_sync *sync, void __user *arg)
856 struct msm_vfe_cfg_cmd cfgcmd;
857 struct msm_pmem_region region[8];
858 struct axidata axi_data;
862 memset(&axi_data, 0, sizeof(axi_data));
864 if (copy_from_user(&cfgcmd, arg, sizeof(cfgcmd))) {
865 ERR_COPY_FROM_USER();
869 switch (cfgcmd.cmd_type) {
870 case CMD_STATS_ENABLE:
/* NOTE(review): "®ion" in this function is mojibake for "&region"
 * (HTML &reg; entity corruption) — restore the original encoding. */
872 msm_pmem_region_lookup(&sync->stats,
873 MSM_PMEM_AEC_AWB, ®ion[0],
874 NUM_WB_EXP_STAT_OUTPUT_BUFFERS);
875 if (!axi_data.bufnum1) {
876 pr_err("%s: pmem region lookup error\n", __func__);
879 axi_data.region = ®ion[0];
882 case CMD_STATS_AF_ENABLE:
884 msm_pmem_region_lookup(&sync->stats,
885 MSM_PMEM_AF, ®ion[0],
886 NUM_AF_STAT_OUTPUT_BUFFERS);
887 if (!axi_data.bufnum1) {
888 pr_err("%s: pmem region lookup error\n", __func__);
891 axi_data.region = ®ion[0];
895 case CMD_STATS_DISABLE:
898 pr_err("%s: unknown command type %d\n",
899 __func__, cfgcmd.cmd_type);
904 if (sync->vfefn.vfe_config)
905 rc = sync->vfefn.vfe_config(&cfgcmd, data);
910 static int msm_frame_axi_cfg(struct msm_sync *sync,
911 struct msm_vfe_cfg_cmd *cfgcmd)
914 struct axidata axi_data;
915 void *data = &axi_data;
916 struct msm_pmem_region region[8];
919 memset(&axi_data, 0, sizeof(axi_data));
921 switch (cfgcmd->cmd_type) {
922 case CMD_AXI_CFG_OUT1:
923 pmem_type = MSM_PMEM_OUTPUT1;
925 msm_pmem_region_lookup(&sync->frame, pmem_type,
927 if (!axi_data.bufnum1) {
928 pr_err("%s: pmem region lookup error\n", __func__);
933 case CMD_AXI_CFG_OUT2:
934 pmem_type = MSM_PMEM_OUTPUT2;
936 msm_pmem_region_lookup(&sync->frame, pmem_type,
938 if (!axi_data.bufnum2) {
939 pr_err("%s: pmem region lookup error\n", __func__);
944 case CMD_AXI_CFG_SNAP_O1_AND_O2:
945 pmem_type = MSM_PMEM_THUMBAIL;
947 msm_pmem_region_lookup(&sync->frame, pmem_type,
949 if (!axi_data.bufnum1) {
950 pr_err("%s: pmem region lookup error\n", __func__);
954 pmem_type = MSM_PMEM_MAINIMG;
956 msm_pmem_region_lookup(&sync->frame, pmem_type,
957 ®ion[axi_data.bufnum1], 8);
958 if (!axi_data.bufnum2) {
959 pr_err("%s: pmem region lookup error\n", __func__);
964 case CMD_RAW_PICT_AXI_CFG:
965 pmem_type = MSM_PMEM_RAW_MAINIMG;
967 msm_pmem_region_lookup(&sync->frame, pmem_type,
969 if (!axi_data.bufnum2) {
970 pr_err("%s: pmem region lookup error\n", __func__);
980 pr_err("%s: unknown command type %d\n",
981 __func__, cfgcmd->cmd_type);
985 axi_data.region = ®ion[0];
987 /* send the AXI configuration command to driver */
988 if (sync->vfefn.vfe_config)
989 rc = sync->vfefn.vfe_config(cfgcmd, data);
/* ioctl entry for MSM_CAM_IOCTL_GET_SENSOR_INFO: report the sensor name
 * and flash capability from the platform data back to userspace. */
994 static int msm_get_sensor_info(struct msm_sync *sync, void __user *arg)
997 struct msm_camsensor_info info;
998 struct msm_camera_sensor_info *sdata;
1000 if (copy_from_user(&info,
1002 sizeof(struct msm_camsensor_info))) {
1003 ERR_COPY_FROM_USER();
/* Board-specific sensor description registered at probe time. */
1007 sdata = sync->pdev->dev.platform_data;
1008 CDBG("sensor_name %s\n", sdata->sensor_name);
1010 memcpy(&info.name[0],
1013 info.flash_enabled = sdata->flash_type != MSM_CAMERA_FLASH_NONE;
1015 /* copy back to user space */
1016 if (copy_to_user((void *)arg,
1018 sizeof(struct msm_camsensor_info))) {
/* Return a preview frame buffer to the VFE: translate the userspace
 * buffer (vaddr + offsets + fd) back to its physical address and issue a
 * CMD_FRAME_BUF_RELEASE through the VFE config hook. Fails when the
 * lookup does not match a registered, in-flight region.
 */
1026 static int __msm_put_frame_buf(struct msm_sync *sync,
1027 struct msm_frame *pb)
1030 struct msm_vfe_cfg_cmd cfgcmd;
1034 pphy = msm_pmem_frame_vtop_lookup(sync,
1036 pb->y_off, pb->cbcr_off, pb->fd);
1039 CDBG("rel: vaddr = 0x%lx, paddr = 0x%lx\n",
1041 cfgcmd.cmd_type = CMD_FRAME_BUF_RELEASE;
1042 cfgcmd.value = (void *)pb;
1043 if (sync->vfefn.vfe_config)
1044 rc = sync->vfefn.vfe_config(&cfgcmd, &pphy);
1046 pr_err("%s: msm_pmem_frame_vtop_lookup failed\n",
/* ioctl entry for MSM_CAM_IOCTL_RELEASE_FRAME_BUFFER: copy the frame
 * descriptor from userspace and delegate to __msm_put_frame_buf(). */
1054 static int msm_put_frame_buffer(struct msm_sync *sync, void __user *arg)
1056 struct msm_frame buf_t;
1058 if (copy_from_user(&buf_t,
1060 sizeof(struct msm_frame))) {
1061 ERR_COPY_FROM_USER();
1065 return __msm_put_frame_buf(sync, &buf_t);
/* Route a pmem registration to the right region table: frame-type
 * buffers go on sync->frame, stats-type buffers on sync->stats.
 * NOTE(review): the default case and final return are not visible here.
 */
1068 static int __msm_register_pmem(struct msm_sync *sync,
1069 struct msm_pmem_info *pinfo)
1073 switch (pinfo->type) {
1074 case MSM_PMEM_OUTPUT1:
1075 case MSM_PMEM_OUTPUT2:
1076 case MSM_PMEM_THUMBAIL:
1077 case MSM_PMEM_MAINIMG:
1078 case MSM_PMEM_RAW_MAINIMG:
1079 rc = msm_pmem_table_add(&sync->frame, pinfo);
1082 case MSM_PMEM_AEC_AWB:
1084 rc = msm_pmem_table_add(&sync->stats, pinfo);
/* ioctl entry for MSM_CAM_IOCTL_REGISTER_PMEM: copy the msm_pmem_info
 * from userspace and delegate to __msm_register_pmem(). */
1095 static int msm_register_pmem(struct msm_sync *sync, void __user *arg)
1097 struct msm_pmem_info info;
1099 if (copy_from_user(&info, arg, sizeof(info))) {
1100 ERR_COPY_FROM_USER();
1104 return __msm_register_pmem(sync, &info);
/* Build the AXI buffer configuration for a stats-path command: pick the
 * pmem type (AEC/AWB or AF) from the command, look up the registered
 * stats regions, and pass the command plus axidata to the VFE config
 * hook. NOTE(review): the lookup count for CMD_STATS_AF_AXI_CFG reuses
 * NUM_WB_EXP_STAT_OUTPUT_BUFFERS — possibly intentional, verify against
 * the full file. "®ion" is mojibake for "&region" — fix encoding.
 */
1107 static int msm_stats_axi_cfg(struct msm_sync *sync,
1108 struct msm_vfe_cfg_cmd *cfgcmd)
1111 struct axidata axi_data;
1112 void *data = &axi_data;
1114 struct msm_pmem_region region[3];
1115 int pmem_type = MSM_PMEM_MAX;
1117 memset(&axi_data, 0, sizeof(axi_data));
1119 switch (cfgcmd->cmd_type) {
1120 case CMD_STATS_AXI_CFG:
1121 pmem_type = MSM_PMEM_AEC_AWB;
1123 case CMD_STATS_AF_AXI_CFG:
1124 pmem_type = MSM_PMEM_AF;
1130 pr_err("%s: unknown command type %d\n",
1131 __func__, cfgcmd->cmd_type);
1135 if (cfgcmd->cmd_type != CMD_GENERAL) {
1137 msm_pmem_region_lookup(&sync->stats, pmem_type,
1138 ®ion[0], NUM_WB_EXP_STAT_OUTPUT_BUFFERS);
1139 if (!axi_data.bufnum1) {
1140 pr_err("%s: pmem region lookup error\n", __func__);
1143 axi_data.region = ®ion[0];
1146 /* send the AEC/AWB STATS configuration command to driver */
1147 if (sync->vfefn.vfe_config)
1148 rc = sync->vfefn.vfe_config(cfgcmd, &axi_data);
/* ioctl entry for MSM_CAM_IOCTL_RELEASE_STATS_BUFFER: translate the
 * userspace stats buffer back to its physical address and issue the
 * matching buffer-release command (AEC/AWB or AF) to the VFE.
 */
1153 static int msm_put_stats_buffer(struct msm_sync *sync, void __user *arg)
1157 struct msm_stats_buf buf;
1159 struct msm_vfe_cfg_cmd cfgcmd;
1161 if (copy_from_user(&buf, arg,
1162 sizeof(struct msm_stats_buf))) {
1163 ERR_COPY_FROM_USER();
1167 CDBG("msm_put_stats_buffer\n");
1168 pphy = msm_pmem_stats_vtop_lookup(sync, buf.buffer, buf.fd);
1171 if (buf.type == STAT_AEAW)
1172 cfgcmd.cmd_type = CMD_STATS_BUF_RELEASE;
1173 else if (buf.type == STAT_AF)
1174 cfgcmd.cmd_type = CMD_STATS_AF_BUF_RELEASE;
1176 pr_err("%s: invalid buf type %d\n",
1183 cfgcmd.value = (void *)&buf;
1185 if (sync->vfefn.vfe_config) {
1186 rc = sync->vfefn.vfe_config(&cfgcmd, &pphy);
1188 pr_err("msm_put_stats_buffer: "\
1189 "vfe_config err %d\n", rc);
1191 pr_err("msm_put_stats_buffer: vfe_config is NULL\n");
1193 pr_err("msm_put_stats_buffer: NULL physical address\n");
/* ioctl entry for MSM_CAM_IOCTL_AXI_CONFIG: copy the VFE config command
 * from userspace and dispatch to the frame-path or stats-path AXI
 * configuration helper based on the command type. */
1201 static int msm_axi_config(struct msm_sync *sync, void __user *arg)
1203 struct msm_vfe_cfg_cmd cfgcmd;
1205 if (copy_from_user(&cfgcmd, arg, sizeof(cfgcmd))) {
1206 ERR_COPY_FROM_USER();
1210 switch (cfgcmd.cmd_type) {
1211 case CMD_AXI_CFG_OUT1:
1212 case CMD_AXI_CFG_OUT2:
1213 case CMD_AXI_CFG_SNAP_O1_AND_O2:
1214 case CMD_RAW_PICT_AXI_CFG:
1215 return msm_frame_axi_cfg(sync, &cfgcmd);
1217 case CMD_STATS_AXI_CFG:
1218 case CMD_STATS_AF_AXI_CFG:
1219 return msm_stats_axi_cfg(sync, &cfgcmd);
1222 pr_err("%s: unknown command type %d\n",
/* Block up to ctrl->timeout_ms for a snapshot frame on
 * sync->pict_frame_q, dequeue it, and copy the attached control result
 * (type/status) back into *ctrl for the caller.
 * NOTE(review): the error returns and qcmd free are not visible here.
 */
1231 static int __msm_get_pic(struct msm_sync *sync, struct msm_ctrl_cmd *ctrl)
1233 unsigned long flags;
1237 struct msm_queue_cmd *qcmd = NULL;
1239 tm = (int)ctrl->timeout_ms;
1241 rc = wait_event_interruptible_timeout(
1242 sync->pict_frame_wait,
1243 !list_empty_careful(&sync->pict_frame_q),
1244 msecs_to_jiffies(tm));
1245 if (list_empty_careful(&sync->pict_frame_q)) {
1249 pr_err("msm_camera_get_picture, rc = %d\n", rc);
1254 spin_lock_irqsave(&sync->pict_frame_q_lock, flags);
1255 BUG_ON(list_empty(&sync->pict_frame_q));
1256 qcmd = list_first_entry(&sync->pict_frame_q,
1257 struct msm_queue_cmd, list);
1258 list_del_init(&qcmd->list);
1259 spin_unlock_irqrestore(&sync->pict_frame_q_lock, flags);
1261 if (qcmd->command != NULL) {
1262 struct msm_ctrl_cmd *q =
1263 (struct msm_ctrl_cmd *)qcmd->command;
1264 ctrl->type = q->type;
1265 ctrl->status = q->status;
/* ioctl entry for MSM_CAM_IOCTL_GET_PICTURE: copy the request in, wait
 * for a snapshot via __msm_get_pic(), copy the cached crop info out when
 * present (the user buffer must be at least sync->croplen bytes), then
 * copy the result header back. NOTE(review): error returns between the
 * visible lines are not shown.
 */
1275 static int msm_get_pic(struct msm_sync *sync, void __user *arg)
1277 struct msm_ctrl_cmd ctrlcmd_t;
1280 if (copy_from_user(&ctrlcmd_t,
1282 sizeof(struct msm_ctrl_cmd))) {
1283 ERR_COPY_FROM_USER();
1287 rc = __msm_get_pic(sync, &ctrlcmd_t);
1291 if (sync->croplen) {
1292 if (ctrlcmd_t.length < sync->croplen) {
1293 pr_err("msm_get_pic: invalid len %d\n",
1297 if (copy_to_user(ctrlcmd_t.value,
1305 if (copy_to_user((void *)arg,
1307 sizeof(struct msm_ctrl_cmd))) {
/* ioctl entry for MSM_CAM_IOCTL_SET_CROP: cache the userspace crop blob
 * in sync->cropinfo. Allocates on first use; rejects a crop larger than
 * the existing allocation (croplen only ever set once — see below).
 * NOTE(review): the reject return and the cropinfo NULL-reset after
 * kfree() are not visible in this view — verify no dangling pointer.
 */
1314 static int msm_set_crop(struct msm_sync *sync, void __user *arg)
1316 struct crop_info crop;
1318 if (copy_from_user(&crop,
1320 sizeof(struct crop_info))) {
1321 ERR_COPY_FROM_USER();
1325 if (!sync->croplen) {
1326 sync->cropinfo = kmalloc(crop.len, GFP_KERNEL);
1327 if (!sync->cropinfo)
1329 } else if (sync->croplen < crop.len)
1332 if (copy_from_user(sync->cropinfo,
1335 ERR_COPY_FROM_USER();
1336 kfree(sync->cropinfo);
1340 sync->croplen = crop.len;
/* ioctl entry for MSM_CAM_IOCTL_PICT_PP_DONE: userspace reports that
 * snapshot post-processing finished. Wrap the reported type/status in a
 * synthetic MSM_CAM_Q_VFE_MSG queue command and wake whoever is blocked
 * in __msm_get_pic() on pict_frame_q. NOTE(review): kmalloc NULL check
 * and pict_pp gating are not visible in this view.
 */
1345 static int msm_pict_pp_done(struct msm_sync *sync, void __user *arg)
1347 struct msm_ctrl_cmd udata;
1348 struct msm_ctrl_cmd *ctrlcmd = NULL;
1349 struct msm_queue_cmd *qcmd = NULL;
1350 unsigned long flags;
1356 if (copy_from_user(&udata, arg, sizeof(struct msm_ctrl_cmd))) {
1357 ERR_COPY_FROM_USER();
1362 qcmd = kmalloc(sizeof(struct msm_queue_cmd) +
1363 sizeof(struct msm_ctrl_cmd),
1370 qcmd->type = MSM_CAM_Q_VFE_MSG;
1371 qcmd->command = ctrlcmd = (struct msm_ctrl_cmd *)(qcmd + 1);
1372 memset(ctrlcmd, 0, sizeof(struct msm_ctrl_cmd));
1373 ctrlcmd->type = udata.type;
1374 ctrlcmd->status = udata.status;
1376 spin_lock_irqsave(&sync->pict_frame_q_lock, flags);
1377 list_add_tail(&qcmd->list, &sync->pict_frame_q);
1378 spin_unlock_irqrestore(&sync->pict_frame_q_lock, flags);
1379 wake_up(&sync->pict_frame_wait);
/* Dispatch ioctls shared by the config and control devices: pmem
 * registration/unregistration. NOTE(review): default case not visible. */
1385 static long msm_ioctl_common(struct msm_device *pmsm,
1389 CDBG("msm_ioctl_common\n");
1391 case MSM_CAM_IOCTL_REGISTER_PMEM:
1392 return msm_register_pmem(pmsm->sync, argp);
1393 case MSM_CAM_IOCTL_UNREGISTER_PMEM:
1394 return msm_pmem_table_del(pmsm->sync, argp);
/* unlocked_ioctl handler for the config device node: dispatches the
 * config thread's requests (sensor info, VFE config/enable/disable,
 * stats, AXI setup, crop, post-processing, sensor I/O, flash), falling
 * back to msm_ioctl_common() for shared commands.
 */
1400 static long msm_ioctl_config(struct file *filep, unsigned int cmd,
1404 void __user *argp = (void __user *)arg;
1405 struct msm_device *pmsm = filep->private_data;
1407 CDBG("msm_ioctl_config cmd = %d\n", _IOC_NR(cmd));
1410 case MSM_CAM_IOCTL_GET_SENSOR_INFO:
1411 rc = msm_get_sensor_info(pmsm->sync, argp);
1414 case MSM_CAM_IOCTL_CONFIG_VFE:
1415 /* Coming from config thread for update */
1416 rc = msm_config_vfe(pmsm->sync, argp);
1419 case MSM_CAM_IOCTL_GET_STATS:
1420 /* Coming from config thread wait
1421 * for vfe statistics and control requests */
1422 rc = msm_get_stats(pmsm->sync, argp);
1425 case MSM_CAM_IOCTL_ENABLE_VFE:
1426 /* This request comes from control thread:
1427 * enable either QCAMTASK or VFETASK */
1428 rc = msm_enable_vfe(pmsm->sync, argp);
1431 case MSM_CAM_IOCTL_DISABLE_VFE:
1432 /* This request comes from control thread:
1433 * disable either QCAMTASK or VFETASK */
1434 rc = msm_disable_vfe(pmsm->sync, argp);
1437 case MSM_CAM_IOCTL_VFE_APPS_RESET:
1438 msm_camio_vfe_blk_reset();
1442 case MSM_CAM_IOCTL_RELEASE_STATS_BUFFER:
1443 rc = msm_put_stats_buffer(pmsm->sync, argp);
1446 case MSM_CAM_IOCTL_AXI_CONFIG:
1447 rc = msm_axi_config(pmsm->sync, argp);
1450 case MSM_CAM_IOCTL_SET_CROP:
1451 rc = msm_set_crop(pmsm->sync, argp);
/* Toggle snapshot post-processing; __msm_get_pic then waits for
 * userspace to signal completion via PICT_PP_DONE. */
1454 case MSM_CAM_IOCTL_PICT_PP: {
1456 if (copy_from_user(&enable, argp, sizeof(enable))) {
1457 ERR_COPY_FROM_USER();
1460 pmsm->sync->pict_pp = enable;
1466 case MSM_CAM_IOCTL_PICT_PP_DONE:
1467 rc = msm_pict_pp_done(pmsm->sync, argp);
1470 case MSM_CAM_IOCTL_SENSOR_IO_CFG:
1471 rc = pmsm->sync->sctrl.s_config(argp);
1474 case MSM_CAM_IOCTL_FLASH_LED_CFG: {
1476 if (copy_from_user(&led_state, argp, sizeof(led_state))) {
1477 ERR_COPY_FROM_USER();
1480 rc = msm_camera_flash_set_led_state(led_state);
1485 rc = msm_ioctl_common(pmsm, cmd, argp);
1489 CDBG("msm_ioctl_config cmd = %d DONE\n", _IOC_NR(cmd));
/* Forward declaration: defined below, used by the frame ioctl handler. */
1493 static int msm_unblock_poll_frame(struct msm_sync *);
/* unlocked_ioctl handler for the frame device node: the frame thread
 * fetches preview frames, returns buffers to the VFE, and can unblock a
 * pending poll(). NOTE(review): default case/return not visible. */
1495 static long msm_ioctl_frame(struct file *filep, unsigned int cmd,
1499 void __user *argp = (void __user *)arg;
1500 struct msm_device *pmsm = filep->private_data;
1504 case MSM_CAM_IOCTL_GETFRAME:
1505 /* Coming from frame thread to get frame
1506 * after SELECT is done */
1507 rc = msm_get_frame(pmsm->sync, argp);
1509 case MSM_CAM_IOCTL_RELEASE_FRAME_BUFFER:
1510 rc = msm_put_frame_buffer(pmsm->sync, argp);
1512 case MSM_CAM_IOCTL_UNBLOCK_POLL_FRAME:
1513 rc = msm_unblock_poll_frame(pmsm->sync);
/* unlocked_ioctl handler for the control device node: issues control
 * commands (blocking and non-blocking variants), receives command
 * completions from the config thread, and fetches snapshots; falls back
 * to msm_ioctl_common() for shared commands.
 */
1523 static long msm_ioctl_control(struct file *filep, unsigned int cmd,
1527 void __user *argp = (void __user *)arg;
1528 struct msm_control_device *ctrl_pmsm = filep->private_data;
1529 struct msm_device *pmsm = ctrl_pmsm->pmsm;
1532 case MSM_CAM_IOCTL_CTRL_COMMAND:
1533 /* Coming from control thread, may need to wait for
1535 rc = msm_control(ctrl_pmsm, 1, argp);
1537 case MSM_CAM_IOCTL_CTRL_COMMAND_2:
1538 /* Sends a message, returns immediately */
1539 rc = msm_control(ctrl_pmsm, 0, argp);
1541 case MSM_CAM_IOCTL_CTRL_CMD_DONE:
1542 /* Config thread calls the control thread to notify it
1543 * of the result of a MSM_CAM_IOCTL_CTRL_COMMAND.
1545 rc = msm_ctrl_cmd_done(ctrl_pmsm, argp);
1547 case MSM_CAM_IOCTL_GET_PICTURE:
1548 rc = msm_get_pic(pmsm->sync, argp);
1551 rc = msm_ioctl_common(pmsm, cmd, argp);
/* Common release path for all three device nodes. Under sync->lock,
 * when the open count drops to zero: release the VFE, free cached crop
 * info, drop every registered pmem region (frame and stats), drain the
 * event/preview/snapshot queues, and release the sensor.
 * NOTE(review): the opencnt decrement and the hlist_del()/kfree() of
 * each region are not visible in this view; see also the FIXME at the
 * top of the file about which queues to flush when opencnt != 0.
 */
1558 static int __msm_release(struct msm_sync *sync)
1560 struct msm_pmem_region *region;
1561 struct hlist_node *hnode;
1562 struct hlist_node *n;
1564 mutex_lock(&sync->lock);
1568 if (!sync->opencnt) {
1569 /* need to clean up system resource */
1570 if (sync->vfefn.vfe_release)
1571 sync->vfefn.vfe_release(sync->pdev);
1573 if (sync->cropinfo) {
1574 kfree(sync->cropinfo);
1575 sync->cropinfo = NULL;
1579 hlist_for_each_entry_safe(region, hnode, n,
1580 &sync->frame, list) {
1582 put_pmem_file(region->file);
1586 hlist_for_each_entry_safe(region, hnode, n,
1587 &sync->stats, list) {
1589 put_pmem_file(region->file);
1593 MSM_DRAIN_QUEUE(sync, msg_event_q);
1594 MSM_DRAIN_QUEUE(sync, prev_frame_q);
1595 MSM_DRAIN_QUEUE(sync, pict_frame_q);
1597 sync->sctrl.s_release();
1599 sync->apps_id = NULL;
1600 CDBG("msm_release completed!\n");
1602 mutex_unlock(&sync->lock);
/* release handler for the config device node: run the common release
 * path and mark the device as reopenable. */
1607 static int msm_release_config(struct inode *node, struct file *filep)
1610 struct msm_device *pmsm = filep->private_data;
1611 printk("msm_camera: RELEASE %s\n", filep->f_path.dentry->d_name.name);
1612 rc = __msm_release(pmsm->sync);
1613 atomic_set(&pmsm->opened, 0);
/* release handler for the control device node: run the common release
 * path, then drain this control device's private status queue and the
 * shared snapshot queue. NOTE(review): the rc check guarding the drains
 * and the ctrl_pmsm free are not visible in this view. */
1617 static int msm_release_control(struct inode *node, struct file *filep)
1620 struct msm_control_device *ctrl_pmsm = filep->private_data;
1621 struct msm_device *pmsm = ctrl_pmsm->pmsm;
1622 printk(KERN_INFO "msm_camera: RELEASE %s\n",
1623 filep->f_path.dentry->d_name.name);
1624 rc = __msm_release(pmsm->sync);
1626 MSM_DRAIN_QUEUE(&ctrl_pmsm->ctrl_q, ctrl_status_q);
1627 MSM_DRAIN_QUEUE(pmsm->sync, pict_frame_q);
/* release handler for the frame device node: run the common release
 * path, drain the preview frame queue, and mark the device reopenable. */
1633 static int msm_release_frame(struct inode *node, struct file *filep)
1636 struct msm_device *pmsm = filep->private_data;
1637 printk(KERN_INFO "msm_camera: RELEASE %s\n",
1638 filep->f_path.dentry->d_name.name);
1639 rc = __msm_release(pmsm->sync);
1641 MSM_DRAIN_QUEUE(pmsm->sync, prev_frame_q);
1642 atomic_set(&pmsm->opened, 0);
/* Force a blocked poll() on the frame device to return: set the
 * unblock flag under the preview queue lock and wake the waiters
 * (consumed by __msm_poll_frame() below). */
1647 static int msm_unblock_poll_frame(struct msm_sync *sync)
1649 unsigned long flags;
1650 CDBG("msm_unblock_poll_frame\n");
1651 spin_lock_irqsave(&sync->prev_frame_q_lock, flags);
1652 sync->unblock_poll_frame = 1;
1653 wake_up(&sync->prev_frame_wait);
1654 spin_unlock_irqrestore(&sync->prev_frame_q_lock, flags);
/* poll() backend for the frame device: report readable when a preview
 * frame is queued, or when msm_unblock_poll_frame() forced a wakeup
 * (one-shot — the flag is cleared here under the queue lock). */
1658 static unsigned int __msm_poll_frame(struct msm_sync *sync,
1660 struct poll_table_struct *pll_table)
1663 unsigned long flags;
1665 poll_wait(filep, &sync->prev_frame_wait, pll_table);
1667 spin_lock_irqsave(&sync->prev_frame_q_lock, flags);
1668 if (!list_empty_careful(&sync->prev_frame_q))
1670 rc = POLLIN | POLLRDNORM;
1671 if (sync->unblock_poll_frame) {
1672 CDBG("%s: sync->unblock_poll_frame is true\n", __func__);
1674 sync->unblock_poll_frame = 0;
1676 spin_unlock_irqrestore(&sync->prev_frame_q_lock, flags);
1681 static unsigned int msm_poll_frame(struct file *filep,
1682 struct poll_table_struct *pll_table)
1684 struct msm_device *pmsm = filep->private_data;
1685 return __msm_poll_frame(pmsm->sync, filep, pll_table);
/* This function executes in interrupt context. */
1692 static void *msm_vfe_sync_alloc(int size,
1693 void *syncdata __attribute__((unused)))
1695 struct msm_queue_cmd *qcmd =
1696 kmalloc(sizeof(struct msm_queue_cmd) + size, GFP_ATOMIC);
1697 return qcmd ? qcmd + 1 : NULL;
/* This function executes in interrupt context. */
/*
 * VFE completion callback, invoked in interrupt context.
 *
 * Dispatches an incoming VFE response: preview output messages
 * (OUTPUT1/OUTPUT2) are mirrored onto the preview frame queue,
 * snapshot completion wakes the picture queue, and every message is
 * finally queued on the config thread's event queue.
 * NOTE(review): several lines (NULL checks, break statements, closing
 * braces, allocation-failure handling) are not visible in this listing
 * — verify against the complete source before relying on the exact
 * control flow.
 */
static void msm_vfe_sync(struct msm_vfe_resp *vdata,
	enum msm_queue qtype, void *syncdata)
	struct msm_queue_cmd *qcmd = NULL;
	struct msm_queue_cmd *qcmd_frame = NULL;
	struct msm_vfe_phy_info *fphy;
	unsigned long flags;
	struct msm_sync *sync = (struct msm_sync *)syncdata;
	pr_err("msm_camera: no context in dsp callback.\n");
	/* Recover the queue-cmd header placed just before the payload by
	 * msm_vfe_sync_alloc(). */
	qcmd = ((struct msm_queue_cmd *)vdata) - 1;
	if (qtype == MSM_CAM_Q_VFE_MSG) {
		switch (vdata->type) {
		case VFE_MSG_OUTPUT1:
		case VFE_MSG_OUTPUT2:
			/* Allocate a frame entry with the phy info appended;
			 * GFP_ATOMIC — interrupt context. */
			kmalloc(sizeof(struct msm_queue_cmd) +
				sizeof(struct msm_vfe_phy_info),
			fphy = (struct msm_vfe_phy_info *)(qcmd_frame + 1);
			qcmd_frame->type = MSM_CAM_Q_VFE_MSG;
			qcmd_frame->command = fphy;
			CDBG("qcmd_frame= 0x%x phy_y= 0x%x, phy_cbcr= 0x%x\n",
				(int) qcmd_frame, fphy->y_phy, fphy->cbcr_phy);
			/* Publish the frame and wake any poller/reader. */
			spin_lock_irqsave(&sync->prev_frame_q_lock, flags);
			list_add_tail(&qcmd_frame->list, &sync->prev_frame_q);
			wake_up(&sync->prev_frame_wait);
			spin_unlock_irqrestore(&sync->prev_frame_q_lock, flags);
			CDBG("woke up frame thread\n");
		case VFE_MSG_SNAPSHOT:
			CDBG("snapshot pp = %d\n", sync->pict_pp);
			/* Empty marker entry: presence on pict_frame_q is the
			 * signal; command stays NULL. */
			kmalloc(sizeof(struct msm_queue_cmd),
			qcmd_frame->type = MSM_CAM_Q_VFE_MSG;
			qcmd_frame->command = NULL;
			spin_lock_irqsave(&sync->pict_frame_q_lock,
			list_add_tail(&qcmd_frame->list, &sync->pict_frame_q);
			wake_up(&sync->pict_frame_wait);
			spin_unlock_irqrestore(&sync->pict_frame_q_lock, flags);
			CDBG("woke up picture thread\n");
			CDBG("%s: qtype = %d not handled\n",
				__func__, vdata->type);
	/* Every VFE message also goes to the config thread. */
	qcmd->command = (void *)vdata;
	CDBG("vdata->type = %d\n", vdata->type);
	spin_lock_irqsave(&sync->msg_event_q_lock, flags);
	list_add_tail(&qcmd->list, &sync->msg_event_q);
	wake_up(&sync->msg_event_wait);
	spin_unlock_irqrestore(&sync->msg_event_q_lock, flags);
	CDBG("woke up config thread\n");
/* Callback table handed to the VFE driver (via vfe_init in
 * __msm_open()): response dispatch and buffer allocation hooks. */
static struct msm_vfe_callback msm_vfe_s = {
	.vfe_resp = msm_vfe_sync,
	.vfe_alloc = msm_vfe_sync_alloc,
/*
 * Core open path shared by all device nodes (also exported to the V4L2
 * wrapper).  Serialized by sync->lock.
 *
 * Rejects the open if another application id already owns the sensor;
 * on the first open (opencnt == 0) it initializes the VFE, runs the
 * sensor init hook, and resets the pmem lookup tables and the
 * poll-unblock flag.
 * NOTE(review): rc declaration, opencnt increment, error paths and
 * return are not visible in this listing — verify against the
 * complete source.
 */
static int __msm_open(struct msm_sync *sync, const char *const apps_id)
	mutex_lock(&sync->lock);
	/* Only one application id may hold the sensor at a time. */
	if (sync->apps_id && strcmp(sync->apps_id, apps_id)) {
		pr_err("msm_camera(%s): sensor %s is already opened for %s\n",
			sync->sdata->sensor_name,
	sync->apps_id = apps_id;
	if (!sync->opencnt) {
		/* First opener brings up the hardware. */
		msm_camvfe_fn_init(&sync->vfefn, sync);
		if (sync->vfefn.vfe_init) {
			rc = sync->vfefn.vfe_init(&msm_vfe_s,
				pr_err("vfe_init failed at %d\n", rc);
			rc = sync->sctrl.s_init(sync->sdata);
				pr_err("sensor init failed: %d\n", rc);
			pr_err("no sensor init func\n");
		/* Fresh pmem registration tables for this session. */
		INIT_HLIST_HEAD(&sync->frame);
		INIT_HLIST_HEAD(&sync->stats);
		sync->unblock_poll_frame = 0;
	mutex_unlock(&sync->lock);
/*
 * Shared open() helper for the device nodes.
 *
 * When "once" is set, atomically claims the opened flag and refuses a
 * second opener of the same node.  Marks the file non-seekable, runs
 * the core __msm_open() path, and stashes the per-device state in
 * filep->private_data.
 * NOTE(review): the "once" parameter line, rc declaration, early
 * returns and closing brace are not visible in this listing.
 */
static int msm_open_common(struct inode *inode, struct file *filep,
	struct msm_device *pmsm =
		container_of(inode->i_cdev, struct msm_device, cdev);
	CDBG("msm_camera: open %s\n", filep->f_path.dentry->d_name.name);
	/* cmpxchg claims the node atomically; non-zero means already open. */
	if (atomic_cmpxchg(&pmsm->opened, 0, 1) && once) {
		pr_err("msm_camera: %s is already opened.\n",
			filep->f_path.dentry->d_name.name);
	rc = nonseekable_open(inode, filep);
		pr_err("msm_open: nonseekable_open error %d\n", rc);
	rc = __msm_open(pmsm->sync, MSM_APPS_ID_PROP);
	filep->private_data = pmsm;
	CDBG("msm_open() open: rc = %d\n", rc);
/* open() fop for single-opener nodes: once=1 enforces exclusivity. */
static int msm_open(struct inode *inode, struct file *filep)
	return msm_open_common(inode, filep, 1);
/*
 * open() fop for the "control" node, which allows multiple openers
 * (once=0).  Each opener gets its own msm_control_device wrapping the
 * shared msm_device, with a private control status queue.
 * NOTE(review): the kmalloc NULL check, error cleanup and return are
 * not visible in this listing — verify against the complete source.
 */
static int msm_open_control(struct inode *inode, struct file *filep)
	struct msm_control_device *ctrl_pmsm =
		kmalloc(sizeof(struct msm_control_device), GFP_KERNEL);
	rc = msm_open_common(inode, filep, 0);
	/* Wrap the shared device state set by msm_open_common(). */
	ctrl_pmsm->pmsm = filep->private_data;
	filep->private_data = ctrl_pmsm;
	spin_lock_init(&ctrl_pmsm->ctrl_q.ctrl_status_q_lock);
	INIT_LIST_HEAD(&ctrl_pmsm->ctrl_q.ctrl_status_q);
	init_waitqueue_head(&ctrl_pmsm->ctrl_q.ctrl_status_wait);
	CDBG("msm_open() open: rc = %d\n", rc);
/*
 * Synchronous control-command path exported to the V4L2 wrapper
 * (drv->ctrl).  Wraps the command in a queue entry, hands it to
 * __msm_control() with the caller's timeout, and copies the result
 * payload back into *out.
 * NOTE(review): rc declaration, error handling around the kmalloc and
 * __msm_control() result, frees and return are not visible in this
 * listing — verify against the complete source.
 */
static int __msm_v4l2_control(struct msm_sync *sync,
	struct msm_ctrl_cmd *out)
	struct msm_queue_cmd *qcmd = NULL, *rcmd = NULL;
	struct msm_ctrl_cmd *ctrl;
	/* Placeholder per-call status queue; see upstream FIXME naming. */
	struct msm_control_device_queue FIXME;
	/* wake up config thread, 4 is for V4L2 application */
	qcmd = kmalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL);
	pr_err("msm_control: cannot allocate buffer\n");
	qcmd->type = MSM_CAM_Q_V4L2_REQ;
	qcmd->command = out;
	/* Blocks until the config thread answers or the timeout expires. */
	rcmd = __msm_control(sync, &FIXME, qcmd, out->timeout_ms);
	ctrl = (struct msm_ctrl_cmd *)(rcmd->command);
	/* FIXME: we should just set out->length = ctrl->length; */
	BUG_ON(out->length < ctrl->length);
	memcpy(out->value, ctrl->value, ctrl->length);
	CDBG("__msm_v4l2_control: end rc = %d\n", rc);
/* File operations for the "config" node (config thread's interface). */
static const struct file_operations msm_fops_config = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = msm_ioctl_config,
	.release = msm_release_config,
/* File operations for the "control" node; supports multiple openers
 * via msm_open_control(). */
static const struct file_operations msm_fops_control = {
	.owner = THIS_MODULE,
	.open = msm_open_control,
	.unlocked_ioctl = msm_ioctl_control,
	.release = msm_release_control,
/* File operations for the "frame" node; the only node that supports
 * poll(), for preview frame availability. */
static const struct file_operations msm_fops_frame = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = msm_ioctl_frame,
	.release = msm_release_frame,
	.poll = msm_poll_frame,
/*
 * Create one character device node: a sysfs/udev device named
 * "<suffix><node>" plus the backing cdev wired to the given fops.
 * On cdev_add() failure the device is destroyed again, so the call is
 * all-or-nothing.
 * NOTE(review): the intermediate parameter lines (node, devno, suffix)
 * and the final return are not visible in this listing.
 */
static int msm_setup_cdev(struct msm_device *msm,
	const struct file_operations *fops)
	struct device *device =
		device_create(msm_class, NULL,
			"%s%d", suffix, node);
	if (IS_ERR(device)) {
		rc = PTR_ERR(device);
		pr_err("msm_camera: error creating device: %d\n", rc);
	cdev_init(&msm->cdev, fops);
	msm->cdev.owner = THIS_MODULE;
	rc = cdev_add(&msm->cdev, devno, 1);
		pr_err("msm_camera: error adding cdev: %d\n", rc);
		/* Roll back the device node created above. */
		device_destroy(msm_class, devno);
/* Undo msm_setup_cdev(): remove the cdev, then its device node. */
static int msm_tear_down_cdev(struct msm_device *msm, dev_t devno)
	cdev_del(&msm->cdev);
	device_destroy(msm_class, devno);
/*
 * Hook a V4L2 wrapper driver up to the (first) registered sensor by
 * filling its function table with the internal __msm_* entry points.
 * Fails when no sensor has been registered yet.
 * NOTE(review): the error return for the empty-list case and the final
 * return are not visible in this listing.
 */
int msm_v4l2_register(struct msm_v4l2_driver *drv)
	/* FIXME: support multiple sensors */
	if (list_empty(&msm_sensors))
	drv->sync = list_first_entry(&msm_sensors, struct msm_sync, list);
	drv->open = __msm_open;
	drv->release = __msm_release;
	drv->ctrl = __msm_v4l2_control;
	drv->reg_pmem = __msm_register_pmem;
	drv->get_frame = __msm_get_frame;
	drv->put_frame = __msm_put_frame_buf;
	drv->get_pict = __msm_get_pic;
	drv->drv_poll = __msm_poll_frame;
EXPORT_SYMBOL(msm_v4l2_register);
/* Detach a V4L2 wrapper driver registered via msm_v4l2_register().
 * NOTE(review): the body is not visible in this listing — presumably
 * clears drv->sync; verify against the complete source. */
int msm_v4l2_unregister(struct msm_v4l2_driver *drv)
EXPORT_SYMBOL(msm_v4l2_unregister);
/*
 * Initialize the per-sensor sync object: event/frame/picture queues
 * with their locks and wait queues, then probe the sensor with camera
 * I/O power briefly enabled and record its control ops in sync->sctrl.
 * NOTE(review): rc declaration, error-path returns and the final
 * return are not visible in this listing.
 */
static int msm_sync_init(struct msm_sync *sync,
	struct platform_device *pdev,
	int (*sensor_probe)(const struct msm_camera_sensor_info *,
		struct msm_sensor_ctrl *))
	struct msm_sensor_ctrl sctrl;
	sync->sdata = pdev->dev.platform_data;
	spin_lock_init(&sync->msg_event_q_lock);
	INIT_LIST_HEAD(&sync->msg_event_q);
	init_waitqueue_head(&sync->msg_event_wait);
	spin_lock_init(&sync->prev_frame_q_lock);
	INIT_LIST_HEAD(&sync->prev_frame_q);
	init_waitqueue_head(&sync->prev_frame_wait);
	spin_lock_init(&sync->pict_frame_q_lock);
	INIT_LIST_HEAD(&sync->pict_frame_q);
	init_waitqueue_head(&sync->pict_frame_wait);
	/* Power on camera I/O only for the duration of the probe. */
	rc = msm_camio_probe_on(pdev);
	rc = sensor_probe(sync->sdata, &sctrl);
	sync->sctrl = sctrl;
	msm_camio_probe_off(pdev);
	pr_err("msm_camera: failed to initialize %s\n",
		sync->sdata->sensor_name);
	mutex_init(&sync->lock);
	CDBG("initialized %s\n", sync->sdata->sensor_name);
/* Counterpart of msm_sync_init(), called on msm_device_init() failure.
 * NOTE(review): body not visible in this listing — verify what, if
 * anything, it releases. */
static int msm_sync_destroy(struct msm_sync *sync)
/*
 * Create the three device nodes (control, config, frame) for one
 * sensor at minor numbers 3*node .. 3*node+2, unwinding the earlier
 * nodes if a later one fails, then reset the opened flags and point
 * all three msm_device slots at the shared sync object.
 * NOTE(review): rc checks, early returns and the final return are
 * partially elided in this listing.
 */
static int msm_device_init(struct msm_device *pmsm,
	struct msm_sync *sync,
	/* Three minors per sensor: control, config, frame. */
	int dev_num = 3 * node;
	int rc = msm_setup_cdev(pmsm, node,
		MKDEV(MAJOR(msm_devno), dev_num),
		"control", &msm_fops_control);
		pr_err("error creating control node: %d\n", rc);
	rc = msm_setup_cdev(pmsm + 1, node,
		MKDEV(MAJOR(msm_devno), dev_num + 1),
		"config", &msm_fops_config);
		pr_err("error creating config node: %d\n", rc);
		/* Unwind the control node created above. */
		msm_tear_down_cdev(pmsm, MKDEV(MAJOR(msm_devno),
	rc = msm_setup_cdev(pmsm + 2, node,
		MKDEV(MAJOR(msm_devno), dev_num + 2),
		"frame", &msm_fops_frame);
		pr_err("error creating frame node: %d\n", rc);
		/* Unwind both earlier nodes. */
		msm_tear_down_cdev(pmsm,
			MKDEV(MAJOR(msm_devno), dev_num));
		msm_tear_down_cdev(pmsm + 1,
			MKDEV(MAJOR(msm_devno), dev_num + 1));
	atomic_set(&pmsm[0].opened, 0);
	atomic_set(&pmsm[1].opened, 0);
	atomic_set(&pmsm[2].opened, 0);
	pmsm[0].sync = sync;
	pmsm[1].sync = sync;
	pmsm[2].sync = sync;
/*
 * Platform-driver entry: register one camera sensor.
 *
 * Allocates the chrdev region and class (shared across sensors),
 * allocates three msm_device slots plus one msm_sync in a single
 * block, runs sensor probe/init and node creation, and finally links
 * the sensor into msm_sensors for msm_v4l2_register().
 * NOTE(review): rc declaration, NULL/error checks, camera_node
 * increment, kfree on failure and the final return are not visible in
 * this listing — verify against the complete source.
 */
int msm_camera_drv_start(struct platform_device *dev,
	int (*sensor_probe)(const struct msm_camera_sensor_info *,
		struct msm_sensor_ctrl *))
	struct msm_device *pmsm = NULL;
	struct msm_sync *sync;
	/* static: persists across calls so each sensor gets a new node. */
	static int camera_node;
	if (camera_node >= MSM_MAX_CAMERA_SENSORS) {
		pr_err("msm_camera: too many camera sensors\n");
	/* There are three device nodes per sensor */
	rc = alloc_chrdev_region(&msm_devno, 0,
		3 * MSM_MAX_CAMERA_SENSORS,
		pr_err("msm_camera: failed to allocate chrdev: %d\n",
	msm_class = class_create(THIS_MODULE, "msm_camera");
	if (IS_ERR(msm_class)) {
		rc = PTR_ERR(msm_class);
		pr_err("msm_camera: create device class failed: %d\n",
	/* Single allocation: 3 msm_device slots + 1 msm_sync tail.
	 * FIXME(review): this runs in process context at probe time, so
	 * GFP_ATOMIC looks unnecessary (see file-top FIXME); GFP_KERNEL
	 * would be appropriate — confirm no atomic caller exists. */
	pmsm = kzalloc(sizeof(struct msm_device) * 3 +
		sizeof(struct msm_sync), GFP_ATOMIC);
	sync = (struct msm_sync *)(pmsm + 3);
	rc = msm_sync_init(sync, dev, sensor_probe);
	CDBG("setting camera node %d\n", camera_node);
	rc = msm_device_init(pmsm, sync, camera_node);
		msm_sync_destroy(sync);
	list_add(&sync->list, &msm_sensors);
EXPORT_SYMBOL(msm_camera_drv_start);