2 * Copyright (C) 2008-2009 QUALCOMM Incorporated.
5 //FIXME: most allocations need not be GFP_ATOMIC
6 /* FIXME: management of mutexes */
7 /* FIXME: msm_pmem_region_lookup return values */
8 /* FIXME: way too many copy to/from user */
9 /* FIXME: does region->active mean free */
10 /* FIXME: check limits on command lengths passed from userspace */
11 /* FIXME: __msm_release: which queues should we flush when opencnt != 0 */
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <mach/board.h>
20 #include <linux/list.h>
21 #include <linux/uaccess.h>
22 #include <linux/android_pmem.h>
23 #include <linux/poll.h>
24 #include <media/msm_camera.h>
25 #include <mach/camera.h>
/* Driver-wide constants, user-copy error-logging macros, and global state. */
27 #define MSM_MAX_CAMERA_SENSORS 5
/* Log a copy_{to,from}_user failure with caller/function line context. */
29 #define ERR_USER_COPY(to) pr_err("%s(%d): copy %s user\n", \
30 __func__, __LINE__, ((to) ? "to" : "from"))
31 #define ERR_COPY_FROM_USER() ERR_USER_COPY(0)
32 #define ERR_COPY_TO_USER() ERR_USER_COPY(1)
/* Character-device bookkeeping and the list of registered sensors. */
34 static struct class *msm_class;
35 static dev_t msm_devno;
36 static LIST_HEAD(msm_sensors);
/* Range-arithmetic helpers over msm_pmem_region-like structs:
 * __CONTAINS/CONTAINS test whether one [start, start+len) span fully
 * contains another, IN_RANGE tests a single address, and OVERLAPS tests
 * partial overlap via the two end points.
 * NOTE(review): several continuation lines of these macros appear to have
 * been dropped from this copy (missing typeof bindings and closing "res;})").
 */
38 #define __CONTAINS(r, v, l, field) ({ \
41 typeof(v) __e = __v + l; \
42 int res = __v >= __r->field && \
43 __e <= __r->field + __r->len; \
47 #define CONTAINS(r1, r2, field) ({ \
48 typeof(r2) __r2 = r2; \
49 __CONTAINS(r1, __r2->field, __r2->len, field); \
52 #define IN_RANGE(r, v, field) ({ \
55 int res = ((__vv >= __r->field) && \
56 (__vv < (__r->field + __r->len))); \
60 #define OVERLAPS(r1, r2, field) ({ \
61 typeof(r1) __r1 = r1; \
62 typeof(r2) __r2 = r2; \
63 typeof(__r2->field) __v = __r2->field; \
64 typeof(__v) __e = __v + __r2->len - 1; \
65 int res = (IN_RANGE(__r1, __v, field) || \
66 IN_RANGE(__r1, __e, field)); \
/* Pop and discard every msm_queue_cmd on (sync)->name. The _NOSYNC variant
 * assumes the caller already holds the queue's lock; MSM_DRAIN_QUEUE takes
 * (sync)->name##_lock itself with IRQs disabled.
 * NOTE(review): the qcmd-freeing tail of _NOSYNC and the "while(0)"
 * terminators appear to be missing from this copy.
 */
70 #define MSM_DRAIN_QUEUE_NOSYNC(sync, name) do { \
71 struct msm_queue_cmd *qcmd = NULL; \
72 CDBG("%s: draining queue "#name"\n", __func__); \
73 while (!list_empty(&(sync)->name)) { \
74 qcmd = list_first_entry(&(sync)->name, \
75 struct msm_queue_cmd, list); \
76 list_del_init(&qcmd->list); \
81 #define MSM_DRAIN_QUEUE(sync, name) do { \
82 unsigned long flags; \
83 spin_lock_irqsave(&(sync)->name##_lock, flags); \
84 MSM_DRAIN_QUEUE_NOSYNC(sync, name); \
85 spin_unlock_irqrestore(&(sync)->name##_lock, flags); \
/* Walk the ptype region list and report a clash if the candidate span
 * [paddr, paddr+len) contains, is contained by, or overlaps any region
 * already registered (presumably returns a negative error on clash and
 * 0 otherwise - the return statements are not visible in this copy).
 */
88 static int check_overlap(struct hlist_head *ptype,
92 struct msm_pmem_region *region;
/* Wrap the candidate range in a temporary region so the macros apply. */
93 struct msm_pmem_region t = { .paddr = paddr, .len = len };
94 struct hlist_node *node;
96 hlist_for_each_entry(region, node, ptype, list) {
97 if (CONTAINS(region, &t, paddr) ||
98 CONTAINS(&t, region, paddr) ||
99 OVERLAPS(region, &t, paddr)) {
101 " region (PHYS %p len %ld)"
102 " clashes with registered region"
103 " (paddr %p len %ld)\n",
104 (void *)t.paddr, t.len,
105 (void *)region->paddr, region->len);
/* Pin the pmem buffer behind info->fd with get_pmem_file(), reject it if it
 * overlaps an already-registered region, then allocate a msm_pmem_region
 * describing it and add it to the head of the ptype hlist.
 */
113 static int msm_pmem_table_add(struct hlist_head *ptype,
114 struct msm_pmem_info *info)
118 unsigned long vstart;
121 struct msm_pmem_region *region;
123 rc = get_pmem_file(info->fd, &paddr, &vstart, &len, &file);
125 pr_err("msm_pmem_table_add: get_pmem_file fd %d error %d\n",
130 if (check_overlap(ptype, paddr, len) < 0)
133 CDBG("%s: type = %d, paddr = 0x%lx, vaddr = 0x%lx\n",
135 info->type, paddr, (unsigned long)info->vaddr);
137 region = kmalloc(sizeof(*region), GFP_KERNEL);
/* NOTE(review): "®ion" below is mojibake for "&region" - restore before building. */
141 INIT_HLIST_NODE(&region->list);
/* Record the caller-supplied layout (plane offsets, fd, active flag). */
143 region->type = info->type;
144 region->vaddr = info->vaddr;
145 region->paddr = paddr;
148 region->y_off = info->y_off;
149 region->cbcr_off = info->cbcr_off;
150 region->fd = info->fd;
151 region->active = info->active;
153 hlist_add_head(&(region->list), ptype);
158 /* return of 0 means failure */
/* Copy up to maxcount active regions of the given pmem_type from the ptype
 * list into reg[]; the visible fragment suggests the match count is returned.
 */
159 static uint8_t msm_pmem_region_lookup(struct hlist_head *ptype,
160 int pmem_type, struct msm_pmem_region *reg, uint8_t maxcount)
162 struct msm_pmem_region *region;
163 struct msm_pmem_region *regptr;
164 struct hlist_node *node, *n;
170 hlist_for_each_entry_safe(region, node, n, ptype, list) {
171 if (region->type == pmem_type && region->active) {
/* Physical-to-virtual lookup for frame buffers: find the registered frame
 * region whose paddr + plane offsets match the given Y/CbCr physical
 * addresses, report its offsets/fd through the out-parameters, and return
 * its registered user virtual address (0 on no match, per callers).
 */
183 static unsigned long msm_pmem_frame_ptov_lookup(struct msm_sync *sync,
184 unsigned long pyaddr,
185 unsigned long pcbcraddr,
186 uint32_t *yoff, uint32_t *cbcroff, int *fd)
188 struct msm_pmem_region *region;
189 struct hlist_node *node, *n;
191 hlist_for_each_entry_safe(region, node, n, &sync->frame, list) {
192 if (pyaddr == (region->paddr + region->y_off) &&
193 pcbcraddr == (region->paddr +
196 /* offset since we could pass vaddr inside
197 * a registerd pmem buffer
199 *yoff = region->y_off;
200 *cbcroff = region->cbcr_off;
203 return (unsigned long)(region->vaddr);
/* Physical-to-virtual lookup for stats buffers: match an active stats region
 * by exact physical address and return its user virtual address (fd is
 * presumably reported via *fd on the missing lines).
 */
210 static unsigned long msm_pmem_stats_ptov_lookup(struct msm_sync *sync,
211 unsigned long addr, int *fd)
213 struct msm_pmem_region *region;
214 struct hlist_node *node, *n;
216 hlist_for_each_entry_safe(region, node, n, &sync->stats, list) {
217 if (addr == region->paddr && region->active) {
218 /* offset since we could pass vaddr inside a
219 * registered pmem buffer */
222 return (unsigned long)(region->vaddr);
/* Virtual-to-physical lookup for frame buffers: match an *inactive*
 * (active == 0) frame region by user vaddr, plane offsets and fd, and
 * return its physical address.
 */
229 static unsigned long msm_pmem_frame_vtop_lookup(struct msm_sync *sync,
230 unsigned long buffer,
231 uint32_t yoff, uint32_t cbcroff, int fd)
233 struct msm_pmem_region *region;
234 struct hlist_node *node, *n;
236 hlist_for_each_entry_safe(region,
237 node, n, &sync->frame, list) {
238 if (((unsigned long)(region->vaddr) == buffer) &&
239 (region->y_off == yoff) &&
240 (region->cbcr_off == cbcroff) &&
241 (region->fd == fd) &&
242 (region->active == 0)) {
245 return region->paddr;
/* Virtual-to-physical lookup for stats buffers: match an inactive stats
 * region by user vaddr and fd, and return its physical address.
 */
252 static unsigned long msm_pmem_stats_vtop_lookup(
253 struct msm_sync *sync,
254 unsigned long buffer,
257 struct msm_pmem_region *region;
258 struct hlist_node *node, *n;
260 hlist_for_each_entry_safe(region, node, n, &sync->stats, list) {
261 if (((unsigned long)(region->vaddr) == buffer) &&
262 (region->fd == fd) && region->active == 0) {
264 return region->paddr;
/* Unregister a pmem buffer previously added with msm_pmem_table_add():
 * pick the frame or stats list from pinfo->type, find the matching entry
 * by (type, vaddr, fd), release the pinned file with put_pmem_file()
 * (the hlist_del/kfree lines are not visible in this copy).
 */
271 static int __msm_pmem_table_del(struct msm_sync *sync,
272 struct msm_pmem_info *pinfo)
275 struct msm_pmem_region *region;
276 struct hlist_node *node, *n;
278 switch (pinfo->type) {
279 case MSM_PMEM_OUTPUT1:
280 case MSM_PMEM_OUTPUT2:
281 case MSM_PMEM_THUMBAIL:
282 case MSM_PMEM_MAINIMG:
283 case MSM_PMEM_RAW_MAINIMG:
/* Frame-type buffers live on sync->frame. */
284 hlist_for_each_entry_safe(region, node, n,
285 &sync->frame, list) {
287 if (pinfo->type == region->type &&
288 pinfo->vaddr == region->vaddr &&
289 pinfo->fd == region->fd) {
291 put_pmem_file(region->file);
297 case MSM_PMEM_AEC_AWB:
/* Stats-type buffers live on sync->stats. */
299 hlist_for_each_entry_safe(region, node, n,
300 &sync->stats, list) {
302 if (pinfo->type == region->type &&
303 pinfo->vaddr == region->vaddr &&
304 pinfo->fd == region->fd) {
306 put_pmem_file(region->file);
/* Ioctl wrapper: copy the msm_pmem_info from userspace and delegate to
 * __msm_pmem_table_del().
 */
320 static int msm_pmem_table_del(struct msm_sync *sync, void __user *arg)
322 struct msm_pmem_info info;
324 if (copy_from_user(&info, arg, sizeof(info))) {
325 ERR_COPY_FROM_USER();
329 return __msm_pmem_table_del(sync, &info);
/* Dequeue the oldest preview frame from sync->prev_frame_q (under the queue
 * spinlock), translate its VFE physical Y/CbCr addresses back to the user's
 * registered virtual address via msm_pmem_frame_ptov_lookup(), and fill in
 * the caller's msm_frame. Fails if the queue is empty or lookup misses.
 */
332 static int __msm_get_frame(struct msm_sync *sync,
333 struct msm_frame *frame)
338 struct msm_queue_cmd *qcmd = NULL;
339 struct msm_vfe_phy_info *pphy;
341 spin_lock_irqsave(&sync->prev_frame_q_lock, flags);
342 if (!list_empty(&sync->prev_frame_q)) {
343 qcmd = list_first_entry(&sync->prev_frame_q,
344 struct msm_queue_cmd, list);
345 list_del_init(&qcmd->list);
347 spin_unlock_irqrestore(&sync->prev_frame_q_lock, flags);
350 pr_err("%s: no preview frame.\n", __func__);
/* The queued command payload is the VFE's physical-address record. */
354 pphy = (struct msm_vfe_phy_info *)(qcmd->command);
357 msm_pmem_frame_ptov_lookup(sync,
359 pphy->cbcr_phy, &(frame->y_off),
360 &(frame->cbcr_off), &(frame->fd));
361 if (!frame->buffer) {
362 pr_err("%s: cannot get frame, invalid lookup address "
363 "y=%x cbcr=%x offset=%d\n",
371 CDBG("__msm_get_frame: y=0x%x, cbcr=0x%x, qcmd=0x%x, virt_addr=0x%x\n",
372 pphy->y_phy, pphy->cbcr_phy, (int) qcmd, (int) frame->buffer);
/* Ioctl handler: copy a msm_frame request in, fetch the next preview frame
 * via __msm_get_frame(), optionally copy crop info out (bounded by
 * sync->croplen), then copy the filled msm_frame back to userspace.
 */
378 static int msm_get_frame(struct msm_sync *sync, void __user *arg)
381 struct msm_frame frame;
383 if (copy_from_user(&frame,
385 sizeof(struct msm_frame))) {
386 ERR_COPY_FROM_USER();
390 rc = __msm_get_frame(sync, &frame);
/* Reject user-supplied croplen larger than what the driver holds. */
395 if (frame.croplen > sync->croplen) {
396 pr_err("msm_get_frame: invalid frame croplen %d\n",
401 if (copy_to_user((void *)frame.cropinfo,
409 if (copy_to_user((void *)arg,
410 &frame, sizeof(struct msm_frame))) {
415 CDBG("Got frame!!!\n");
/* Ioctl handler: copy a camera_enable_cmd from userspace and forward it to
 * the VFE backend's enable hook, if one is registered.
 */
420 static int msm_enable_vfe(struct msm_sync *sync, void __user *arg)
423 struct camera_enable_cmd cfg;
425 if (copy_from_user(&cfg,
427 sizeof(struct camera_enable_cmd))) {
428 ERR_COPY_FROM_USER();
432 if (sync->vfefn.vfe_enable)
433 rc = sync->vfefn.vfe_enable(&cfg);
435 CDBG("msm_enable_vfe: returned rc = %d\n", rc);
/* Ioctl handler: mirror of msm_enable_vfe() - copy the command in and call
 * the VFE backend's disable hook, if one is registered.
 */
439 static int msm_disable_vfe(struct msm_sync *sync, void __user *arg)
442 struct camera_enable_cmd cfg;
444 if (copy_from_user(&cfg,
446 sizeof(struct camera_enable_cmd))) {
447 ERR_COPY_FROM_USER();
451 if (sync->vfefn.vfe_disable)
452 rc = sync->vfefn.vfe_disable(&cfg, NULL);
454 CDBG("msm_disable_vfe: returned rc = %d\n", rc);
/* Post qcmd to the config thread's msg_event_q and, when a response queue is
 * given, block until the config thread posts a status reply on
 * queue->ctrl_status_q (or the wait times out / is interrupted, in which
 * case the command is pulled back off the source queue to avoid a race with
 * a late responder). Returns the dequeued status command on success.
 */
458 static struct msm_queue_cmd* __msm_control(struct msm_sync *sync,
459 struct msm_control_device_queue *queue,
460 struct msm_queue_cmd *qcmd,
466 spin_lock_irqsave(&sync->msg_event_q_lock, flags);
467 list_add_tail(&qcmd->list, &sync->msg_event_q);
468 /* wake up config thread */
469 wake_up(&sync->msg_event_wait);
470 spin_unlock_irqrestore(&sync->msg_event_q_lock, flags);
475 /* wait for config status */
476 rc = wait_event_interruptible_timeout(
477 queue->ctrl_status_wait,
478 !list_empty_careful(&queue->ctrl_status_q),
480 if (list_empty_careful(&queue->ctrl_status_q)) {
484 pr_err("msm_control: wait_event error %d\n", rc);
486 /* This is a bit scary. If we time out too early, we
487 * will free qcmd at the end of this function, and the
488 * dsp may do the same when it does respond, so we
489 * remove the message from the source queue.
491 pr_err("%s: error waiting for ctrl_status_q: %d\n",
493 spin_lock_irqsave(&sync->msg_event_q_lock, flags);
494 list_del_init(&qcmd->list);
495 spin_unlock_irqrestore(&sync->msg_event_q_lock, flags);
501 /* control command status is ready */
502 spin_lock_irqsave(&queue->ctrl_status_q_lock, flags);
503 BUG_ON(list_empty(&queue->ctrl_status_q));
504 qcmd = list_first_entry(&queue->ctrl_status_q,
505 struct msm_queue_cmd, list);
506 list_del_init(&qcmd->list);
507 spin_unlock_irqrestore(&queue->ctrl_status_q_lock, flags);
/* Ioctl handler for CTRL_COMMAND(_2): copy the msm_ctrl_cmd header and its
 * variable-length value from userspace into a single kmalloc'd queue
 * command, then hand it to __msm_control() - fire-and-forget (NULL queue,
 * zero timeout) or blocking with MAX_SCHEDULE_TIMEOUT depending on the
 * ioctl variant. On a blocking call the response value and header are
 * copied back to the caller.
 */
512 static int msm_control(struct msm_control_device *ctrl_pmsm,
518 struct msm_sync *sync = ctrl_pmsm->pmsm->sync;
519 struct msm_ctrl_cmd udata, *ctrlcmd;
520 struct msm_queue_cmd *qcmd = NULL, *qcmd_temp;
522 if (copy_from_user(&udata, arg, sizeof(struct msm_ctrl_cmd))) {
523 ERR_COPY_FROM_USER();
/* One allocation carries the queue cmd, the ctrl header, and its payload. */
528 qcmd = kmalloc(sizeof(struct msm_queue_cmd) +
529 sizeof(struct msm_ctrl_cmd) + udata.length,
532 pr_err("msm_control: cannot allocate buffer\n");
537 qcmd->type = MSM_CAM_Q_CTRL;
538 qcmd->command = ctrlcmd = (struct msm_ctrl_cmd *)(qcmd + 1);
540 ctrlcmd->value = ctrlcmd + 1;
543 if (copy_from_user(ctrlcmd->value,
544 udata.value, udata.length)) {
545 ERR_COPY_FROM_USER();
552 /* qcmd will be set to NULL */
553 qcmd = __msm_control(sync, NULL, qcmd, 0);
557 qcmd_temp = __msm_control(sync,
559 qcmd, MAX_SCHEDULE_TIMEOUT);
561 if (IS_ERR(qcmd_temp)) {
562 rc = PTR_ERR(qcmd_temp);
/* Copy the response payload back into the caller's original buffer. */
568 void __user *to = udata.value;
569 udata = *(struct msm_ctrl_cmd *)qcmd->command;
570 if (udata.length > 0) {
581 if (copy_to_user((void *)arg, &udata,
582 sizeof(struct msm_ctrl_cmd))) {
590 /* Note: if we get here as a result of an error, we will free the
591 * qcmd that we kmalloc() in this function. When we come here as
592 * a result of a successful completion, we are freeing the qcmd that
593 * we dequeued from queue->ctrl_status_q.
598 CDBG("msm_control: end rc = %d\n", rc);
/* Ioctl handler for GET_STATS: the config thread blocks here (bounded by the
 * user-supplied timeout) until something arrives on sync->msg_event_q, then
 * dispatches on the queued command type - VFE events/messages (stats
 * buffers, general events, output frames, snapshot post-processing) or
 * control-thread / v4l2 requests - marshalling the payload out through the
 * user-supplied msm_stats_event_ctrl buffers.
 */
602 static int msm_get_stats(struct msm_sync *sync, void __user *arg)
608 struct msm_stats_event_ctrl se;
610 struct msm_queue_cmd *qcmd = NULL;
611 struct msm_ctrl_cmd *ctrl = NULL;
612 struct msm_vfe_resp *data = NULL;
613 struct msm_stats_buf stats;
615 if (copy_from_user(&se, arg,
616 sizeof(struct msm_stats_event_ctrl))) {
617 ERR_COPY_FROM_USER();
621 timeout = (int)se.timeout_ms;
623 CDBG("msm_get_stats timeout %d\n", timeout);
624 rc = wait_event_interruptible_timeout(
625 sync->msg_event_wait,
626 !list_empty_careful(&sync->msg_event_q),
627 msecs_to_jiffies(timeout));
628 if (list_empty_careful(&sync->msg_event_q)) {
632 pr_err("msm_get_stats error %d\n", rc);
636 CDBG("msm_get_stats returned from wait: %d\n", rc);
/* Dequeue the oldest pending event under the queue spinlock. */
638 spin_lock_irqsave(&sync->msg_event_q_lock, flags);
639 BUG_ON(list_empty(&sync->msg_event_q));
640 qcmd = list_first_entry(&sync->msg_event_q,
641 struct msm_queue_cmd, list);
642 list_del_init(&qcmd->list);
643 spin_unlock_irqrestore(&sync->msg_event_q_lock, flags);
645 CDBG("=== received from DSP === %d\n", qcmd->type);
647 switch (qcmd->type) {
648 case MSM_CAM_Q_VFE_EVT:
649 case MSM_CAM_Q_VFE_MSG:
650 data = (struct msm_vfe_resp *)(qcmd->command);
652 /* adsp event and message */
653 se.resptype = MSM_CAM_RESP_STAT_EVT_MSG;
655 /* 0 - msg from aDSP, 1 - event from mARM */
656 se.stats_event.type = data->evt_msg.type;
657 se.stats_event.msg_id = data->evt_msg.msg_id;
658 se.stats_event.len = data->evt_msg.len;
660 CDBG("msm_get_stats, qcmd->type = %d\n", qcmd->type);
661 CDBG("length = %d\n", se.stats_event.len);
662 CDBG("msg_id = %d\n", se.stats_event.msg_id);
/* AF/AWB stats: translate the phys buffer back to a user vaddr. */
664 if ((data->type == VFE_MSG_STATS_AF) ||
665 (data->type == VFE_MSG_STATS_WE)) {
668 msm_pmem_stats_ptov_lookup(sync,
672 pr_err("%s: msm_pmem_stats_ptov_lookup error\n",
678 if (copy_to_user((void *)(se.stats_event.data),
680 sizeof(struct msm_stats_buf))) {
685 } else if ((data->evt_msg.len > 0) &&
686 (data->type == VFE_MSG_GENERAL)) {
687 if (copy_to_user((void *)(se.stats_event.data),
689 data->evt_msg.len)) {
693 } else if (data->type == VFE_MSG_OUTPUT1 ||
694 data->type == VFE_MSG_OUTPUT2) {
695 if (copy_to_user((void *)(se.stats_event.data),
/* Snapshot with post-processing enabled: hand the main (or raw) image
 * buffer description to userspace for PP. */
701 } else if (data->type == VFE_MSG_SNAPSHOT && sync->pict_pp) {
702 struct msm_postproc buf;
703 struct msm_pmem_region region;
704 buf.fmnum = msm_pmem_region_lookup(&sync->frame,
707 if (buf.fmnum == 1) {
708 buf.fmain.buffer = (unsigned long)region.vaddr;
709 buf.fmain.y_off = region.y_off;
710 buf.fmain.cbcr_off = region.cbcr_off;
711 buf.fmain.fd = region.fd;
713 buf.fmnum = msm_pmem_region_lookup(&sync->frame,
714 MSM_PMEM_RAW_MAINIMG,
716 if (buf.fmnum == 1) {
717 buf.fmain.path = MSM_FRAME_PREV_2;
719 (unsigned long)region.vaddr;
720 buf.fmain.fd = region.fd;
723 pr_err("%s: pmem lookup failed\n",
729 if (copy_to_user((void *)(se.stats_event.data), &buf,
735 CDBG("snapshot copy_to_user!\n");
740 /* control command from control thread */
741 ctrl = (struct msm_ctrl_cmd *)(qcmd->command);
743 CDBG("msm_get_stats, qcmd->type = %d\n", qcmd->type);
744 CDBG("length = %d\n", ctrl->length);
746 if (ctrl->length > 0) {
747 if (copy_to_user((void *)(se.ctrl_cmd.value),
756 se.resptype = MSM_CAM_RESP_CTRL;
758 /* what to control */
759 se.ctrl_cmd.type = ctrl->type;
760 se.ctrl_cmd.length = ctrl->length;
761 se.ctrl_cmd.resp_fd = ctrl->resp_fd;
764 case MSM_CAM_Q_V4L2_REQ:
765 /* control command from v4l2 client */
766 ctrl = (struct msm_ctrl_cmd *)(qcmd->command);
768 CDBG("msm_get_stats, qcmd->type = %d\n", qcmd->type);
769 CDBG("length = %d\n", ctrl->length);
771 if (ctrl->length > 0) {
772 if (copy_to_user((void *)(se.ctrl_cmd.value),
773 ctrl->value, ctrl->length)) {
780 /* 2 tells config thread this is v4l2 request */
781 se.resptype = MSM_CAM_RESP_V4L2;
783 /* what to control */
784 se.ctrl_cmd.type = ctrl->type;
785 se.ctrl_cmd.length = ctrl->length;
791 } /* switch qcmd->type */
793 if (copy_to_user((void *)arg, &se, sizeof(se))) {
802 CDBG("msm_get_stats: %d\n", rc);
/* Ioctl handler for CTRL_CMD_DONE: the config thread reports the result of
 * a control command. The reply header + payload are copied from userspace
 * into one allocation, queued on the control device's ctrl_status_q, and
 * the waiting control thread (blocked in __msm_control) is woken.
 */
806 static int msm_ctrl_cmd_done(struct msm_control_device *ctrl_pmsm,
812 struct msm_ctrl_cmd udata, *ctrlcmd;
813 struct msm_queue_cmd *qcmd = NULL;
815 if (copy_from_user(&udata, arg, sizeof(struct msm_ctrl_cmd))) {
816 ERR_COPY_FROM_USER();
821 qcmd = kmalloc(sizeof(struct msm_queue_cmd) +
822 sizeof(struct msm_ctrl_cmd) + udata.length,
829 qcmd->command = ctrlcmd = (struct msm_ctrl_cmd *)(qcmd + 1);
831 if (udata.length > 0) {
/* Payload follows the header inside the same allocation. */
832 ctrlcmd->value = ctrlcmd + 1;
833 if (copy_from_user(ctrlcmd->value,
836 ERR_COPY_FROM_USER();
842 else ctrlcmd->value = NULL;
845 CDBG("msm_ctrl_cmd_done: end rc = %d\n", rc);
847 /* wake up control thread */
848 spin_lock_irqsave(&ctrl_pmsm->ctrl_q.ctrl_status_q_lock, flags);
849 list_add_tail(&qcmd->list, &ctrl_pmsm->ctrl_q.ctrl_status_q);
850 wake_up(&ctrl_pmsm->ctrl_q.ctrl_status_wait);
851 spin_unlock_irqrestore(&ctrl_pmsm->ctrl_q.ctrl_status_q_lock, flags);
/* Ioctl handler for CONFIG_VFE: copy the VFE config command from userspace,
 * and for stats-enable commands resolve the registered stats pmem regions
 * into an axidata descriptor before forwarding to the VFE config hook.
 * NOTE(review): "®ion" occurrences below are mojibake for "&region";
 * __FUNCTION__ should be the standard __func__.
 */
857 static int msm_config_vfe(struct msm_sync *sync, void __user *arg)
859 struct msm_vfe_cfg_cmd cfgcmd;
860 struct msm_pmem_region region[8];
861 struct axidata axi_data;
865 memset(&axi_data, 0, sizeof(axi_data));
867 if (copy_from_user(&cfgcmd, arg, sizeof(cfgcmd))) {
868 ERR_COPY_FROM_USER();
872 switch(cfgcmd.cmd_type) {
873 case CMD_STATS_ENABLE:
/* AEC/AWB stats buffers. */
875 msm_pmem_region_lookup(&sync->stats,
876 MSM_PMEM_AEC_AWB, &region[0],
877 NUM_WB_EXP_STAT_OUTPUT_BUFFERS);
878 if (!axi_data.bufnum1) {
879 pr_err("%s: pmem region lookup error\n", __FUNCTION__);
882 axi_data.region = &region[0];
885 case CMD_STATS_AF_ENABLE:
/* Autofocus stats buffers. */
887 msm_pmem_region_lookup(&sync->stats,
888 MSM_PMEM_AF, &region[0],
889 NUM_AF_STAT_OUTPUT_BUFFERS);
890 if (!axi_data.bufnum1) {
891 pr_err("%s: pmem region lookup error\n", __FUNCTION__);
894 axi_data.region = &region[0];
898 case CMD_STATS_DISABLE:
901 pr_err("%s: unknown command type %d\n",
902 __FUNCTION__, cfgcmd.cmd_type);
907 if (sync->vfefn.vfe_config)
908 rc = sync->vfefn.vfe_config(&cfgcmd, data);
/* Build the AXI output configuration for the VFE: per command type, look up
 * the registered frame pmem regions (preview output 1/2, snapshot
 * thumbnail + main image, or raw main image) into region[], fill axi_data
 * with the buffer counts, and forward to the VFE config hook.
 * NOTE(review): "®ion" occurrences below are mojibake for "&region".
 */
913 static int msm_frame_axi_cfg(struct msm_sync *sync,
914 struct msm_vfe_cfg_cmd *cfgcmd)
917 struct axidata axi_data;
918 void *data = &axi_data;
919 struct msm_pmem_region region[8];
922 memset(&axi_data, 0, sizeof(axi_data));
924 switch (cfgcmd->cmd_type) {
925 case CMD_AXI_CFG_OUT1:
926 pmem_type = MSM_PMEM_OUTPUT1;
928 msm_pmem_region_lookup(&sync->frame, pmem_type,
930 if (!axi_data.bufnum1) {
931 pr_err("%s: pmem region lookup error\n", __FUNCTION__);
936 case CMD_AXI_CFG_OUT2:
937 pmem_type = MSM_PMEM_OUTPUT2;
939 msm_pmem_region_lookup(&sync->frame, pmem_type,
941 if (!axi_data.bufnum2) {
942 pr_err("%s: pmem region lookup error\n", __FUNCTION__);
/* Snapshot needs both thumbnail (bufnum1) and main image (bufnum2). */
947 case CMD_AXI_CFG_SNAP_O1_AND_O2:
948 pmem_type = MSM_PMEM_THUMBAIL;
950 msm_pmem_region_lookup(&sync->frame, pmem_type,
952 if (!axi_data.bufnum1) {
953 pr_err("%s: pmem region lookup error\n", __FUNCTION__);
957 pmem_type = MSM_PMEM_MAINIMG;
959 msm_pmem_region_lookup(&sync->frame, pmem_type,
960 &region[axi_data.bufnum1], 8);
961 if (!axi_data.bufnum2) {
962 pr_err("%s: pmem region lookup error\n", __FUNCTION__);
967 case CMD_RAW_PICT_AXI_CFG:
968 pmem_type = MSM_PMEM_RAW_MAINIMG;
970 msm_pmem_region_lookup(&sync->frame, pmem_type,
972 if (!axi_data.bufnum2) {
973 pr_err("%s: pmem region lookup error\n", __FUNCTION__);
983 pr_err("%s: unknown command type %d\n",
984 __FUNCTION__, cfgcmd->cmd_type);
988 axi_data.region = &region[0];
990 /* send the AXI configuration command to driver */
991 if (sync->vfefn.vfe_config)
992 rc = sync->vfefn.vfe_config(cfgcmd, data);
/* Ioctl handler for GET_SENSOR_INFO: report the sensor name and flash
 * capability from the platform data back to userspace.
 */
997 static int msm_get_sensor_info(struct msm_sync *sync, void __user *arg)
1000 struct msm_camsensor_info info;
1001 struct msm_camera_sensor_info *sdata;
1003 if (copy_from_user(&info,
1005 sizeof(struct msm_camsensor_info))) {
1006 ERR_COPY_FROM_USER();
1010 sdata = sync->pdev->dev.platform_data;
1011 CDBG("sensor_name %s\n", sdata->sensor_name);
1013 memcpy(&info.name[0],
1016 info.flash_enabled = sdata->flash_type != MSM_CAMERA_FLASH_NONE;
1018 /* copy back to user space */
1019 if (copy_to_user((void *)arg,
1021 sizeof(struct msm_camsensor_info))) {
/* Return a preview frame buffer to the VFE: translate the user's virtual
 * address back to physical with msm_pmem_frame_vtop_lookup(), then issue a
 * CMD_FRAME_BUF_RELEASE through the VFE config hook.
 */
1029 static int __msm_put_frame_buf(struct msm_sync *sync,
1030 struct msm_frame *pb)
1033 struct msm_vfe_cfg_cmd cfgcmd;
1037 pphy = msm_pmem_frame_vtop_lookup(sync,
1039 pb->y_off, pb->cbcr_off, pb->fd);
1042 CDBG("rel: vaddr = 0x%lx, paddr = 0x%lx\n",
1044 cfgcmd.cmd_type = CMD_FRAME_BUF_RELEASE;
1045 cfgcmd.value = (void *)pb;
1046 if (sync->vfefn.vfe_config)
1047 rc = sync->vfefn.vfe_config(&cfgcmd, &pphy);
1049 pr_err("%s: msm_pmem_frame_vtop_lookup failed\n",
/* Ioctl wrapper: copy the msm_frame from userspace and delegate to
 * __msm_put_frame_buf().
 */
1057 static int msm_put_frame_buffer(struct msm_sync *sync, void __user *arg)
1059 struct msm_frame buf_t;
1061 if (copy_from_user(&buf_t,
1063 sizeof(struct msm_frame))) {
1064 ERR_COPY_FROM_USER();
1068 return __msm_put_frame_buf(sync, &buf_t);
/* Route a pmem registration to the right table: frame-type buffers go on
 * sync->frame, stats-type buffers on sync->stats.
 */
1071 static int __msm_register_pmem(struct msm_sync *sync,
1072 struct msm_pmem_info *pinfo)
1076 switch (pinfo->type) {
1077 case MSM_PMEM_OUTPUT1:
1078 case MSM_PMEM_OUTPUT2:
1079 case MSM_PMEM_THUMBAIL:
1080 case MSM_PMEM_MAINIMG:
1081 case MSM_PMEM_RAW_MAINIMG:
1082 rc = msm_pmem_table_add(&sync->frame, pinfo);
1085 case MSM_PMEM_AEC_AWB:
1087 rc = msm_pmem_table_add(&sync->stats, pinfo);
/* Ioctl wrapper: copy the msm_pmem_info from userspace and delegate to
 * __msm_register_pmem().
 */
1098 static int msm_register_pmem(struct msm_sync *sync, void __user *arg)
1100 struct msm_pmem_info info;
1102 if (copy_from_user(&info, arg, sizeof(info))) {
1103 ERR_COPY_FROM_USER();
1107 return __msm_register_pmem(sync, &info);
/* Configure the AXI path for stats output: select the pmem type for the
 * command (AEC/AWB or AF), resolve the registered stats regions into
 * axi_data, and forward to the VFE config hook.
 * NOTE(review): "®ion" occurrences below are mojibake for "&region".
 */
1110 static int msm_stats_axi_cfg(struct msm_sync *sync,
1111 struct msm_vfe_cfg_cmd *cfgcmd)
1114 struct axidata axi_data;
1115 void *data = &axi_data;
1117 struct msm_pmem_region region[3];
1118 int pmem_type = MSM_PMEM_MAX;
1120 memset(&axi_data, 0, sizeof(axi_data));
1122 switch (cfgcmd->cmd_type) {
1123 case CMD_STATS_AXI_CFG:
1124 pmem_type = MSM_PMEM_AEC_AWB;
1126 case CMD_STATS_AF_AXI_CFG:
1127 pmem_type = MSM_PMEM_AF;
1133 pr_err("%s: unknown command type %d\n",
1134 __FUNCTION__, cfgcmd->cmd_type);
1138 if (cfgcmd->cmd_type != CMD_GENERAL) {
1140 msm_pmem_region_lookup(&sync->stats, pmem_type,
1141 &region[0], NUM_WB_EXP_STAT_OUTPUT_BUFFERS);
1142 if (!axi_data.bufnum1) {
1143 pr_err("%s: pmem region lookup error\n", __FUNCTION__);
1146 axi_data.region = &region[0];
1149 /* send the AEC/AWB STATS configuration command to driver */
1150 if (sync->vfefn.vfe_config)
1151 rc = sync->vfefn.vfe_config(cfgcmd, &axi_data);
/* Ioctl handler for RELEASE_STATS_BUFFER: translate the user's stats buffer
 * virtual address back to physical, pick the release command matching the
 * stats type (AEC/AWB vs AF), and forward it to the VFE config hook.
 */
1156 static int msm_put_stats_buffer(struct msm_sync *sync, void __user *arg)
1160 struct msm_stats_buf buf;
1162 struct msm_vfe_cfg_cmd cfgcmd;
1164 if (copy_from_user(&buf, arg,
1165 sizeof(struct msm_stats_buf))) {
1166 ERR_COPY_FROM_USER();
1170 CDBG("msm_put_stats_buffer\n");
1171 pphy = msm_pmem_stats_vtop_lookup(sync, buf.buffer, buf.fd);
1174 if (buf.type == STAT_AEAW)
1175 cfgcmd.cmd_type = CMD_STATS_BUF_RELEASE;
1176 else if (buf.type == STAT_AF)
1177 cfgcmd.cmd_type = CMD_STATS_AF_BUF_RELEASE;
1179 pr_err("%s: invalid buf type %d\n",
1186 cfgcmd.value = (void *)&buf;
1188 if (sync->vfefn.vfe_config) {
1189 rc = sync->vfefn.vfe_config(&cfgcmd, &pphy);
1191 pr_err("msm_put_stats_buffer: "\
1192 "vfe_config err %d\n", rc);
1194 pr_err("msm_put_stats_buffer: vfe_config is NULL\n");
1196 pr_err("msm_put_stats_buffer: NULL physical address\n");
/* Ioctl handler for AXI_CONFIG: copy the command header and dispatch frame
 * AXI commands to msm_frame_axi_cfg() and stats AXI commands to
 * msm_stats_axi_cfg().
 */
1204 static int msm_axi_config(struct msm_sync *sync, void __user *arg)
1206 struct msm_vfe_cfg_cmd cfgcmd;
1208 if (copy_from_user(&cfgcmd, arg, sizeof(cfgcmd))) {
1209 ERR_COPY_FROM_USER();
1213 switch (cfgcmd.cmd_type) {
1214 case CMD_AXI_CFG_OUT1:
1215 case CMD_AXI_CFG_OUT2:
1216 case CMD_AXI_CFG_SNAP_O1_AND_O2:
1217 case CMD_RAW_PICT_AXI_CFG:
1218 return msm_frame_axi_cfg(sync, &cfgcmd);
1220 case CMD_STATS_AXI_CFG:
1221 case CMD_STATS_AF_AXI_CFG:
1222 return msm_stats_axi_cfg(sync, &cfgcmd);
1225 pr_err("%s: unknown command type %d\n",
/* Wait (bounded by ctrl->timeout_ms) for a snapshot completion to appear on
 * sync->pict_frame_q, dequeue it under the queue spinlock, and copy its
 * type/status into the caller's ctrl command.
 */
1234 static int __msm_get_pic(struct msm_sync *sync, struct msm_ctrl_cmd *ctrl)
1236 unsigned long flags;
1240 struct msm_queue_cmd *qcmd = NULL;
1242 tm = (int)ctrl->timeout_ms;
1244 rc = wait_event_interruptible_timeout(
1245 sync->pict_frame_wait,
1246 !list_empty_careful(&sync->pict_frame_q),
1247 msecs_to_jiffies(tm));
1248 if (list_empty_careful(&sync->pict_frame_q)) {
1252 pr_err("msm_camera_get_picture, rc = %d\n", rc);
1257 spin_lock_irqsave(&sync->pict_frame_q_lock, flags);
1258 BUG_ON(list_empty(&sync->pict_frame_q));
1259 qcmd = list_first_entry(&sync->pict_frame_q,
1260 struct msm_queue_cmd, list);
1261 list_del_init(&qcmd->list);
1262 spin_unlock_irqrestore(&sync->pict_frame_q_lock, flags);
1264 if (qcmd->command != NULL) {
1265 struct msm_ctrl_cmd *q =
1266 (struct msm_ctrl_cmd *)qcmd->command;
1267 ctrl->type = q->type;
1268 ctrl->status = q->status;
/* Ioctl handler for GET_PICTURE: copy the request in, block in
 * __msm_get_pic() for the snapshot, copy crop info out if the driver holds
 * any (validating the user buffer length), then return the updated ctrl
 * command to userspace.
 */
1278 static int msm_get_pic(struct msm_sync *sync, void __user *arg)
1280 struct msm_ctrl_cmd ctrlcmd_t;
1283 if (copy_from_user(&ctrlcmd_t,
1285 sizeof(struct msm_ctrl_cmd))) {
1286 ERR_COPY_FROM_USER();
1290 rc = __msm_get_pic(sync, &ctrlcmd_t);
1294 if (sync->croplen) {
1295 if (ctrlcmd_t.length < sync->croplen) {
1296 pr_err("msm_get_pic: invalid len %d\n",
1300 if (copy_to_user(ctrlcmd_t.value,
1308 if (copy_to_user((void *)arg,
1310 sizeof(struct msm_ctrl_cmd))) {
/* Ioctl handler for SET_CROP: copy the crop_info header from userspace,
 * allocate (first use) or reuse the driver-side crop buffer, then copy the
 * crop payload in and record its length. Oversized updates are rejected.
 */
1317 static int msm_set_crop(struct msm_sync *sync, void __user *arg)
1319 struct crop_info crop;
1321 if (copy_from_user(&crop,
1323 sizeof(struct crop_info))) {
1324 ERR_COPY_FROM_USER();
1328 if (!sync->croplen) {
1329 sync->cropinfo = kmalloc(crop.len, GFP_KERNEL);
1330 if (!sync->cropinfo)
1332 } else if (sync->croplen < crop.len)
1335 if (copy_from_user(sync->cropinfo,
1338 ERR_COPY_FROM_USER();
/* On a failed payload copy the buffer is dropped so state stays consistent. */
1339 kfree(sync->cropinfo);
1343 sync->croplen = crop.len;
/* Ioctl handler for PICT_PP_DONE: userspace post-processing reports a
 * finished snapshot. Build a VFE-message queue command carrying the
 * type/status, enqueue it on sync->pict_frame_q, and wake any thread
 * blocked in __msm_get_pic().
 */
1348 static int msm_pict_pp_done(struct msm_sync *sync, void __user *arg)
1350 struct msm_ctrl_cmd udata;
1351 struct msm_ctrl_cmd *ctrlcmd = NULL;
1352 struct msm_queue_cmd *qcmd = NULL;
1353 unsigned long flags;
1359 if (copy_from_user(&udata, arg, sizeof(struct msm_ctrl_cmd))) {
1360 ERR_COPY_FROM_USER();
1365 qcmd = kmalloc(sizeof(struct msm_queue_cmd) +
1366 sizeof(struct msm_ctrl_cmd),
1373 qcmd->type = MSM_CAM_Q_VFE_MSG;
1374 qcmd->command = ctrlcmd = (struct msm_ctrl_cmd *)(qcmd + 1);
1375 memset(ctrlcmd, 0, sizeof(struct msm_ctrl_cmd));
1376 ctrlcmd->type = udata.type;
1377 ctrlcmd->status = udata.status;
1379 spin_lock_irqsave(&sync->pict_frame_q_lock, flags);
1380 list_add_tail(&qcmd->list, &sync->pict_frame_q);
1381 spin_unlock_irqrestore(&sync->pict_frame_q_lock, flags);
1382 wake_up(&sync->pict_frame_wait);
/* Ioctls shared by the config and control device nodes: pmem register /
 * unregister.
 */
1388 static long msm_ioctl_common(struct msm_device *pmsm,
1392 CDBG("msm_ioctl_common\n");
1394 case MSM_CAM_IOCTL_REGISTER_PMEM:
1395 return msm_register_pmem(pmsm->sync, argp);
1396 case MSM_CAM_IOCTL_UNREGISTER_PMEM:
1397 return msm_pmem_table_del(pmsm->sync, argp);
/* Ioctl dispatcher for the config device node (used by the config thread):
 * sensor info, VFE config/enable/disable/reset, stats retrieval, AXI
 * config, crop, post-processing control, sensor I/O and flash LED state.
 * Unhandled commands fall through to msm_ioctl_common().
 */
1403 static long msm_ioctl_config(struct file *filep, unsigned int cmd,
1407 void __user *argp = (void __user *)arg;
1408 struct msm_device *pmsm = filep->private_data;
1410 CDBG("msm_ioctl_config cmd = %d\n", _IOC_NR(cmd));
1413 case MSM_CAM_IOCTL_GET_SENSOR_INFO:
1414 rc = msm_get_sensor_info(pmsm->sync, argp);
1417 case MSM_CAM_IOCTL_CONFIG_VFE:
1418 /* Coming from config thread for update */
1419 rc = msm_config_vfe(pmsm->sync, argp);
1422 case MSM_CAM_IOCTL_GET_STATS:
1423 /* Coming from config thread wait
1424 * for vfe statistics and control requests */
1425 rc = msm_get_stats(pmsm->sync, argp);
1428 case MSM_CAM_IOCTL_ENABLE_VFE:
1429 /* This request comes from control thread:
1430 * enable either QCAMTASK or VFETASK */
1431 rc = msm_enable_vfe(pmsm->sync, argp);
1434 case MSM_CAM_IOCTL_DISABLE_VFE:
1435 /* This request comes from control thread:
1436 * disable either QCAMTASK or VFETASK */
1437 rc = msm_disable_vfe(pmsm->sync, argp);
1440 case MSM_CAM_IOCTL_VFE_APPS_RESET:
1441 msm_camio_vfe_blk_reset();
1445 case MSM_CAM_IOCTL_RELEASE_STATS_BUFFER:
1446 rc = msm_put_stats_buffer(pmsm->sync, argp);
1449 case MSM_CAM_IOCTL_AXI_CONFIG:
1450 rc = msm_axi_config(pmsm->sync, argp);
1453 case MSM_CAM_IOCTL_SET_CROP:
1454 rc = msm_set_crop(pmsm->sync, argp);
1457 case MSM_CAM_IOCTL_PICT_PP: {
/* Enable/disable userspace snapshot post-processing. */
1459 if (copy_from_user(&enable, argp, sizeof(enable))) {
1460 ERR_COPY_FROM_USER();
1463 pmsm->sync->pict_pp = enable;
1469 case MSM_CAM_IOCTL_PICT_PP_DONE:
1470 rc = msm_pict_pp_done(pmsm->sync, argp);
1473 case MSM_CAM_IOCTL_SENSOR_IO_CFG:
1474 rc = pmsm->sync->sctrl.s_config(argp);
1477 case MSM_CAM_IOCTL_FLASH_LED_CFG: {
1479 if (copy_from_user(&led_state, argp, sizeof(led_state))) {
1480 ERR_COPY_FROM_USER();
1483 rc = msm_camera_flash_set_led_state(led_state);
1488 rc = msm_ioctl_common(pmsm, cmd, argp);
1492 CDBG("msm_ioctl_config cmd = %d DONE\n", _IOC_NR(cmd));
1496 static int msm_unblock_poll_frame(struct msm_sync *);
/* Ioctl dispatcher for the frame device node (used by the frame thread):
 * get a preview frame, release a frame buffer, or unblock a poller.
 */
1498 static long msm_ioctl_frame(struct file *filep, unsigned int cmd,
1502 void __user *argp = (void __user *)arg;
1503 struct msm_device *pmsm = filep->private_data;
1507 case MSM_CAM_IOCTL_GETFRAME:
1508 /* Coming from frame thread to get frame
1509 * after SELECT is done */
1510 rc = msm_get_frame(pmsm->sync, argp);
1512 case MSM_CAM_IOCTL_RELEASE_FRAME_BUFFER:
1513 rc = msm_put_frame_buffer(pmsm->sync, argp);
1515 case MSM_CAM_IOCTL_UNBLOCK_POLL_FRAME:
1516 rc = msm_unblock_poll_frame(pmsm->sync);
/* Ioctl dispatcher for the control device node (used by the control
 * thread): issue control commands (blocking or fire-and-forget), receive
 * command completions from the config thread, and take snapshots.
 * Unhandled commands fall through to msm_ioctl_common().
 */
1526 static long msm_ioctl_control(struct file *filep, unsigned int cmd,
1530 void __user *argp = (void __user *)arg;
1531 struct msm_control_device *ctrl_pmsm = filep->private_data;
1532 struct msm_device *pmsm = ctrl_pmsm->pmsm;
1535 case MSM_CAM_IOCTL_CTRL_COMMAND:
1536 /* Coming from control thread, may need to wait for
1538 rc = msm_control(ctrl_pmsm, 1, argp);
1540 case MSM_CAM_IOCTL_CTRL_COMMAND_2:
1541 /* Sends a message, returns immediately */
1542 rc = msm_control(ctrl_pmsm, 0, argp);
1544 case MSM_CAM_IOCTL_CTRL_CMD_DONE:
1545 /* Config thread calls the control thread to notify it
1546 * of the result of a MSM_CAM_IOCTL_CTRL_COMMAND.
1548 rc = msm_ctrl_cmd_done(ctrl_pmsm, argp);
1550 case MSM_CAM_IOCTL_GET_PICTURE:
1551 rc = msm_get_pic(pmsm->sync, argp);
1554 rc = msm_ioctl_common(pmsm, cmd, argp);
/* Common release path (under sync->lock): when the last opener goes away,
 * release the VFE, free crop info, unpin every registered frame/stats pmem
 * region, drain all three command queues, and power down the sensor.
 */
1561 static int __msm_release(struct msm_sync *sync)
1563 struct msm_pmem_region *region;
1564 struct hlist_node *hnode;
1565 struct hlist_node *n;
1567 mutex_lock(&sync->lock);
1571 if (!sync->opencnt) {
1572 /* need to clean up system resource */
1573 if (sync->vfefn.vfe_release)
1574 sync->vfefn.vfe_release(sync->pdev);
1576 if (sync->cropinfo) {
1577 kfree(sync->cropinfo);
1578 sync->cropinfo = NULL;
/* Unpin every registered pmem region on both tables. */
1582 hlist_for_each_entry_safe(region, hnode, n,
1583 &sync->frame, list) {
1585 put_pmem_file(region->file);
1589 hlist_for_each_entry_safe(region, hnode, n,
1590 &sync->stats, list) {
1592 put_pmem_file(region->file);
1596 MSM_DRAIN_QUEUE(sync, msg_event_q);
1597 MSM_DRAIN_QUEUE(sync, prev_frame_q);
1598 MSM_DRAIN_QUEUE(sync, pict_frame_q);
1600 sync->sctrl.s_release();
1602 sync->apps_id = NULL;
1603 CDBG("msm_release completed!\n");
1605 mutex_unlock(&sync->lock);
/* release() for the config device node: run the common release and mark the
 * device reopenable.
 */
1610 static int msm_release_config(struct inode *node, struct file *filep)
1613 struct msm_device *pmsm = filep->private_data;
1614 printk("msm_camera: RELEASE %s\n", filep->f_path.dentry->d_name.name);
1615 rc = __msm_release(pmsm->sync);
1616 atomic_set(&pmsm->opened, 0);
/* release() for the control device node: run the common release and drain
 * this control device's status queue plus the picture-frame queue.
 */
1620 static int msm_release_control(struct inode *node, struct file *filep)
1623 struct msm_control_device *ctrl_pmsm = filep->private_data;
1624 struct msm_device *pmsm = ctrl_pmsm->pmsm;
1625 printk("msm_camera: RELEASE %s\n", filep->f_path.dentry->d_name.name);
1626 rc = __msm_release(pmsm->sync);
1628 MSM_DRAIN_QUEUE(&ctrl_pmsm->ctrl_q, ctrl_status_q);
1629 MSM_DRAIN_QUEUE(pmsm->sync, pict_frame_q);
/* release() for the frame device node: run the common release, drain the
 * preview queue, and mark the device reopenable.
 */
1635 static int msm_release_frame(struct inode *node, struct file *filep)
1638 struct msm_device *pmsm = filep->private_data;
1639 printk("msm_camera: RELEASE %s\n", filep->f_path.dentry->d_name.name);
1640 rc = __msm_release(pmsm->sync);
1642 MSM_DRAIN_QUEUE(pmsm->sync, prev_frame_q);
1643 atomic_set(&pmsm->opened, 0);
/* Force any poller of the frame node to return: set the unblock flag under
 * the preview-queue lock and wake the preview waitqueue.
 */
1648 static int msm_unblock_poll_frame(struct msm_sync *sync)
1650 unsigned long flags;
1651 CDBG("msm_unblock_poll_frame\n");
1652 spin_lock_irqsave(&sync->prev_frame_q_lock, flags);
1653 sync->unblock_poll_frame = 1;
1654 wake_up(&sync->prev_frame_wait);
1655 spin_unlock_irqrestore(&sync->prev_frame_q_lock, flags);
/* poll() backend for the frame node: readable when a preview frame is
 * queued; a pending unblock request also wakes the poller and is consumed
 * (reset to 0) under the queue lock.
 */
1659 static unsigned int __msm_poll_frame(struct msm_sync *sync,
1661 struct poll_table_struct *pll_table)
1664 unsigned long flags;
1666 poll_wait(filep, &sync->prev_frame_wait, pll_table);
1668 spin_lock_irqsave(&sync->prev_frame_q_lock, flags);
1669 if (!list_empty_careful(&sync->prev_frame_q))
1671 rc = POLLIN | POLLRDNORM;
1672 if (sync->unblock_poll_frame) {
1673 CDBG("%s: sync->unblock_poll_frame is true\n", __func__);
1675 sync->unblock_poll_frame = 0;
1677 spin_unlock_irqrestore(&sync->prev_frame_q_lock, flags);
/* VFS ->poll for the "frame" node: thin wrapper over __msm_poll_frame(). */
1682 static unsigned int msm_poll_frame(struct file *filep,
1683 struct poll_table_struct *pll_table)
1685 struct msm_device *pmsm = filep->private_data;
1686 return __msm_poll_frame(pmsm->sync, filep, pll_table);
1690  * This function executes in interrupt context.
/*
 * Buffer allocator handed to the VFE layer: over-allocates a
 * msm_queue_cmd header in front of the buffer the caller sees
 * (GFP_ATOMIC because we may be in IRQ context).  msm_vfe_sync()
 * recovers the header with ((struct msm_queue_cmd *)vdata) - 1.
 */
1693 static void *msm_vfe_sync_alloc(int size,
1694 void *syncdata __attribute__((unused)))
1696 struct msm_queue_cmd *qcmd =
1697 kmalloc(sizeof(struct msm_queue_cmd) + size, GFP_ATOMIC);
1698 return qcmd ? qcmd + 1 : NULL;
1702  * This function executes in interrupt context.
/*
 * VFE event callback (interrupt context -- GFP_ATOMIC only).
 * For MSM_CAM_Q_VFE_MSG events it fans out by vdata->type:
 *  - VFE_MSG_OUTPUT1/OUTPUT2: copies the frame's physical-address info
 *    into a freshly allocated command and queues it on prev_frame_q,
 *    waking any preview poller;
 *  - VFE_MSG_SNAPSHOT: queues a payload-less marker command on
 *    pict_frame_q, waking the picture thread.
 * The original event is then queued on msg_event_q for the config
 * thread.  NOTE(review): error paths and 'break's between the visible
 * lines are not shown here -- verify against the full source.
 */
1705 static void msm_vfe_sync(struct msm_vfe_resp *vdata,
1706 enum msm_queue qtype, void *syncdata)
1708 struct msm_queue_cmd *qcmd = NULL;
1709 struct msm_queue_cmd *qcmd_frame = NULL;
1710 struct msm_vfe_phy_info *fphy;
1712 unsigned long flags;
1713 struct msm_sync *sync = (struct msm_sync *)syncdata;
1715 pr_err("msm_camera: no context in dsp callback.\n");
/* Recover the header prepended by msm_vfe_sync_alloc(). */
1719 qcmd = ((struct msm_queue_cmd *)vdata) - 1;
1722 if (qtype == MSM_CAM_Q_VFE_MSG) {
1723 switch(vdata->type) {
1724 case VFE_MSG_OUTPUT1:
1725 case VFE_MSG_OUTPUT2:
1727 kmalloc(sizeof(struct msm_queue_cmd) +
1728 sizeof(struct msm_vfe_phy_info),
/* phys-info payload lives directly after the command header */
1732 fphy = (struct msm_vfe_phy_info *)(qcmd_frame + 1);
1735 qcmd_frame->type = MSM_CAM_Q_VFE_MSG;
1736 qcmd_frame->command = fphy;
1738 CDBG("qcmd_frame= 0x%x phy_y= 0x%x, phy_cbcr= 0x%x\n",
1739 (int) qcmd_frame, fphy->y_phy, fphy->cbcr_phy);
1741 spin_lock_irqsave(&sync->prev_frame_q_lock, flags);
1742 list_add_tail(&qcmd_frame->list, &sync->prev_frame_q);
1743 wake_up(&sync->prev_frame_wait);
1744 spin_unlock_irqrestore(&sync->prev_frame_q_lock, flags);
1745 CDBG("woke up frame thread\n");
1747 case VFE_MSG_SNAPSHOT:
1751 CDBG("snapshot pp = %d\n", sync->pict_pp);
1753 kmalloc(sizeof(struct msm_queue_cmd),
/* marker only: no payload, command stays NULL */
1757 qcmd_frame->type = MSM_CAM_Q_VFE_MSG;
1758 qcmd_frame->command = NULL;
1759 spin_lock_irqsave(&sync->pict_frame_q_lock,
1761 list_add_tail(&qcmd_frame->list, &sync->pict_frame_q);
1762 wake_up(&sync->pict_frame_wait);
1763 spin_unlock_irqrestore(&sync->pict_frame_q_lock, flags);
1764 CDBG("woke up picture thread\n");
1767 CDBG("%s: qtype = %d not handled\n",
1768 __func__, vdata->type);
/* All events (handled or not) go to the config thread as well. */
1773 qcmd->command = (void *)vdata;
1774 CDBG("vdata->type = %d\n", vdata->type);
1776 spin_lock_irqsave(&sync->msg_event_q_lock, flags);
1777 list_add_tail(&qcmd->list, &sync->msg_event_q);
1778 wake_up(&sync->msg_event_wait);
1779 spin_unlock_irqrestore(&sync->msg_event_q_lock, flags);
1780 CDBG("woke up config thread\n");
/* Callback table registered with the VFE through vfe_init() in __msm_open(). */
1787 static struct msm_vfe_callback msm_vfe_s = {
1788 .vfe_resp = msm_vfe_sync,
1789 .vfe_alloc = msm_vfe_sync_alloc,
/*
 * Common open path, serialized by sync->lock.  Refuses to open for a
 * second application id while another app owns the sensor.  On the very
 * first open (opencnt == 0) it initializes the VFE (registering the
 * msm_vfe_s callbacks) and the sensor, and resets the pmem region lists
 * and the poll-unblock flag.
 */
1792 static int __msm_open(struct msm_sync *sync, const char *const apps_id)
1796 mutex_lock(&sync->lock);
1797 if (sync->apps_id && strcmp(sync->apps_id, apps_id)) {
1798 pr_err("msm_camera(%s): sensor %s is already opened for %s\n",
1800 sync->sdata->sensor_name,
1806 sync->apps_id = apps_id; /* claim the sensor for this application */
1808 if (!sync->opencnt) {
/* first opener brings up the hardware */
1810 msm_camvfe_fn_init(&sync->vfefn, sync);
1811 if (sync->vfefn.vfe_init) {
1812 rc = sync->vfefn.vfe_init(&msm_vfe_s,
1815 pr_err("vfe_init failed at %d\n", rc);
1818 rc = sync->sctrl.s_init(sync->sdata);
1820 pr_err("sensor init failed: %d\n", rc);
1824 pr_err("no sensor init func\n");
1830 INIT_HLIST_HEAD(&sync->frame);
1831 INIT_HLIST_HEAD(&sync->stats);
1832 sync->unblock_poll_frame = 0;
1838 mutex_unlock(&sync->lock);
/*
 * Shared open() for all three device nodes.  When 'once' is non-zero the
 * node is exclusive: a second open while 'opened' is already set fails
 * (atomic test-and-set).  On success the msm_device becomes the file's
 * private_data.
 */
1842 static int msm_open_common(struct inode *inode, struct file *filep,
1846 struct msm_device *pmsm =
1847 container_of(inode->i_cdev, struct msm_device, cdev);
1849 CDBG("msm_camera: open %s\n", filep->f_path.dentry->d_name.name);
1851 if (atomic_cmpxchg(&pmsm->opened, 0, 1) && once) {
1852 pr_err("msm_camera: %s is already opened.\n",
1853 filep->f_path.dentry->d_name.name);
1857 rc = nonseekable_open(inode, filep);
1859 pr_err("msm_open: nonseekable_open error %d\n", rc);
1863 rc = __msm_open(pmsm->sync, MSM_APPS_ID_PROP);
1867 filep->private_data = pmsm;
1869 CDBG("msm_open() open: rc = %d\n", rc);
/* open() for the config and frame nodes: exclusive (once = 1). */
1873 static int msm_open(struct inode *inode, struct file *filep)
1875 return msm_open_common(inode, filep, 1);
/*
 * open() for the control node: multiple openers allowed (once = 0).
 * Each opener gets its own msm_control_device wrapping the msm_device,
 * with a private control-status queue, lock and waitqueue.
 */
1878 static int msm_open_control(struct inode *inode, struct file *filep)
1882 struct msm_control_device *ctrl_pmsm =
1883 kmalloc(sizeof(struct msm_control_device), GFP_KERNEL);
1887 rc = msm_open_common(inode, filep, 0);
/* msm_open_common() stored the msm_device; rewrap it in ctrl_pmsm */
1891 ctrl_pmsm->pmsm = filep->private_data;
1892 filep->private_data = ctrl_pmsm;
1893 spin_lock_init(&ctrl_pmsm->ctrl_q.ctrl_status_q_lock);
1894 INIT_LIST_HEAD(&ctrl_pmsm->ctrl_q.ctrl_status_q);
1895 init_waitqueue_head(&ctrl_pmsm->ctrl_q.ctrl_status_wait);
1897 CDBG("msm_open() open: rc = %d\n", rc);
/*
 * Issue a control command on behalf of the V4L2 glue driver and copy
 * the reply payload back into *out.  A throwaway on-stack status queue
 * (aptly named FIXME) is used because no control fd exists on this path.
 */
1901 static int __msm_v4l2_control(struct msm_sync *sync,
1902 struct msm_ctrl_cmd *out)
1906 struct msm_queue_cmd *qcmd = NULL, *rcmd = NULL;
1907 struct msm_ctrl_cmd *ctrl;
1908 struct msm_control_device_queue FIXME;
1910 /* wake up config thread, 4 is for V4L2 application */
1911 qcmd = kmalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL);
1913 pr_err("msm_control: cannot allocate buffer\n");
1917 qcmd->type = MSM_CAM_Q_V4L2_REQ;
1918 qcmd->command = out;
1920 rcmd = __msm_control(sync, &FIXME, qcmd, out->timeout_ms);
1926 ctrl = (struct msm_ctrl_cmd *)(rcmd->command);
1927 /* FIXME: we should just set out->length = ctrl->length; */
/* the caller's buffer must be able to hold the whole reply */
1928 BUG_ON(out->length < ctrl->length);
1929 memcpy(out->value, ctrl->value, ctrl->length);
/* NOTE(review): the NULL guard is redundant -- kfree(NULL) is a no-op */
1932 if (rcmd) kfree(rcmd);
1933 CDBG("__msm_v4l2_control: end rc = %d\n", rc);
/* File operations for the "config" node (open handler on an elided line). */
1937 static const struct file_operations msm_fops_config = {
1938 .owner = THIS_MODULE,
1940 .unlocked_ioctl = msm_ioctl_config,
1941 .release = msm_release_config,
/* File operations for the "control" node (multiple concurrent openers). */
1944 static const struct file_operations msm_fops_control = {
1945 .owner = THIS_MODULE,
1946 .open = msm_open_control,
1947 .unlocked_ioctl = msm_ioctl_control,
1948 .release = msm_release_control,
/* File operations for the "frame" node; the only node supporting poll(). */
1951 static const struct file_operations msm_fops_frame = {
1952 .owner = THIS_MODULE,
1954 .unlocked_ioctl = msm_ioctl_frame,
1955 .release = msm_release_frame,
1956 .poll = msm_poll_frame,
/*
 * Create one character device node named "<suffix><node>" in the
 * msm_camera class and register its cdev with the given fops.  If
 * cdev_add() fails, the just-created device is destroyed again.
 */
1959 static int msm_setup_cdev(struct msm_device *msm,
1963 const struct file_operations *fops)
1967 struct device *device =
1968 device_create(msm_class, NULL,
1970 "%s%d", suffix, node);
1972 if (IS_ERR(device)) {
1973 rc = PTR_ERR(device);
1974 pr_err("msm_camera: error creating device: %d\n", rc);
1978 cdev_init(&msm->cdev, fops);
1979 msm->cdev.owner = THIS_MODULE;
1981 rc = cdev_add(&msm->cdev, devno, 1);
1983 pr_err("msm_camera: error adding cdev: %d\n", rc);
1984 device_destroy(msm_class, devno); /* undo device_create() on failure */
/* Undo msm_setup_cdev(): remove the cdev and destroy its device node. */
1991 static int msm_tear_down_cdev(struct msm_device *msm, dev_t devno)
1993 cdev_del(&msm->cdev);
1994 device_destroy(msm_class, devno);
/*
 * Export the internal camera entry points to the msm V4L2 glue driver.
 * Only a single sensor is supported: the first entry of msm_sensors is
 * handed out (see the FIXME).  Fails when no sensor has registered.
 */
1998 int msm_v4l2_register(struct msm_v4l2_driver *drv)
2000 /* FIXME: support multiple sensors */
2001 if (list_empty(&msm_sensors))
2004 drv->sync = list_first_entry(&msm_sensors, struct msm_sync, list);
2005 drv->open = __msm_open;
2006 drv->release = __msm_release;
2007 drv->ctrl = __msm_v4l2_control;
2008 drv->reg_pmem = __msm_register_pmem;
2009 drv->get_frame = __msm_get_frame;
2010 drv->put_frame = __msm_put_frame_buf;
2011 drv->get_pict = __msm_get_pic;
2012 drv->drv_poll = __msm_poll_frame;
2016 EXPORT_SYMBOL(msm_v4l2_register);
/* Counterpart of msm_v4l2_register(): detach the V4L2 glue driver. */
2018 int msm_v4l2_unregister(struct msm_v4l2_driver *drv)
2023 EXPORT_SYMBOL(msm_v4l2_unregister);
/*
 * One-time initialization of the per-sensor msm_sync: sets up the three
 * event queues (message, preview frame, picture frame) with their locks
 * and waitqueues, then powers the camera I/O on just long enough to
 * probe the sensor and capture its control ops into sync->sctrl.
 */
2025 static int msm_sync_init(struct msm_sync *sync,
2026 struct platform_device *pdev,
2027 int (*sensor_probe)(const struct msm_camera_sensor_info *,
2028 struct msm_sensor_ctrl *))
2031 struct msm_sensor_ctrl sctrl;
2032 sync->sdata = pdev->dev.platform_data;
2034 spin_lock_init(&sync->msg_event_q_lock);
2035 INIT_LIST_HEAD(&sync->msg_event_q);
2036 init_waitqueue_head(&sync->msg_event_wait);
2038 spin_lock_init(&sync->prev_frame_q_lock);
2039 INIT_LIST_HEAD(&sync->prev_frame_q);
2040 init_waitqueue_head(&sync->prev_frame_wait);
2042 spin_lock_init(&sync->pict_frame_q_lock);
2043 INIT_LIST_HEAD(&sync->pict_frame_q);
2044 init_waitqueue_head(&sync->pict_frame_wait);
/* power on camera I/O only for the duration of the sensor probe */
2046 rc = msm_camio_probe_on(pdev);
2049 rc = sensor_probe(sync->sdata, &sctrl);
2052 sync->sctrl = sctrl; /* keep the probed sensor's control ops */
2054 msm_camio_probe_off(pdev);
2056 pr_err("msm_camera: failed to initialize %s\n",
2057 sync->sdata->sensor_name);
2062 mutex_init(&sync->lock);
2063 CDBG("initialized %s\n", sync->sdata->sensor_name);
/* Teardown counterpart of msm_sync_init(). */
2067 static int msm_sync_destroy(struct msm_sync *sync)
/*
 * Create the three device nodes for sensor 'node' -- control, config
 * and frame -- at consecutive minors 3*node .. 3*node+2, unwinding any
 * already-created nodes on failure, then point all three msm_devices at
 * the shared sync object and mark them closed.
 */
2072 static int msm_device_init(struct msm_device *pmsm,
2073 struct msm_sync *sync,
2076 int dev_num = 3 * node; /* three minors per sensor */
2077 int rc = msm_setup_cdev(pmsm, node,
2078 MKDEV(MAJOR(msm_devno), dev_num),
2079 "control", &msm_fops_control);
2081 pr_err("error creating control node: %d\n", rc);
2085 rc = msm_setup_cdev(pmsm + 1, node,
2086 MKDEV(MAJOR(msm_devno), dev_num + 1),
2087 "config", &msm_fops_config);
2089 pr_err("error creating config node: %d\n", rc);
2090 msm_tear_down_cdev(pmsm, MKDEV(MAJOR(msm_devno),
2095 rc = msm_setup_cdev(pmsm + 2, node,
2096 MKDEV(MAJOR(msm_devno), dev_num + 2),
2097 "frame", &msm_fops_frame);
2099 pr_err("error creating frame node: %d\n", rc);
/* unwind the two nodes created above */
2100 msm_tear_down_cdev(pmsm,
2101 MKDEV(MAJOR(msm_devno), dev_num));
2102 msm_tear_down_cdev(pmsm + 1,
2103 MKDEV(MAJOR(msm_devno), dev_num + 1));
2107 atomic_set(&pmsm[0].opened, 0);
2108 atomic_set(&pmsm[1].opened, 0);
2109 atomic_set(&pmsm[2].opened, 0);
2111 pmsm[0].sync = sync;
2112 pmsm[1].sync = sync;
2113 pmsm[2].sync = sync;
/*
 * Platform-driver entry point for one camera sensor.  On the first call
 * it allocates the char-device region (three minors per sensor, up to
 * MSM_MAX_CAMERA_SENSORS) and the "msm_camera" device class; then, per
 * sensor, it allocates the three msm_devices plus one msm_sync in a
 * single block, probes the sensor, creates the device nodes and links
 * the sync into the global msm_sensors list.
 */
2118 int msm_camera_drv_start(struct platform_device *dev,
2119 int (*sensor_probe)(const struct msm_camera_sensor_info *,
2120 struct msm_sensor_ctrl *))
2122 struct msm_device *pmsm = NULL;
2123 struct msm_sync *sync;
2125 static int camera_node; /* counts sensors registered so far */
2127 if (camera_node >= MSM_MAX_CAMERA_SENSORS) {
2128 pr_err("msm_camera: too many camera sensors\n");
2133 /* There are three device nodes per sensor */
2134 rc = alloc_chrdev_region(&msm_devno, 0,
2135 3 * MSM_MAX_CAMERA_SENSORS,
2138 pr_err("msm_camera: failed to allocate chrdev: %d\n",
2143 msm_class = class_create(THIS_MODULE, "msm_camera");
2144 if (IS_ERR(msm_class)) {
2145 rc = PTR_ERR(msm_class);
2146 pr_err("msm_camera: create device class failed: %d\n",
/* one block: 3 devices followed by the sync (see FIXME re GFP_ATOMIC) */
2152 pmsm = kzalloc(sizeof(struct msm_device) * 3 +
2153 sizeof(struct msm_sync), GFP_ATOMIC);
2156 sync = (struct msm_sync *)(pmsm + 3); /* sync lives after the 3 devices */
2158 rc = msm_sync_init(sync, dev, sensor_probe);
2164 CDBG("setting camera node %d\n", camera_node);
2165 rc = msm_device_init(pmsm, sync, camera_node);
2167 msm_sync_destroy(sync);
2173 list_add(&sync->list, &msm_sensors);
2176 EXPORT_SYMBOL(msm_camera_drv_start);