/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"
/*
 * IPP stands for Image Post Processing and covers image scaling, rotation
 * and input/output DMA operations using hardware blocks such as FIMC, GSC
 * and Rotator. IPP is an integration driver for this family of devices,
 * which all share the same basic attributes.
 */

/*
 * TODO
 * 1. expand command control id.
 * 2. integrate property and config.
 * 3. remove the send_event id check routine.
 * 4. compare send_event id if needed.
 * 5. free the subdrv_remove notifier callback list if needed.
 * 6. check subdrv_open against multi-open.
 * 7. implement power and sysmmu control in power_on.
 */
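/*
 * Rough user-space flow through the ioctls below (a sketch; the request
 * structures are defined in the uapi header exynos_drm.h):
 *
 * 1. DRM_EXYNOS_IPP_GET_PROPERTY - enumerate drivers, query capabilities.
 * 2. DRM_EXYNOS_IPP_SET_PROPERTY - create a command node, get a prop_id.
 * 3. DRM_EXYNOS_IPP_QUEUE_BUF    - enqueue/dequeue buffers by gem handle.
 * 4. DRM_EXYNOS_IPP_CMD_CTRL     - PLAY/STOP/PAUSE/RESUME the command.
 */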
#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define ipp_is_m2m_cmd(c)	((c) == IPP_CMD_M2M)

/* platform device pointer for ipp device. */
static struct platform_device *exynos_drm_ipp_pdev;
/*
 * A structure of event.
 *
 * @base: base of event.
 * @event: ipp event.
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;
	struct drm_exynos_ipp_event	event;
};
/*
 * A structure of memory node.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations.
 * @prop_id: id of property.
 * @buf_id: id of buffer.
 * @buf_info: gem objects and dma address, size.
 * @filp: a pointer to drm_file.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;
	enum drm_exynos_ops_id	ops_id;
	u32	prop_id;
	u32	buf_id;
	struct drm_exynos_ipp_buf_info	buf_info;
	struct drm_file		*filp;
};
/*
 * A structure of ipp context.
 *
 * @subdrv: prepare initialization using subdrv.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr.
 * @prop_idr: property idr.
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;
	struct mutex	ipp_lock;
	struct mutex	prop_lock;
	struct idr	ipp_idr;
	struct idr	prop_idr;
	struct workqueue_struct	*event_workq;
	struct workqueue_struct	*cmd_workq;
};
static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
int exynos_platform_device_ipp_register(void)
{
	struct platform_device *pdev;

	if (exynos_drm_ipp_pdev)
		return -EEXIST;

	pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	exynos_drm_ipp_pdev = pdev;

	return 0;
}

void exynos_platform_device_ipp_unregister(void)
{
	if (exynos_drm_ipp_pdev) {
		platform_device_unregister(exynos_drm_ipp_pdev);
		exynos_drm_ipp_pdev = NULL;
	}
}
int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}
static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
		u32 *idp)
{
	int ret;

	/* do the allocation under our mutex lock */
	mutex_lock(lock);
	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
	mutex_unlock(lock);
	if (ret < 0)
		return ret;

	*idp = ret;
	return 0;
}
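/*
 * Note: idr_alloc() above allocates ids starting from 1, so a valid
 * ipp_id or prop_id is always non-zero; the ioctl handlers below rely
 * on 0 meaning "not set".
 */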
static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
{
	mutex_lock(lock);
	idr_remove(id_idr, id);
	mutex_unlock(lock);
}
static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
	void *obj;

	DRM_DEBUG_KMS("id[%d]\n", id);

	mutex_lock(lock);

	/* find object using handle */
	obj = idr_find(id_idr, id);
	if (!obj) {
		DRM_ERROR("failed to find object.\n");
		mutex_unlock(lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_unlock(lock);

	return obj;
}
static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
		enum drm_exynos_ipp_cmd cmd)
{
	/*
	 * check the dedicated flag, and for WB/OUTPUT operations
	 * whether the device is still powered on.
	 */
	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
	    !pm_runtime_suspended(ippdrv->dev)))
		return true;

	return false;
}
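/*
 * In other words, a driver counts as taken above if it was claimed as
 * dedicated, or if a non-M2M (WB/OUTPUT) command may still be running,
 * which is inferred from the device not being runtime-suspended.
 */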
static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;

	DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id);

	if (ipp_id) {
		/* find ipp driver using idr */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
			ipp_id);
		if (IS_ERR(ippdrv)) {
			DRM_ERROR("failed to find ipp%d driver.\n", ipp_id);
			return ippdrv;
		}

		/*
		 * WB and OUTPUT operations do not support multi-operation,
		 * so mark the driver dedicated in the set property ioctl
		 * and clear the flag again once the operation finishes.
		 */
		if (ipp_check_dedicated(ippdrv, property->cmd)) {
			DRM_ERROR("chosen device is already in use.\n");
			return ERR_PTR(-EBUSY);
		}

		/*
		 * This is necessary to pick the correct device: the ipp
		 * drivers have different abilities, so the property has
		 * to be checked against the chosen driver.
		 */
		if (ippdrv->check_property &&
		    ippdrv->check_property(ippdrv->dev, property)) {
			DRM_ERROR("property not supported.\n");
			return ERR_PTR(-EINVAL);
		}

		return ippdrv;
	} else {
		/*
		 * The user application did not set an ipp_id, so the ipp
		 * subsystem searches the whole driver list for a driver
		 * that matches the requested property.
		 */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			if (ipp_check_dedicated(ippdrv, property->cmd)) {
				DRM_DEBUG_KMS("used device.\n");
				continue;
			}

			if (ippdrv->check_property &&
			    ippdrv->check_property(ippdrv->dev, property)) {
				DRM_DEBUG_KMS("property not supported.\n");
				continue;
			}

			return ippdrv;
		}

		DRM_ERROR("no ipp driver supports these operations.\n");
	}

	return ERR_PTR(-ENODEV);
}
static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	/*
	 * Search for the ipp driver by prop_id handle. The ipp subsystem
	 * needs this lookup in several places, e.g. the PAUSE state,
	 * queue buf and command control.
	 */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv);

		mutex_lock(&ippdrv->cmd_lock);
		list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
			if (c_node->property.prop_id == prop_id) {
				mutex_unlock(&ippdrv->cmd_lock);
				return ippdrv;
			}
		}
		mutex_unlock(&ippdrv->cmd_lock);
	}

	return ERR_PTR(-ENODEV);
}
int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;

		/*
		 * Report the ippdrv list count to the user application:
		 * in a first step the application reads this count, and
		 * in a second step it queries each driver's capability
		 * by ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Get the ippdrv capability by ipp_id. Some devices do
		 * not support the wb or output interface, so the user
		 * application detects the correct ipp driver by ipp_id.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		if (IS_ERR(ippdrv)) {
			DRM_ERROR("failed to find ipp%d driver.\n",
					prop_list->ipp_id);
			return PTR_ERR(ippdrv);
		}

		*prop_list = ippdrv->prop_list;
	}

	return 0;
}
static void ipp_print_property(struct drm_exynos_ipp_property *property,
		int idx)
{
	struct drm_exynos_ipp_config *config = &property->config[idx];
	struct drm_exynos_pos *pos = &config->pos;
	struct drm_exynos_sz *sz = &config->sz;

	DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
		property->prop_id, idx ? "dst" : "src", config->fmt);

	DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
		pos->x, pos->y, pos->w, pos->h,
		sz->hsize, sz->vsize, config->flip, config->degree);
}
static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	u32 prop_id = property->prop_id;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	ippdrv = ipp_find_drv_by_handle(prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/*
	 * Find the command node on the ippdrv command list: when the
	 * node matching prop_id is found, store the new property
	 * information in it.
	 */
	mutex_lock(&ippdrv->cmd_lock);
	list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
		if ((c_node->property.prop_id == prop_id) &&
		    (c_node->state == IPP_STATE_STOP)) {
			mutex_unlock(&ippdrv->cmd_lock);
			DRM_DEBUG_KMS("found cmd[%d]ippdrv[%p]\n",
				property->cmd, ippdrv);

			c_node->property = *property;
			return 0;
		}
	}
	mutex_unlock(&ippdrv->cmd_lock);

	DRM_ERROR("failed to search property.\n");

	return -EINVAL;
}
static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
{
	struct drm_exynos_ipp_cmd_work *cmd_work;

	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
	if (!cmd_work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);

	return cmd_work;
}
static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
{
	struct drm_exynos_ipp_event_work *event_work;

	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
	if (!event_work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&event_work->work, ipp_sched_event);

	return event_work;
}
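/*
 * Note the (struct work_struct *)cmd_work cast in ipp_create_cmd_work():
 * it assumes the embedded work_struct is the first member of
 * struct drm_exynos_ipp_cmd_work (declared in exynos_drm_ipp.h), so that
 * ipp_sched_cmd() can simply cast the work pointer back.
 */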
int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_property *property = data;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret, i;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	/*
	 * Log the property set by the user application for each
	 * operation.
	 */
	for_each_ipp_ops(i)
		ipp_print_property(property, i);

	/*
	 * The set property ioctl normally generates a new prop_id, but a
	 * prop_id may already have been assigned by an earlier set
	 * property call, e.g. in the PAUSE state; then find the current
	 * prop_id and use it instead of allocating a new one.
	 */
	if (property->prop_id) {
		DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
		return ipp_find_and_set_property(property);
	}

	/* find ipp driver using ipp id */
	ippdrv = ipp_find_driver(ctx, property);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/* allocate command node */
	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
	if (!c_node)
		return -ENOMEM;

	/* create property id */
	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
		&property->prop_id);
	if (ret) {
		DRM_ERROR("failed to create id.\n");
		goto err_clear;
	}

	DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n",
		property->prop_id, property->cmd, ippdrv);

	/* store property information and ippdrv in private data */
	c_node->dev = dev;
	c_node->property = *property;
	c_node->state = IPP_STATE_IDLE;

	c_node->start_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->start_work)) {
		DRM_ERROR("failed to create start work.\n");
		ret = PTR_ERR(c_node->start_work);
		goto err_remove_id;
	}

	c_node->stop_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->stop_work)) {
		DRM_ERROR("failed to create stop work.\n");
		ret = PTR_ERR(c_node->stop_work);
		goto err_free_start;
	}

	c_node->event_work = ipp_create_event_work();
	if (IS_ERR(c_node->event_work)) {
		DRM_ERROR("failed to create event work.\n");
		ret = PTR_ERR(c_node->event_work);
		goto err_free_stop;
	}

	mutex_init(&c_node->lock);
	mutex_init(&c_node->mem_lock);
	mutex_init(&c_node->event_lock);

	init_completion(&c_node->start_complete);
	init_completion(&c_node->stop_complete);

	for_each_ipp_ops(i)
		INIT_LIST_HEAD(&c_node->mem_list[i]);

	INIT_LIST_HEAD(&c_node->event_list);
	mutex_lock(&ippdrv->cmd_lock);
	list_add_tail(&c_node->list, &ippdrv->cmd_list);
	mutex_unlock(&ippdrv->cmd_lock);

	/* make dedicated state without m2m */
	if (!ipp_is_m2m_cmd(property->cmd))
		ippdrv->dedicated = true;

	return 0;

err_free_stop:
	kfree(c_node->stop_work);
err_free_start:
	kfree(c_node->start_work);
err_remove_id:
	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
err_clear:
	kfree(c_node);
	return ret;
}
static void ipp_clean_cmd_node(struct ipp_context *ctx,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	/* delete list */
	list_del(&c_node->list);

	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
			c_node->property.prop_id);

	/* destroy mutex */
	mutex_destroy(&c_node->lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}
static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
	switch (c_node->property.cmd) {
	case IPP_CMD_WB:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
	case IPP_CMD_OUTPUT:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
	default:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
			!list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
	}
}
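/*
 * The rules above reflect what each command type consumes: M2M transfers
 * need both a source and a destination buffer queued, writeback (WB)
 * captures into memory and needs only a destination, and OUTPUT reads
 * from memory and needs only a source.
 */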
static struct drm_exynos_ipp_mem_node
		*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int count = 0;

	DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);

	/* source/destination memory list */
	head = &c_node->mem_list[qbuf->ops_id];

	/* find memory node from memory list */
	list_for_each_entry(m_node, head, list) {
		DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node);

		/* compare buffer id */
		if (m_node->buf_id == qbuf->buf_id)
			return m_node;
	}

	return NULL;
}
static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	int ret = 0;

	DRM_DEBUG_KMS("node[%p]\n", m_node);

	if (!m_node) {
		DRM_ERROR("invalid queue node.\n");
		return -EFAULT;
	}

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* get operations callback */
	ops = ippdrv->ops[m_node->ops_id];
	if (!ops) {
		DRM_ERROR("ops not supported.\n");
		return -EFAULT;
	}

	/* set address and enable irq */
	if (ops->set_addr) {
		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
			m_node->buf_id, IPP_BUF_ENQUEUE);
		if (ret) {
			DRM_ERROR("failed to set addr.\n");
			return ret;
		}
	}

	return ret;
}
static struct drm_exynos_ipp_mem_node
		*ipp_get_mem_node(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_buf_info *buf_info;
	int i;

	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
	if (!m_node)
		return ERR_PTR(-ENOMEM);

	buf_info = &m_node->buf_info;

	/* operations, buffer id */
	m_node->ops_id = qbuf->ops_id;
	m_node->prop_id = qbuf->prop_id;
	m_node->buf_id = qbuf->buf_id;

	DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id);
	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);

	for_each_ipp_planar(i) {
		DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);

		/* get dma address by handle */
		if (qbuf->handle[i]) {
			dma_addr_t *addr;

			addr = exynos_drm_gem_get_dma_addr(drm_dev,
					qbuf->handle[i], file);
			if (IS_ERR(addr)) {
				DRM_ERROR("failed to get addr.\n");
				goto err_clear;
			}

			buf_info->handles[i] = qbuf->handle[i];
			buf_info->base[i] = *addr;
			DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
				      buf_info->base[i], buf_info->handles[i]);
		}
	}

	m_node->filp = file;
	mutex_lock(&c_node->mem_lock);
	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
	mutex_unlock(&c_node->mem_lock);

	return m_node;

err_clear:
	kfree(m_node);
	return ERR_PTR(-EFAULT);
}
static int ipp_put_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	int i;

	DRM_DEBUG_KMS("node[%p]\n", m_node);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		return -EFAULT;
	}

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		unsigned long handle = m_node->buf_info.handles[i];
		if (handle)
			exynos_drm_gem_put_dma_addr(drm_dev, handle,
							m_node->filp);
	}

	/* delete list in queue */
	list_del(&m_node->list);
	kfree(m_node);

	return 0;
}
static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}
static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;
	mutex_lock(&c_node->event_lock);
	list_add_tail(&e->base.link, &c_node->event_list);
	mutex_unlock(&c_node->event_lock);

	return 0;
}
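/*
 * An event is created whenever a destination buffer is enqueued and is
 * completed in ipp_send_event(); since new events are appended at the
 * tail, the head of c_node->event_list always matches the oldest queued
 * destination buffer.
 */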
static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e, *te;
	int count = 0;

	mutex_lock(&c_node->event_lock);
	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
		DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e);

		/*
		 * qbuf == NULL means delete all events: the stop
		 * operation wants the whole event list gone. Otherwise
		 * delete only the event with the same buf id.
		 */
		if (!qbuf) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		}

		/* compare buffer id */
		if (qbuf && (qbuf->buf_id ==
		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
			goto out_unlock;
		}
	}

out_unlock:
	mutex_unlock(&c_node->event_lock);
}
static void ipp_handle_cmd_work(struct device *dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_work *cmd_work,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
}
static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass for invalid state.\n");
		return 0;
	}

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		mutex_unlock(&c_node->mem_lock);
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/*
	 * If the destination buffer is set and the clock enabled, m2m
	 * operations need to start the operation here at queue_buf.
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			mutex_unlock(&c_node->mem_lock);
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}
	mutex_unlock(&c_node->mem_lock);

	return 0;
}
static void ipp_clean_queue_buf(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;

	/* delete list */
	mutex_lock(&c_node->mem_lock);
	list_for_each_entry_safe(m_node, tm_node,
		&c_node->mem_list[qbuf->ops_id], list) {
		if (m_node->buf_id == qbuf->buf_id &&
		    m_node->ops_id == qbuf->ops_id)
			ipp_put_mem_node(drm_dev, c_node, m_node);
	}
	mutex_unlock(&c_node->mem_lock);
}
int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (IS_ERR(c_node)) {
		DRM_ERROR("failed to get command node.\n");
		return PTR_ERR(c_node);
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * In a first step get an event for the destination
		 * buffer; in a second step, for the M2M case, run with
		 * the destination buffer if needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * The M2M case runs the play control for the
			 * streaming feature; the other cases just set
			 * the address and wait.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");

	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}
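/*
 * Valid ctrl/state combinations enforced by the helper below:
 *
 *	PLAY   requires IPP_STATE_IDLE  (-> START)
 *	STOP   rejects  IPP_STATE_STOP  (-> STOP)
 *	PAUSE  requires IPP_STATE_START (-> STOP)
 *	RESUME requires IPP_STATE_STOP  (-> START)
 *
 * Every ctrl except PLAY additionally requires the device not to be
 * runtime-suspended.
 */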
static bool exynos_drm_ipp_check_valid(struct device *dev,
		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
{
	if (ctrl != IPP_CTRL_PLAY) {
		if (pm_runtime_suspended(dev)) {
			DRM_ERROR("pm:runtime_suspended.\n");
			goto err_status;
		}
	}

	switch (ctrl) {
	case IPP_CTRL_PLAY:
		if (state != IPP_STATE_IDLE)
			goto err_status;
		break;
	case IPP_CTRL_STOP:
		if (state == IPP_STATE_STOP)
			goto err_status;
		break;
	case IPP_CTRL_PAUSE:
		if (state != IPP_STATE_START)
			goto err_status;
		break;
	case IPP_CTRL_RESUME:
		if (state != IPP_STATE_STOP)
			goto err_status;
		break;
	default:
		DRM_ERROR("invalid state.\n");
		goto err_status;
	}

	return true;

err_status:
	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
	return false;
}
int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (IS_ERR(c_node)) {
		DRM_ERROR("invalid command node list.\n");
		return PTR_ERR(c_node);
	}

	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);

		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		mutex_lock(&ippdrv->cmd_lock);
		ipp_clean_cmd_node(ctx, c_node);

		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		mutex_unlock(&ippdrv->cmd_lock);
		break;
	case IPP_CTRL_PAUSE:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("unsupported control type.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}
int exynos_drm_ippnb_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(
		&exynos_drm_ippnb_list, val, v);
}
static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	bool swap = false;
	int ret, i;

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* reset h/w block */
	if (ippdrv->reset &&
	    ippdrv->reset(ippdrv->dev)) {
		DRM_ERROR("failed to reset.\n");
		return -EINVAL;
	}

	/* set source, destination operations */
	for_each_ipp_ops(i) {
		struct drm_exynos_ipp_config *config =
			&property->config[i];

		ops = ippdrv->ops[i];
		if (!ops || !config) {
			DRM_ERROR("ops or config not supported.\n");
			return -EINVAL;
		}

		/* set format */
		if (ops->set_fmt) {
			ret = ops->set_fmt(ippdrv->dev, config->fmt);
			if (ret) {
				DRM_ERROR("format not supported.\n");
				return ret;
			}
		}

		/* set transform for rotation, flip */
		if (ops->set_transf) {
			ret = ops->set_transf(ippdrv->dev, config->degree,
				config->flip, &swap);
			if (ret) {
				DRM_ERROR("transform not supported.\n");
				return ret;
			}
		}

		/* set size */
		if (ops->set_size) {
			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
				&config->sz);
			if (ret) {
				DRM_ERROR("size not supported.\n");
				return ret;
			}
		}
	}

	return 0;
}
static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		goto err_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			DRM_DEBUG_KMS("m_node[%p]\n", m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			ippdrv->c_node = NULL;
			return ret;
		}
	}

	return 0;

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	ippdrv->c_node = NULL;
	return ret;
}
static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret = 0, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* put event */
	ipp_put_event(c_node, NULL);

	mutex_lock(&c_node->mem_lock);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			list_for_each_entry_safe(m_node, tm_node,
				head, list) {
				ret = ipp_put_mem_node(drm_dev, c_node,
					m_node);
				if (ret) {
					DRM_ERROR("failed to put m_node.\n");
					goto err_clear;
				}
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_clear;
	}

err_clear:
	mutex_unlock(&c_node->mem_lock);

	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	return 0;
}
void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		(struct drm_exynos_ipp_cmd_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * The M2M case waits for completion of the transfer:
		 * M2M is a single-shot operation fed from multiple
		 * queues, so it has to wait until the data transfer
		 * finishes.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->lock);
}
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	mutex_lock(&c_node->event_lock);
	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("event list is empty.\n");
		ret = 0;
		goto err_event_unlock;
	}

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = 0;
		goto err_mem_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s buf_id[%d]\n",
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			ret = -ENOMEM;
			goto err_mem_unlock;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_mem_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * The command node keeps an event list for destination buffers:
	 * whenever a destination buffer is enqueued to the mem list, an
	 * event is created and linked to the tail of the event list, so
	 * the first event always belongs to the first enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id for source and destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
	mutex_unlock(&c_node->event_lock);

	DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;

err_mem_unlock:
	mutex_unlock(&c_node->mem_lock);
err_event_unlock:
	mutex_unlock(&c_node->event_lock);
	return ret;
}
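/*
 * Completion path summary: the hardware irq handler of each ipp driver
 * queues a drm_exynos_ipp_event_work on ctx->event_workq,
 * ipp_sched_event() below picks it up, and ipp_send_event() above
 * releases the finished memory nodes and delivers the drm event to
 * user space.
 */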
void ipp_sched_event(struct work_struct *work)
{
	struct drm_exynos_ipp_event_work *event_work =
		(struct drm_exynos_ipp_event_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret;

	if (!event_work) {
		DRM_ERROR("failed to get event_work.\n");
		return;
	}

	DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);

	ippdrv = event_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("failed to get ipp driver.\n");
		return;
	}

	c_node = ippdrv->c_node;
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return;
	}

	/*
	 * IPP keeps the command thread and event thread synchronized.
	 * If user land closes the file immediately, synchronize with
	 * the command thread by signalling completion instead of
	 * continuing with the operation.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
			c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret) {
		DRM_ERROR("failed to send event.\n");
		goto err_completion;
	}

err_completion:
	if (ipp_is_m2m_cmd(c_node->property.cmd))
		complete(&c_node->start_complete);
}
static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	/* get each ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		u32 ipp_id;

		ippdrv->drm_dev = drm_dev;

		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
				    &ipp_id);
		if (ret || ipp_id == 0) {
			DRM_ERROR("failed to create id.\n");
			goto err;
		}

		DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n",
			count++, ippdrv, ipp_id);

		ippdrv->prop_list.ipp_id = ipp_id;

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);
		mutex_init(&ippdrv->cmd_lock);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err;
			}
		}
	}

	return 0;

err:
	/* unwind the ipp driver entries registered so far */
	list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
						drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);
	}

	return ret;
}
static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv, *t;
	struct ipp_context *ctx = get_ipp_context(dev);

	/*
	 * get each ipp driver entry; iteration must be safe because
	 * exynos_drm_ippdrv_unregister() removes the entry from the list.
	 */
	list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);

		ippdrv->drm_dev = NULL;
		exynos_drm_ippdrv_unregister(ippdrv);
	}
}
static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;

	file_priv->ipp_dev = dev;

	DRM_DEBUG_KMS("done priv[%p]\n", dev);

	return 0;
}
static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	DRM_DEBUG_KMS("for priv[%p]\n", file_priv->ipp_dev);

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		mutex_lock(&ippdrv->cmd_lock);
		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n",
				count++, ippdrv);

			if (c_node->dev == file_priv->ipp_dev) {
				/*
				 * User land may have reached an abnormal
				 * state, e.g. the process was killed and
				 * the file closed, so the stop cmd ctrl
				 * was never called; perform the stop
				 * operation on its behalf here.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(ctx, c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
		mutex_unlock(&ippdrv->cmd_lock);
	}
}
static int ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * Create a single thread for ipp events: the IPP drivers send
	 * their event_work to this thread, and the event thread in turn
	 * sends the event to the user process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		return -EINVAL;
	}

	/*
	 * Create a single thread for ipp commands: the user process
	 * creates a command node via the set property ioctl, builds a
	 * start_work and sends it to this thread, which then starts the
	 * property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver information */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
	return ret;
}
static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove and destroy ipp idr */
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}
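/*
 * Stub for TODO item 7 (power and sysmmu control in power_on): for now
 * this only logs the request.
 */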
static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	DRM_DEBUG_KMS("enable[%d]\n", enable);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int ipp_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	if (pm_runtime_suspended(dev))
		return 0;

	return ipp_power_ctrl(ctx, false);
}

static int ipp_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	if (!pm_runtime_suspended(dev))
		return ipp_power_ctrl(ctx, true);

	return 0;
}
#endif
#ifdef CONFIG_PM_RUNTIME
static int ipp_runtime_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	return ipp_power_ctrl(ctx, false);
}

static int ipp_runtime_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	return ipp_power_ctrl(ctx, true);
}
#endif
static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};
struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};