2 * Copyright (C) 2012 Samsung Electronics Co., Ltd.
4 * Eunchul Kim <chulspro.kim@samsung.com>
5 * Jinyoung Jeon <jy0.jeon@samsung.com>
6 * Sangmin Lee <lsmin.lee@samsung.com>
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
14 #include <linux/kernel.h>
15 #include <linux/platform_device.h>
16 #include <linux/types.h>
17 #include <linux/clk.h>
18 #include <linux/pm_runtime.h>
21 #include <drm/exynos_drm.h>
22 #include "exynos_drm_drv.h"
23 #include "exynos_drm_gem.h"
24 #include "exynos_drm_ipp.h"
25 #include "exynos_drm_iommu.h"
28 * IPP stands for Image Post Processing and
29 * supports image scaler/rotator and input/output DMA operations
30 * using FIMC, GSC, Rotator, and so on.
31 * IPP is an integration driver for hardware blocks with the same attributes.
36 * 1. expand command control id.
37 * 2. integrate property and config.
38 * 3. remove the send_event id check routine.
39 * 4. compare send_event ids if needed.
40 * 5. free the subdrv_remove notifier callback list if needed.
41 * 6. check subdrv_open behaviour with respect to multi-open.
42 * 7. implement power and sysmmu control in power_on.
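 *
 * For orientation, a rough sketch of how userspace is expected to drive
 * these ioctls (assuming the DRM_IOCTL_EXYNOS_IPP_* ioctls and the
 * structures from the exynos_drm uapi header; the field values below are
 * illustrative, not authoritative):
 *
 *	struct drm_exynos_ipp_property prop = { ... };
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
 *
 *	struct drm_exynos_ipp_queue_buf qbuf = {
 *		.prop_id  = prop.prop_id,
 *		.ops_id   = EXYNOS_DRM_OPS_SRC,
 *		.buf_type = IPP_BUF_ENQUEUE,
 *		.buf_id   = 0,
 *		.handle   = { gem_handle },
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 *
 *	struct drm_exynos_ipp_cmd_ctrl ctrl = {
 *		.prop_id = prop.prop_id,
 *		.ctrl    = IPP_CTRL_PLAY,
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
 *
 *	(completion is then delivered as a DRM_EXYNOS_IPP_EVENT read
 *	 from the DRM fd)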
45 #define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
46 #define ipp_is_m2m_cmd(c) ((c) == IPP_CMD_M2M)
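/*
 * get_ipp_context() above returns the struct ipp_context set as driver data
 * in ipp_probe().  ipp_is_m2m_cmd() is true only for IPP_CMD_M2M; writeback
 * and output commands instead take the "dedicated" path handled below.
 */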
48 /* platform device pointer for ipp device. */
49 static struct platform_device *exynos_drm_ipp_pdev;
52 * A structure of event.
54 * @base: base of event.
57 struct drm_exynos_ipp_send_event {
58 struct drm_pending_event base;
59 struct drm_exynos_ipp_event event;
63 * A structure of memory node.
65 * @list: list head to memory queue information.
66 * @ops_id: id of operations.
67 * @prop_id: id of property.
68 * @buf_id: id of buffer.
69 * @buf_info: gem objects and dma address, size.
70 * @filp: a pointer to drm_file.
72 struct drm_exynos_ipp_mem_node {
73 struct list_head list;
74 enum drm_exynos_ops_id ops_id;
77 struct drm_exynos_ipp_buf_info buf_info;
78 struct drm_file *filp;
82 * A structure of ipp context.
84 * @subdrv: sub driver structure used to register with the exynos drm core.
85 * @ipp_lock: lock for synchronization of access to ipp_idr.
86 * @prop_lock: lock for synchronization of access to prop_idr.
87 * @ipp_idr: ipp driver idr.
88 * @prop_idr: property idr.
89 * @event_workq: event work queue.
90 * @cmd_workq: command work queue.
93 struct exynos_drm_subdrv subdrv;
94 struct mutex ipp_lock;
95 struct mutex prop_lock;
98 struct workqueue_struct *event_workq;
99 struct workqueue_struct *cmd_workq;
102 static LIST_HEAD(exynos_drm_ippdrv_list);
103 static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
104 static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
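/*
 * exynos_platform_device_ipp_register - create the "exynos-drm-ipp"
 * platform device.  Presumably called once by the core exynos drm driver;
 * the check below returns early if the device has already been created.
 */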
106 int exynos_platform_device_ipp_register(void)
108 struct platform_device *pdev;
110 if (exynos_drm_ipp_pdev)
113 pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
115 return PTR_ERR(pdev);
117 exynos_drm_ipp_pdev = pdev;
122 void exynos_platform_device_ipp_unregister(void)
124 if (exynos_drm_ipp_pdev) {
125 platform_device_unregister(exynos_drm_ipp_pdev);
126 exynos_drm_ipp_pdev = NULL;
130 int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
135 mutex_lock(&exynos_drm_ippdrv_lock);
136 list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
137 mutex_unlock(&exynos_drm_ippdrv_lock);
142 int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
147 mutex_lock(&exynos_drm_ippdrv_lock);
148 list_del(&ippdrv->drv_list);
149 mutex_unlock(&exynos_drm_ippdrv_lock);
154 static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
159 /* do the allocation under our mutex lock */
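/*
 * idr_alloc() with end == 0 places no upper bound on the id; ids start
 * at 1, so an id of 0 can be treated as invalid (ipp_subdrv_probe()
 * checks for ipp_id == 0).
 */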
161 ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
170 static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
173 idr_remove(id_idr, id);
177 static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
181 DRM_DEBUG_KMS("id[%d]\n", id);
185 /* find object using handle */
186 obj = idr_find(id_idr, id);
188 DRM_ERROR("failed to find object.\n");
190 return ERR_PTR(-ENODEV);
198 static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
199 enum drm_exynos_ipp_cmd cmd)
202 * check the dedicated flag and, for WB/OUTPUT operations, the runtime PM state
205 if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
206 !pm_runtime_suspended(ippdrv->dev)))
212 static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
213 struct drm_exynos_ipp_property *property)
215 struct exynos_drm_ippdrv *ippdrv;
216 u32 ipp_id = property->ipp_id;
218 DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id);
221 /* find ipp driver using idr */
222 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
224 if (IS_ERR(ippdrv)) {
225 DRM_ERROR("not found ipp%d driver.\n", ipp_id);
230 * WB and OUTPUT operations do not support multiple concurrent operations,
231 * so the driver is marked dedicated in the set property ioctl and the
232 * dedicated flag is cleared when the ipp driver finishes its operations.
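 *
 * In practice, only one WB/OUTPUT command can own an ippdrv at a time;
 * ipp_check_dedicated() treats a powered-on driver as busy for non-M2M
 * commands, while M2M commands may still share an idle driver.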
234 if (ipp_check_dedicated(ippdrv, property->cmd)) {
235 DRM_ERROR("chosen device is already in use.\n");
236 return ERR_PTR(-EBUSY);
240 * This is necessary to find the correct device among the ipp drivers.
241 * ipp drivers have different abilities,
242 * so the requested property needs to be checked.
244 if (ippdrv->check_property &&
245 ippdrv->check_property(ippdrv->dev, property)) {
246 DRM_ERROR("not support property.\n");
247 return ERR_PTR(-EINVAL);
253 * In this case, search the whole ipp driver list.
254 * The user application did not set an ipp_id,
255 * so the ipp subsystem looks for a suitable driver in the driver list.
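 * The loop below picks the first registered driver that is neither
 * dedicated nor rejects the property via check_property().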
257 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
258 if (ipp_check_dedicated(ippdrv, property->cmd)) {
259 DRM_DEBUG_KMS("used device.\n");
263 if (ippdrv->check_property &&
264 ippdrv->check_property(ippdrv->dev, property)) {
265 DRM_DEBUG_KMS("not support property.\n");
272 DRM_ERROR("not support ipp driver operations.\n");
275 return ERR_PTR(-ENODEV);
278 static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
280 struct exynos_drm_ippdrv *ippdrv;
281 struct drm_exynos_ipp_cmd_node *c_node;
284 DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
287 * Search for the ipp driver using the prop_id handle.
288 * The ipp subsystem sometimes needs to find a driver by prop_id,
289 * e.g. for PAUSE state, queue buf and command control.
291 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
292 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);
294 mutex_lock(&ippdrv->cmd_lock);
295 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
296 if (c_node->property.prop_id == prop_id) {
297 mutex_unlock(&ippdrv->cmd_lock);
301 mutex_unlock(&ippdrv->cmd_lock);
304 return ERR_PTR(-ENODEV);
307 int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
308 struct drm_file *file)
310 struct drm_exynos_file_private *file_priv = file->driver_priv;
311 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
312 struct device *dev = priv->dev;
313 struct ipp_context *ctx = get_ipp_context(dev);
314 struct drm_exynos_ipp_prop_list *prop_list = data;
315 struct exynos_drm_ippdrv *ippdrv;
319 DRM_ERROR("invalid context.\n");
324 DRM_ERROR("invalid property parameter.\n");
328 DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);
330 if (!prop_list->ipp_id) {
331 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
335 * Supply the ippdrv list count to the user application.
336 * In the first step the user application gets the ippdrv count,
337 * and in the second step it gets each ippdrv's capability using ipp_id.
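 *
 * A rough userspace sketch of the two steps (assuming the
 * DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY ioctl from the exynos_drm uapi and
 * that ipp ids are allocated contiguously from 1):
 *
 *	prop_list.ipp_id = 0;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &prop_list);
 *	for (id = 1; id <= prop_list.count; id++) {
 *		prop_list.ipp_id = id;
 *		ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &prop_list);
 *	}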
339 prop_list->count = count;
342 * Get the ippdrv capability by ipp_id.
343 * Some devices do not support the wb or output interface, so this
344 * lets the user application detect the correct ipp driver.
347 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
349 if (IS_ERR(ippdrv)) {
350 DRM_ERROR("not found ipp%d driver.\n",
352 return PTR_ERR(ippdrv);
355 *prop_list = ippdrv->prop_list;
361 static void ipp_print_property(struct drm_exynos_ipp_property *property,
364 struct drm_exynos_ipp_config *config = &property->config[idx];
365 struct drm_exynos_pos *pos = &config->pos;
366 struct drm_exynos_sz *sz = &config->sz;
368 DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
369 property->prop_id, idx ? "dst" : "src", config->fmt);
371 DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
372 pos->x, pos->y, pos->w, pos->h,
373 sz->hsize, sz->vsize, config->flip, config->degree);
376 static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
378 struct exynos_drm_ippdrv *ippdrv;
379 struct drm_exynos_ipp_cmd_node *c_node;
380 u32 prop_id = property->prop_id;
382 DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
384 ippdrv = ipp_find_drv_by_handle(prop_id);
385 if (IS_ERR(ippdrv)) {
386 DRM_ERROR("failed to get ipp driver.\n");
391 * Find the command node in the ippdrv command list.
392 * When the command node matching this prop_id is found,
393 * update the property information stored in that command node.
395 mutex_lock(&ippdrv->cmd_lock);
396 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
397 if ((c_node->property.prop_id == prop_id) &&
398 (c_node->state == IPP_STATE_STOP)) {
399 mutex_unlock(&ippdrv->cmd_lock);
400 DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
401 property->cmd, (int)ippdrv);
403 c_node->property = *property;
407 mutex_unlock(&ippdrv->cmd_lock);
409 DRM_ERROR("failed to search property.\n");
414 static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
416 struct drm_exynos_ipp_cmd_work *cmd_work;
418 cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
420 return ERR_PTR(-ENOMEM);
422 INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
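	/*
	 * Note: the cast to struct work_struct * assumes the work member is
	 * the first field of drm_exynos_ipp_cmd_work; the same pattern is
	 * used for drm_exynos_ipp_event_work and in ipp_sched_cmd()/
	 * ipp_sched_event(), which cast back the other way.
	 */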
427 static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
429 struct drm_exynos_ipp_event_work *event_work;
431 event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
433 return ERR_PTR(-ENOMEM);
435 INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
440 int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
441 struct drm_file *file)
443 struct drm_exynos_file_private *file_priv = file->driver_priv;
444 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
445 struct device *dev = priv->dev;
446 struct ipp_context *ctx = get_ipp_context(dev);
447 struct drm_exynos_ipp_property *property = data;
448 struct exynos_drm_ippdrv *ippdrv;
449 struct drm_exynos_ipp_cmd_node *c_node;
453 DRM_ERROR("invalid context.\n");
458 DRM_ERROR("invalid property parameter.\n");
463 * Log the property requested by the user application,
464 * which may set various property configurations.
467 ipp_print_property(property, i);
470 * The set property ioctl normally generates a new prop_id,
471 * but in this case a prop_id was already assigned by an earlier set property,
472 * e.g. in PAUSE state. Find the current prop_id and use it
473 * instead of allocating a new one.
475 if (property->prop_id) {
476 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
477 return ipp_find_and_set_property(property);
480 /* find ipp driver using ipp id */
481 ippdrv = ipp_find_driver(ctx, property);
482 if (IS_ERR(ippdrv)) {
483 DRM_ERROR("failed to get ipp driver.\n");
487 /* allocate command node */
488 c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
492 /* create property id */
493 ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
496 DRM_ERROR("failed to create id.\n");
500 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
501 property->prop_id, property->cmd, (int)ippdrv);
503 /* store property information and ippdrv in private data */
505 c_node->property = *property;
506 c_node->state = IPP_STATE_IDLE;
508 c_node->start_work = ipp_create_cmd_work();
509 if (IS_ERR(c_node->start_work)) {
510 DRM_ERROR("failed to create start work.\n");
514 c_node->stop_work = ipp_create_cmd_work();
515 if (IS_ERR(c_node->stop_work)) {
516 DRM_ERROR("failed to create stop work.\n");
520 c_node->event_work = ipp_create_event_work();
521 if (IS_ERR(c_node->event_work)) {
522 DRM_ERROR("failed to create event work.\n");
526 mutex_init(&c_node->lock);
527 mutex_init(&c_node->mem_lock);
528 mutex_init(&c_node->event_lock);
530 init_completion(&c_node->start_complete);
531 init_completion(&c_node->stop_complete);
534 INIT_LIST_HEAD(&c_node->mem_list[i]);
536 INIT_LIST_HEAD(&c_node->event_list);
537 list_splice_init(&priv->event_list, &c_node->event_list);
538 mutex_lock(&ippdrv->cmd_lock);
539 list_add_tail(&c_node->list, &ippdrv->cmd_list);
540 mutex_unlock(&ippdrv->cmd_lock);
542 /* set the dedicated state for non-m2m commands */
543 if (!ipp_is_m2m_cmd(property->cmd))
544 ippdrv->dedicated = true;
549 kfree(c_node->stop_work);
551 kfree(c_node->start_work);
553 ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
559 static void ipp_clean_cmd_node(struct ipp_context *ctx,
560 struct drm_exynos_ipp_cmd_node *c_node)
563 list_del(&c_node->list);
565 ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
566 c_node->property.prop_id);
569 mutex_destroy(&c_node->lock);
570 mutex_destroy(&c_node->mem_lock);
571 mutex_destroy(&c_node->event_lock);
573 /* free command node */
574 kfree(c_node->start_work);
575 kfree(c_node->stop_work);
576 kfree(c_node->event_work);
580 static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
582 struct drm_exynos_ipp_property *property = &c_node->property;
583 struct drm_exynos_ipp_mem_node *m_node;
584 struct list_head *head;
585 int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
587 for_each_ipp_ops(i) {
588 /* source/destination memory list */
589 head = &c_node->mem_list[i];
591 /* find memory node entry */
592 list_for_each_entry(m_node, head, list) {
593 DRM_DEBUG_KMS("%s,count[%d]m_node[0x%x]\n",
594 i ? "dst" : "src", count[i], (int)m_node);
599 DRM_DEBUG_KMS("min[%d]max[%d]\n",
600 min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
601 max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
604 * M2M operations need paired memory addresses,
605 * so check the minimum count of src and dst.
606 * Other cases do not use paired memory, so use the maximum count.
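 *
 * For example (illustrative numbers): with 3 src buffers and 2 dst
 * buffers queued, an M2M command can run min(3, 2) = 2 times, while a
 * WB/OUTPUT command only uses one side and reports max(3, 2) = 3.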
608 if (ipp_is_m2m_cmd(property->cmd))
609 ret = min(count[EXYNOS_DRM_OPS_SRC],
610 count[EXYNOS_DRM_OPS_DST]);
612 ret = max(count[EXYNOS_DRM_OPS_SRC],
613 count[EXYNOS_DRM_OPS_DST]);
618 static struct drm_exynos_ipp_mem_node
619 *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
620 struct drm_exynos_ipp_queue_buf *qbuf)
622 struct drm_exynos_ipp_mem_node *m_node;
623 struct list_head *head;
626 DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
628 /* source/destination memory list */
629 head = &c_node->mem_list[qbuf->ops_id];
631 /* find memory node from memory list */
632 list_for_each_entry(m_node, head, list) {
633 DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);
635 /* compare buffer id */
636 if (m_node->buf_id == qbuf->buf_id)
643 static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
644 struct drm_exynos_ipp_cmd_node *c_node,
645 struct drm_exynos_ipp_mem_node *m_node)
647 struct exynos_drm_ipp_ops *ops = NULL;
650 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
653 DRM_ERROR("invalid queue node.\n");
657 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
659 /* get operations callback */
660 ops = ippdrv->ops[m_node->ops_id];
662 DRM_ERROR("not support ops.\n");
666 /* set address and enable irq */
668 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
669 m_node->buf_id, IPP_BUF_ENQUEUE);
671 DRM_ERROR("failed to set addr.\n");
679 static struct drm_exynos_ipp_mem_node
680 *ipp_get_mem_node(struct drm_device *drm_dev,
681 struct drm_file *file,
682 struct drm_exynos_ipp_cmd_node *c_node,
683 struct drm_exynos_ipp_queue_buf *qbuf)
685 struct drm_exynos_ipp_mem_node *m_node;
686 struct drm_exynos_ipp_buf_info buf_info;
690 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
692 return ERR_PTR(-ENOMEM);
694 /* clear base address for error handling */
695 memset(&buf_info, 0x0, sizeof(buf_info));
697 /* operations, buffer id */
698 m_node->ops_id = qbuf->ops_id;
699 m_node->prop_id = qbuf->prop_id;
700 m_node->buf_id = qbuf->buf_id;
702 DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
703 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
705 for_each_ipp_planar(i) {
706 DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);
708 /* get dma address by handle */
709 if (qbuf->handle[i]) {
710 addr = exynos_drm_gem_get_dma_addr(drm_dev,
711 qbuf->handle[i], file);
713 DRM_ERROR("failed to get addr.\n");
717 buf_info.handles[i] = qbuf->handle[i];
718 buf_info.base[i] = *(dma_addr_t *) addr;
719 DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%x]\n",
720 i, buf_info.base[i], (int)buf_info.handles[i]);
725 m_node->buf_info = buf_info;
726 mutex_lock(&c_node->mem_lock);
727 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
728 mutex_unlock(&c_node->mem_lock);
734 return ERR_PTR(-EFAULT);
737 static int ipp_put_mem_node(struct drm_device *drm_dev,
738 struct drm_exynos_ipp_cmd_node *c_node,
739 struct drm_exynos_ipp_mem_node *m_node)
743 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
746 DRM_ERROR("invalid dequeue node.\n");
750 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
753 for_each_ipp_planar(i) {
754 unsigned long handle = m_node->buf_info.handles[i];
756 exynos_drm_gem_put_dma_addr(drm_dev, handle,
760 /* delete list in queue */
761 list_del(&m_node->list);
767 static void ipp_free_event(struct drm_pending_event *event)
772 static int ipp_get_event(struct drm_device *drm_dev,
773 struct drm_file *file,
774 struct drm_exynos_ipp_cmd_node *c_node,
775 struct drm_exynos_ipp_queue_buf *qbuf)
777 struct drm_exynos_ipp_send_event *e;
780 DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);
782 e = kzalloc(sizeof(*e), GFP_KERNEL);
784 spin_lock_irqsave(&drm_dev->event_lock, flags);
785 file->event_space += sizeof(e->event);
786 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
791 e->event.base.type = DRM_EXYNOS_IPP_EVENT;
792 e->event.base.length = sizeof(e->event);
793 e->event.user_data = qbuf->user_data;
794 e->event.prop_id = qbuf->prop_id;
795 e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
796 e->base.event = &e->event.base;
797 e->base.file_priv = file;
798 e->base.destroy = ipp_free_event;
799 mutex_lock(&c_node->event_lock);
800 list_add_tail(&e->base.link, &c_node->event_list);
801 mutex_unlock(&c_node->event_lock);
806 static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
807 struct drm_exynos_ipp_queue_buf *qbuf)
809 struct drm_exynos_ipp_send_event *e, *te;
812 mutex_lock(&c_node->event_lock);
813 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
814 DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
817 * qbuf == NULL means delete all events:
818 * the stop operation wants to clear the whole event list.
819 * Otherwise delete only the events with the same buf id.
823 list_del(&e->base.link);
827 /* compare buffer id */
828 if (qbuf && (qbuf->buf_id ==
829 e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
831 list_del(&e->base.link);
838 mutex_unlock(&c_node->event_lock);
842 static void ipp_handle_cmd_work(struct device *dev,
843 struct exynos_drm_ippdrv *ippdrv,
844 struct drm_exynos_ipp_cmd_work *cmd_work,
845 struct drm_exynos_ipp_cmd_node *c_node)
847 struct ipp_context *ctx = get_ipp_context(dev);
849 cmd_work->ippdrv = ippdrv;
850 cmd_work->c_node = c_node;
851 queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
854 static int ipp_queue_buf_with_run(struct device *dev,
855 struct drm_exynos_ipp_cmd_node *c_node,
856 struct drm_exynos_ipp_mem_node *m_node,
857 struct drm_exynos_ipp_queue_buf *qbuf)
859 struct exynos_drm_ippdrv *ippdrv;
860 struct drm_exynos_ipp_property *property;
861 struct exynos_drm_ipp_ops *ops;
864 ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
865 if (IS_ERR(ippdrv)) {
866 DRM_ERROR("failed to get ipp driver.\n");
870 ops = ippdrv->ops[qbuf->ops_id];
872 DRM_ERROR("failed to get ops.\n");
876 property = &c_node->property;
878 if (c_node->state != IPP_STATE_START) {
879 DRM_DEBUG_KMS("bypass for invalid state.\n");
883 mutex_lock(&c_node->mem_lock);
884 if (!ipp_check_mem_list(c_node)) {
885 mutex_unlock(&c_node->mem_lock);
886 DRM_DEBUG_KMS("empty memory.\n");
891 * If the destination buffer is set and the clock is enabled,
892 * then m2m operations need to be started here in queue_buf.
894 if (ipp_is_m2m_cmd(property->cmd)) {
895 struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
897 cmd_work->ctrl = IPP_CTRL_PLAY;
898 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
900 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
902 mutex_unlock(&c_node->mem_lock);
903 DRM_ERROR("failed to set m node.\n");
907 mutex_unlock(&c_node->mem_lock);
912 static void ipp_clean_queue_buf(struct drm_device *drm_dev,
913 struct drm_exynos_ipp_cmd_node *c_node,
914 struct drm_exynos_ipp_queue_buf *qbuf)
916 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
919 mutex_lock(&c_node->mem_lock);
920 list_for_each_entry_safe(m_node, tm_node,
921 &c_node->mem_list[qbuf->ops_id], list) {
922 if (m_node->buf_id == qbuf->buf_id &&
923 m_node->ops_id == qbuf->ops_id)
924 ipp_put_mem_node(drm_dev, c_node, m_node);
926 mutex_unlock(&c_node->mem_lock);
929 int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
930 struct drm_file *file)
932 struct drm_exynos_file_private *file_priv = file->driver_priv;
933 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
934 struct device *dev = priv->dev;
935 struct ipp_context *ctx = get_ipp_context(dev);
936 struct drm_exynos_ipp_queue_buf *qbuf = data;
937 struct drm_exynos_ipp_cmd_node *c_node;
938 struct drm_exynos_ipp_mem_node *m_node;
942 DRM_ERROR("invalid buf parameter.\n");
946 if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
947 DRM_ERROR("invalid ops parameter.\n");
951 DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
952 qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
953 qbuf->buf_id, qbuf->buf_type);
955 /* find command node */
956 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
958 if (IS_ERR(c_node)) {
959 DRM_ERROR("failed to get command node.\n");
960 return PTR_ERR(c_node);
964 switch (qbuf->buf_type) {
965 case IPP_BUF_ENQUEUE:
966 /* get memory node */
967 m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
968 if (IS_ERR(m_node)) {
969 DRM_ERROR("failed to get m_node.\n");
970 return PTR_ERR(m_node);
974 * First get an event for the destination buffer,
975 * and second, in the M2M case, run using the destination buffer.
978 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
979 /* get event for destination buffer */
980 ret = ipp_get_event(drm_dev, file, c_node, qbuf);
982 DRM_ERROR("failed to get event.\n");
987 * The M2M case runs play control for the streaming feature.
988 * Other cases just set the address and wait.
990 ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
992 DRM_ERROR("failed to run command.\n");
997 case IPP_BUF_DEQUEUE:
998 mutex_lock(&c_node->lock);
1000 /* put event for destination buffer */
1001 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
1002 ipp_put_event(c_node, qbuf);
1004 ipp_clean_queue_buf(drm_dev, c_node, qbuf);
1006 mutex_unlock(&c_node->lock);
1009 DRM_ERROR("invalid buffer control.\n");
1016 DRM_ERROR("clean memory nodes.\n");
1018 ipp_clean_queue_buf(drm_dev, c_node, qbuf);
1022 static bool exynos_drm_ipp_check_valid(struct device *dev,
1023 enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
1025 if (ctrl != IPP_CTRL_PLAY) {
1026 if (pm_runtime_suspended(dev)) {
1027 DRM_ERROR("pm:runtime_suspended.\n");
1034 if (state != IPP_STATE_IDLE)
1038 if (state == IPP_STATE_STOP)
1041 case IPP_CTRL_PAUSE:
1042 if (state != IPP_STATE_START)
1045 case IPP_CTRL_RESUME:
1046 if (state != IPP_STATE_STOP)
1050 DRM_ERROR("invalid state.\n");
1057 DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
1061 int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
1062 struct drm_file *file)
1064 struct drm_exynos_file_private *file_priv = file->driver_priv;
1065 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
1066 struct exynos_drm_ippdrv *ippdrv = NULL;
1067 struct device *dev = priv->dev;
1068 struct ipp_context *ctx = get_ipp_context(dev);
1069 struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
1070 struct drm_exynos_ipp_cmd_work *cmd_work;
1071 struct drm_exynos_ipp_cmd_node *c_node;
1074 DRM_ERROR("invalid context.\n");
1079 DRM_ERROR("invalid control parameter.\n");
1083 DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
1084 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1086 ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
1087 if (IS_ERR(ippdrv)) {
1088 DRM_ERROR("failed to get ipp driver.\n");
1089 return PTR_ERR(ippdrv);
1092 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
1094 if (IS_ERR(c_node)) {
1095 DRM_ERROR("invalid command node list.\n");
1096 return PTR_ERR(c_node);
1099 if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
1101 DRM_ERROR("invalid state.\n");
1105 switch (cmd_ctrl->ctrl) {
1107 if (pm_runtime_suspended(ippdrv->dev))
1108 pm_runtime_get_sync(ippdrv->dev);
1110 c_node->state = IPP_STATE_START;
1112 cmd_work = c_node->start_work;
1113 cmd_work->ctrl = cmd_ctrl->ctrl;
1114 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1117 cmd_work = c_node->stop_work;
1118 cmd_work->ctrl = cmd_ctrl->ctrl;
1119 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1121 if (!wait_for_completion_timeout(&c_node->stop_complete,
1122 msecs_to_jiffies(300))) {
1123 DRM_ERROR("timeout stop:prop_id[%d]\n",
1124 c_node->property.prop_id);
1127 c_node->state = IPP_STATE_STOP;
1128 ippdrv->dedicated = false;
1129 mutex_lock(&ippdrv->cmd_lock);
1130 ipp_clean_cmd_node(ctx, c_node);
1132 if (list_empty(&ippdrv->cmd_list))
1133 pm_runtime_put_sync(ippdrv->dev);
1134 mutex_unlock(&ippdrv->cmd_lock);
1136 case IPP_CTRL_PAUSE:
1137 cmd_work = c_node->stop_work;
1138 cmd_work->ctrl = cmd_ctrl->ctrl;
1139 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1141 if (!wait_for_completion_timeout(&c_node->stop_complete,
1142 msecs_to_jiffies(200))) {
1143 DRM_ERROR("timeout stop:prop_id[%d]\n",
1144 c_node->property.prop_id);
1147 c_node->state = IPP_STATE_STOP;
1149 case IPP_CTRL_RESUME:
1150 c_node->state = IPP_STATE_START;
1151 cmd_work = c_node->start_work;
1152 cmd_work->ctrl = cmd_ctrl->ctrl;
1153 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1156 DRM_ERROR("this control is not supported currently.\n");
1160 DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
1161 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1166 int exynos_drm_ippnb_register(struct notifier_block *nb)
1168 return blocking_notifier_chain_register(
1169 &exynos_drm_ippnb_list, nb);
1172 int exynos_drm_ippnb_unregister(struct notifier_block *nb)
1174 return blocking_notifier_chain_unregister(
1175 &exynos_drm_ippnb_list, nb);
1178 int exynos_drm_ippnb_send_event(unsigned long val, void *v)
1180 return blocking_notifier_call_chain(
1181 &exynos_drm_ippnb_list, val, v);
1184 static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
1185 struct drm_exynos_ipp_property *property)
1187 struct exynos_drm_ipp_ops *ops = NULL;
1192 DRM_ERROR("invalid property parameter.\n");
1196 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
1198 /* reset h/w block */
1199 if (ippdrv->reset &&
1200 ippdrv->reset(ippdrv->dev)) {
1201 DRM_ERROR("failed to reset.\n");
1205 /* set source,destination operations */
1206 for_each_ipp_ops(i) {
1207 struct drm_exynos_ipp_config *config =
1208 &property->config[i];
1210 ops = ippdrv->ops[i];
1211 if (!ops || !config) {
1212 DRM_ERROR("not support ops and config.\n");
1218 ret = ops->set_fmt(ippdrv->dev, config->fmt);
1220 DRM_ERROR("not support format.\n");
1225 /* set transform for rotation, flip */
1226 if (ops->set_transf) {
1227 ret = ops->set_transf(ippdrv->dev, config->degree,
1228 config->flip, &swap);
1230 DRM_ERROR("not support transform.\n");
1236 if (ops->set_size) {
1237 ret = ops->set_size(ippdrv->dev, swap, &config->pos,
1240 DRM_ERROR("not support size.\n");
1249 static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1250 struct drm_exynos_ipp_cmd_node *c_node)
1252 struct drm_exynos_ipp_mem_node *m_node;
1253 struct drm_exynos_ipp_property *property = &c_node->property;
1254 struct list_head *head;
1257 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
1259 /* store command info in ippdrv */
1260 ippdrv->c_node = c_node;
1262 mutex_lock(&c_node->mem_lock);
1263 if (!ipp_check_mem_list(c_node)) {
1264 DRM_DEBUG_KMS("empty memory.\n");
1269 /* set current property in ippdrv */
1270 ret = ipp_set_property(ippdrv, property);
1272 DRM_ERROR("failed to set property.\n");
1273 ippdrv->c_node = NULL;
1278 switch (property->cmd) {
1280 for_each_ipp_ops(i) {
1281 /* source/destination memory list */
1282 head = &c_node->mem_list[i];
1284 m_node = list_first_entry(head,
1285 struct drm_exynos_ipp_mem_node, list);
1287 DRM_ERROR("failed to get node.\n");
1292 DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);
1294 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1296 DRM_ERROR("failed to set m node.\n");
1302 /* destination memory list */
1303 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1305 list_for_each_entry(m_node, head, list) {
1306 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1308 DRM_ERROR("failed to set m node.\n");
1313 case IPP_CMD_OUTPUT:
1314 /* source memory list */
1315 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1317 list_for_each_entry(m_node, head, list) {
1318 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1320 DRM_ERROR("failed to set m node.\n");
1326 DRM_ERROR("invalid operations.\n");
1330 mutex_unlock(&c_node->mem_lock);
1332 DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);
1334 /* start operations */
1335 if (ippdrv->start) {
1336 ret = ippdrv->start(ippdrv->dev, property->cmd);
1338 DRM_ERROR("failed to start ops.\n");
1339 ippdrv->c_node = NULL;
1347 mutex_unlock(&c_node->mem_lock);
1348 ippdrv->c_node = NULL;
1352 static int ipp_stop_property(struct drm_device *drm_dev,
1353 struct exynos_drm_ippdrv *ippdrv,
1354 struct drm_exynos_ipp_cmd_node *c_node)
1356 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
1357 struct drm_exynos_ipp_property *property = &c_node->property;
1358 struct list_head *head;
1361 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
1364 ipp_put_event(c_node, NULL);
1366 mutex_lock(&c_node->mem_lock);
1369 switch (property->cmd) {
1371 for_each_ipp_ops(i) {
1372 /* source/destination memory list */
1373 head = &c_node->mem_list[i];
1375 list_for_each_entry_safe(m_node, tm_node,
1377 ret = ipp_put_mem_node(drm_dev, c_node,
1380 DRM_ERROR("failed to put m_node.\n");
1387 /* destination memory list */
1388 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1390 list_for_each_entry_safe(m_node, tm_node, head, list) {
1391 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1393 DRM_ERROR("failed to put m_node.\n");
1398 case IPP_CMD_OUTPUT:
1399 /* source memory list */
1400 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1402 list_for_each_entry_safe(m_node, tm_node, head, list) {
1403 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1405 DRM_ERROR("failed to put m_node.\n");
1411 DRM_ERROR("invalid operations.\n");
1417 mutex_unlock(&c_node->mem_lock);
1419 /* stop operations */
1421 ippdrv->stop(ippdrv->dev, property->cmd);
1426 void ipp_sched_cmd(struct work_struct *work)
1428 struct drm_exynos_ipp_cmd_work *cmd_work =
1429 (struct drm_exynos_ipp_cmd_work *)work;
1430 struct exynos_drm_ippdrv *ippdrv;
1431 struct drm_exynos_ipp_cmd_node *c_node;
1432 struct drm_exynos_ipp_property *property;
1435 ippdrv = cmd_work->ippdrv;
1437 DRM_ERROR("invalid ippdrv list.\n");
1441 c_node = cmd_work->c_node;
1443 DRM_ERROR("invalid command node list.\n");
1447 mutex_lock(&c_node->lock);
1449 property = &c_node->property;
1451 switch (cmd_work->ctrl) {
1453 case IPP_CTRL_RESUME:
1454 ret = ipp_start_property(ippdrv, c_node);
1456 DRM_ERROR("failed to start property:prop_id[%d]\n",
1457 c_node->property.prop_id);
1462 * The M2M case waits for completion of the transfer,
1463 * because M2M performs a single unit operation
1464 * with multiple queued buffers,
1465 * so it needs to wait for the data transfer to complete.
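 * The completion is signalled from ipp_sched_event() via
 * complete(&c_node->start_complete) once the finished M2M buffer has
 * been handled.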
1467 if (ipp_is_m2m_cmd(property->cmd)) {
1468 if (!wait_for_completion_timeout
1469 (&c_node->start_complete, msecs_to_jiffies(200))) {
1470 DRM_ERROR("timeout event:prop_id[%d]\n",
1471 c_node->property.prop_id);
1477 case IPP_CTRL_PAUSE:
1478 ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
1481 DRM_ERROR("failed to stop property.\n");
1485 complete(&c_node->stop_complete);
1488 DRM_ERROR("unknown control type\n");
1492 DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);
1495 mutex_unlock(&c_node->lock);
1498 static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1499 struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
1501 struct drm_device *drm_dev = ippdrv->drm_dev;
1502 struct drm_exynos_ipp_property *property = &c_node->property;
1503 struct drm_exynos_ipp_mem_node *m_node;
1504 struct drm_exynos_ipp_queue_buf qbuf;
1505 struct drm_exynos_ipp_send_event *e;
1506 struct list_head *head;
1508 unsigned long flags;
1509 u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
1513 DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);
1516 DRM_ERROR("failed to get drm_dev.\n");
1521 DRM_ERROR("failed to get property.\n");
1525 mutex_lock(&c_node->event_lock);
1526 if (list_empty(&c_node->event_list)) {
1527 DRM_DEBUG_KMS("event list is empty.\n");
1529 goto err_event_unlock;
1532 mutex_lock(&c_node->mem_lock);
1533 if (!ipp_check_mem_list(c_node)) {
1534 DRM_DEBUG_KMS("empty memory.\n");
1536 goto err_mem_unlock;
1540 switch (property->cmd) {
1542 for_each_ipp_ops(i) {
1543 /* source/destination memory list */
1544 head = &c_node->mem_list[i];
1546 m_node = list_first_entry(head,
1547 struct drm_exynos_ipp_mem_node, list);
1549 DRM_ERROR("empty memory node.\n");
1551 goto err_mem_unlock;
1554 tbuf_id[i] = m_node->buf_id;
1555 DRM_DEBUG_KMS("%s buf_id[%d]\n",
1556 i ? "dst" : "src", tbuf_id[i]);
1558 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1560 DRM_ERROR("failed to put m_node.\n");
1564 /* clear buf for finding */
1565 memset(&qbuf, 0x0, sizeof(qbuf));
1566 qbuf.ops_id = EXYNOS_DRM_OPS_DST;
1567 qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
1569 /* get memory node entry */
1570 m_node = ipp_find_mem_node(c_node, &qbuf);
1572 DRM_ERROR("empty memory node.\n");
1574 goto err_mem_unlock;
1577 tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
1579 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1581 DRM_ERROR("failed to put m_node.\n");
1583 case IPP_CMD_OUTPUT:
1584 /* source memory list */
1585 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1587 m_node = list_first_entry(head,
1588 struct drm_exynos_ipp_mem_node, list);
1590 DRM_ERROR("empty memory node.\n");
1592 goto err_mem_unlock;
1595 tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
1597 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1599 DRM_ERROR("failed to put m_node.\n");
1602 DRM_ERROR("invalid operations.\n");
1604 goto err_mem_unlock;
1606 mutex_unlock(&c_node->mem_lock);
1608 if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
1609 DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
1610 tbuf_id[1], buf_id[1], property->prop_id);
1613 * The command node keeps an event list for destination buffers.
1614 * When a destination buffer is enqueued to the mem list,
1615 * an event is created and linked to the tail of the event list,
1616 * so the first event corresponds to the first enqueued buffer.
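 *
 * Userspace picks the completion up like any other DRM event, e.g. by
 * read()ing the DRM fd and matching base.type == DRM_EXYNOS_IPP_EVENT
 * in the returned struct drm_event (sketch; see ipp_get_event()).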
1618 e = list_first_entry(&c_node->event_list,
1619 struct drm_exynos_ipp_send_event, base.link);
1621 do_gettimeofday(&now);
1622 DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
1623 e->event.tv_sec = now.tv_sec;
1624 e->event.tv_usec = now.tv_usec;
1625 e->event.prop_id = property->prop_id;
1627 /* set buffer ids for source and destination */
1629 e->event.buf_id[i] = tbuf_id[i];
1631 spin_lock_irqsave(&drm_dev->event_lock, flags);
1632 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
1633 wake_up_interruptible(&e->base.file_priv->event_wait);
1634 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
1635 mutex_unlock(&c_node->event_lock);
1637 DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
1638 property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
1643 mutex_unlock(&c_node->mem_lock);
1645 mutex_unlock(&c_node->event_lock);
1649 void ipp_sched_event(struct work_struct *work)
1651 struct drm_exynos_ipp_event_work *event_work =
1652 (struct drm_exynos_ipp_event_work *)work;
1653 struct exynos_drm_ippdrv *ippdrv;
1654 struct drm_exynos_ipp_cmd_node *c_node;
1658 DRM_ERROR("failed to get event_work.\n");
1662 DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);
1664 ippdrv = event_work->ippdrv;
1666 DRM_ERROR("failed to get ipp driver.\n");
1670 c_node = ippdrv->c_node;
1672 DRM_ERROR("failed to get command node.\n");
1677 * IPP synchronizes its command thread and event thread.
1678 * If IPP is closed immediately from userland, the event thread
1679 * still synchronizes with the command thread by signalling completion
1680 * instead of carrying out the operation.
1682 if (c_node->state != IPP_STATE_START) {
1683 DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
1684 c_node->state, c_node->property.prop_id);
1685 goto err_completion;
1688 ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
1690 DRM_ERROR("failed to send event.\n");
1691 goto err_completion;
1695 if (ipp_is_m2m_cmd(c_node->property.cmd))
1696 complete(&c_node->start_complete);
1699 static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1701 struct ipp_context *ctx = get_ipp_context(dev);
1702 struct exynos_drm_ippdrv *ippdrv;
1705 /* get ipp driver entry */
1706 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1709 ippdrv->drm_dev = drm_dev;
1711 ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
1713 if (ret || ipp_id == 0) {
1714 DRM_ERROR("failed to create id.\n");
1718 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
1719 count++, (int)ippdrv, ipp_id);
1721 ippdrv->prop_list.ipp_id = ipp_id;
1723 /* store parent device for node */
1724 ippdrv->parent_dev = dev;
1726 /* store event work queue and handler */
1727 ippdrv->event_workq = ctx->event_workq;
1728 ippdrv->sched_event = ipp_sched_event;
1729 INIT_LIST_HEAD(&ippdrv->cmd_list);
1730 mutex_init(&ippdrv->cmd_lock);
1732 if (is_drm_iommu_supported(drm_dev)) {
1733 ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
1735 DRM_ERROR("failed to activate iommu\n");
1744 /* get ipp driver entry */
1745 list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
1747 if (is_drm_iommu_supported(drm_dev))
1748 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1750 ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
1751 ippdrv->prop_list.ipp_id);
1757 static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1759 struct exynos_drm_ippdrv *ippdrv;
1760 struct ipp_context *ctx = get_ipp_context(dev);
1762 /* get ipp driver entry */
1763 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1764 if (is_drm_iommu_supported(drm_dev))
1765 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1767 ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
1768 ippdrv->prop_list.ipp_id);
1770 ippdrv->drm_dev = NULL;
1771 exynos_drm_ippdrv_unregister(ippdrv);
1775 static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1776 struct drm_file *file)
1778 struct drm_exynos_file_private *file_priv = file->driver_priv;
1779 struct exynos_drm_ipp_private *priv;
1781 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1785 file_priv->ipp_priv = priv;
1787 INIT_LIST_HEAD(&priv->event_list);
1789 DRM_DEBUG_KMS("done priv[0x%x]\n", (int)priv);
1794 static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1795 struct drm_file *file)
1797 struct drm_exynos_file_private *file_priv = file->driver_priv;
1798 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
1799 struct exynos_drm_ippdrv *ippdrv = NULL;
1800 struct ipp_context *ctx = get_ipp_context(dev);
1801 struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
1804 DRM_DEBUG_KMS("for priv[0x%x]\n", (int)priv);
1806 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1807 mutex_lock(&ippdrv->cmd_lock);
1808 list_for_each_entry_safe(c_node, tc_node,
1809 &ippdrv->cmd_list, list) {
1810 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
1811 count++, (int)ippdrv);
1813 if (c_node->priv == priv) {
1815 * Userland went into an abnormal state: the process was killed
1816 * and the file was closed,
1817 * so IPP never received the stop cmd ctrl.
1818 * Perform the stop operation here in that case.
1820 if (c_node->state == IPP_STATE_START) {
1821 ipp_stop_property(drm_dev, ippdrv,
1823 c_node->state = IPP_STATE_STOP;
1826 ippdrv->dedicated = false;
1827 ipp_clean_cmd_node(ctx, c_node);
1828 if (list_empty(&ippdrv->cmd_list))
1829 pm_runtime_put_sync(ippdrv->dev);
1832 mutex_unlock(&ippdrv->cmd_lock);
1839 static int ipp_probe(struct platform_device *pdev)
1841 struct device *dev = &pdev->dev;
1842 struct ipp_context *ctx;
1843 struct exynos_drm_subdrv *subdrv;
1846 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1850 mutex_init(&ctx->ipp_lock);
1851 mutex_init(&ctx->prop_lock);
1853 idr_init(&ctx->ipp_idr);
1854 idr_init(&ctx->prop_idr);
1857 * Create a single-threaded workqueue for ipp events.
1858 * IPP provides an event thread for the IPP drivers:
1859 * an IPP driver sends event_work to this thread,
1860 * and the IPP event thread sends the event on to the user process.
1862 ctx->event_workq = create_singlethread_workqueue("ipp_event");
1863 if (!ctx->event_workq) {
1864 dev_err(dev, "failed to create event workqueue\n");
1869 * Create a single-threaded workqueue for ipp commands.
1870 * IPP provides a command thread for the user process:
1871 * the user process creates a command node using the set property ioctl,
1872 * builds start_work and sends this work to the command thread,
1873 * and the command thread then starts the property.
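 *
 * Both workqueues are created with create_singlethread_workqueue(), so
 * the queued start/stop/event works are executed one at a time in the
 * order they were submitted.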
1875 ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
1876 if (!ctx->cmd_workq) {
1877 dev_err(dev, "failed to create cmd workqueue\n");
1879 goto err_event_workq;
1882 /* set sub driver information */
1883 subdrv = &ctx->subdrv;
1885 subdrv->probe = ipp_subdrv_probe;
1886 subdrv->remove = ipp_subdrv_remove;
1887 subdrv->open = ipp_subdrv_open;
1888 subdrv->close = ipp_subdrv_close;
1890 platform_set_drvdata(pdev, ctx);
1892 ret = exynos_drm_subdrv_register(subdrv);
1894 DRM_ERROR("failed to register drm ipp device.\n");
1898 dev_info(dev, "drm ipp registered successfully.\n");
1903 destroy_workqueue(ctx->cmd_workq);
1905 destroy_workqueue(ctx->event_workq);
1909 static int ipp_remove(struct platform_device *pdev)
1911 struct ipp_context *ctx = platform_get_drvdata(pdev);
1913 /* unregister sub driver */
1914 exynos_drm_subdrv_unregister(&ctx->subdrv);
1916 /* remove and destroy ipp idrs */
1917 idr_destroy(&ctx->ipp_idr);
1918 idr_destroy(&ctx->prop_idr);
1920 mutex_destroy(&ctx->ipp_lock);
1921 mutex_destroy(&ctx->prop_lock);
1923 /* destroy command and event work queues */
1924 destroy_workqueue(ctx->cmd_workq);
1925 destroy_workqueue(ctx->event_workq);
1930 static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
1932 DRM_DEBUG_KMS("enable[%d]\n", enable);
1937 #ifdef CONFIG_PM_SLEEP
1938 static int ipp_suspend(struct device *dev)
1940 struct ipp_context *ctx = get_ipp_context(dev);
1942 if (pm_runtime_suspended(dev))
1945 return ipp_power_ctrl(ctx, false);
1948 static int ipp_resume(struct device *dev)
1950 struct ipp_context *ctx = get_ipp_context(dev);
1952 if (!pm_runtime_suspended(dev))
1953 return ipp_power_ctrl(ctx, true);
1959 #ifdef CONFIG_PM_RUNTIME
1960 static int ipp_runtime_suspend(struct device *dev)
1962 struct ipp_context *ctx = get_ipp_context(dev);
1964 return ipp_power_ctrl(ctx, false);
1967 static int ipp_runtime_resume(struct device *dev)
1969 struct ipp_context *ctx = get_ipp_context(dev);
1971 return ipp_power_ctrl(ctx, true);
1975 static const struct dev_pm_ops ipp_pm_ops = {
1976 SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
1977 SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
1980 struct platform_driver ipp_driver = {
1982 .remove = ipp_remove,
1984 .name = "exynos-drm-ipp",
1985 .owner = THIS_MODULE,