2 * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations:
10 * http://www.opensource.org/licenses/gpl-license.html
11 * http://www.gnu.org/copyleft/gpl.html
17 * @brief This file contains the IPU driver common API functions.
21 #include <linux/clk.h>
22 #include <linux/clk-provider.h>
23 #include <linux/delay.h>
24 #include <linux/err.h>
25 #include <linux/init.h>
26 #include <linux/interrupt.h>
28 #include <linux/ipu-v3.h>
29 #include <linux/irq.h>
30 #include <linux/irqdesc.h>
31 #include <linux/module.h>
32 #include <linux/mod_devicetable.h>
33 #include <linux/of_device.h>
34 #include <linux/platform_device.h>
35 #include <linux/pm_runtime.h>
36 #include <linux/reset.h>
37 #include <linux/spinlock.h>
38 #include <linux/types.h>
40 #include <asm/cacheflush.h>
42 #include "ipu_param_mem.h"
45 static struct ipu_soc ipu_array[MXC_IPU_MAX_NUM];
48 /* Static functions */
49 static irqreturn_t ipu_sync_irq_handler(int irq, void *desc);
50 static irqreturn_t ipu_err_irq_handler(int irq, void *desc);
52 static inline uint32_t channel_2_dma(ipu_channel_t ch, ipu_buffer_t type)
54 return ((uint32_t) ch >> (6 * type)) & 0x3F;
/*
 * True when @dma_chan belongs to the Image Converter (channels 11..22,
 * with some exclusions starting at 17).
 * NOTE(review): the tail of this condition is not visible in this
 * listing — confirm the remaining excluded channels against the full
 * source before relying on this predicate.
 */
static inline int _ipu_is_ic_chan(uint32_t dma_chan)
	return (((dma_chan >= 11) && (dma_chan <= 22) && (dma_chan != 17) &&
/* True when @dma_chan is the VDI output IDMAC channel (channel 5). */
static inline int _ipu_is_vdi_out_chan(uint32_t dma_chan)
{
	if (dma_chan == 5)
		return 1;
	return 0;
}
/* True for the IC graphics-plane IDMAC channels (14 and 15). */
static inline int _ipu_is_ic_graphic_chan(uint32_t dma_chan)
{
	switch (dma_chan) {
	case 14:
	case 15:
		return 1;
	default:
		return 0;
	}
}
/*
 * True for the DP graphics-window IDMAC channels: either the DP
 * background (23) or DP foreground (27) plane can act as the graphic
 * window.
 */
static inline int _ipu_is_dp_graphic_chan(uint32_t dma_chan)
{
	switch (dma_chan) {
	case 23:
	case 27:
		return 1;
	default:
		return 0;
	}
}
/*
 * True when @dma_chan belongs to the Image Rotator (channels 45..50).
 * Unsigned subtraction folds the two range checks into one compare:
 * values below 45 wrap to a huge number and fail the test.
 */
static inline int _ipu_is_irt_chan(uint32_t dma_chan)
{
	return (dma_chan - 45u) <= (50u - 45u);
}
/* True when @dma_chan belongs to the DMFC (channels 23..29). */
static inline int _ipu_is_dmfc_chan(uint32_t dma_chan)
{
	const uint32_t first = 23, last = 29;

	/* single unsigned compare: below @first wraps and fails */
	return (dma_chan - first) <= (last - first);
}
/*
 * True when @dma_chan belongs to the SMFC (channels 0..3).
 *
 * Fix: the original also tested (dma_chan >= 0), which is always true
 * for an unsigned argument — a tautology that draws -Wtype-limits
 * warnings.  The single upper-bound check is equivalent.
 */
static inline int _ipu_is_smfc_chan(uint32_t dma_chan)
{
	return (dma_chan <= 3);
}
94 static inline int _ipu_is_trb_chan(uint32_t dma_chan)
96 return (((dma_chan == 8) || (dma_chan == 9) ||
97 (dma_chan == 10) || (dma_chan == 13) ||
98 (dma_chan == 21) || (dma_chan == 23) ||
99 (dma_chan == 27) || (dma_chan == 28)) &&
100 (g_ipu_hw_rev >= IPU_V3DEX));
/*
 * Primary display channels.  IDMAC 23 is usually the DP full plane and
 * IDMAC 27 the partial plane; 23/24/28/41 can each drive a display on
 * their own (primary), while 27 depends on 23 and is therefore not
 * primary.
 */
static inline int _ipu_is_primary_disp_chan(uint32_t dma_chan)
{
	switch (dma_chan) {
	case 23:
	case 24:
	case 28:
	case 41:
		return 1;
	default:
		return 0;
	}
}
/*
 * True when @irq lives in one of the IPU "sync" interrupt registers.
 * Registers 5/6/9/10 hold error interrupts; everything else in 1..15
 * is a sync register.
 */
static inline int _ipu_is_sync_irq(uint32_t irq)
{
	/* 1-based index of the 32-bit interrupt register holding @irq */
	int reg_num = irq / 32 + 1;
	static const int sync_regs[] = { 1, 2, 3, 4, 7, 8, 11, 12, 13, 14, 15 };
	unsigned int i;

	for (i = 0; i < sizeof(sync_regs) / sizeof(sync_regs[0]); i++) {
		if (reg_num == sync_regs[i])
			return 1;
	}

	return 0;
}
/* A DMA channel number is usable unless it is the NO_DMA sentinel. */
#define idma_is_valid(ch)	(ch != NO_DMA)
/* Bit mask for @ch inside a 32-bit per-bank channel register (0 for NO_DMA). */
#define idma_mask(ch)		(idma_is_valid(ch) ? (1UL << (ch & 0x1F)) : 0)
/* Non-zero when @dma's bit is set in register bank @reg. */
#define idma_is_set(ipu, reg, dma)	(ipu_idmac_read(ipu, reg(dma)) & idma_mask(dma))
/* Triple-buffer current-buffer fields are 2 bits wide, hence *2 / *3. */
#define tri_cur_buf_mask(ch)	(idma_mask(ch*2) * 3)
#define tri_cur_buf_shift(ch)	(ffs(idma_mask(ch*2)) - 1)
/*
 * Build the per-IPU pixel clock tree (mux -> divider -> gate, one chain
 * per DI) and look up the DI clocks from the device tree.
 *
 * The clock names below are spelled for IPU1; index 3 of each string is
 * the '1' digit, which is patched in place with pdata->id so a second
 * IPU gets "ipu2_*" names.
 *
 * NOTE(review): this listing elides several lines (declarations of
 * clk/ret/i, IS_ERR checks and early returns) — the visible statements
 * are reproduced verbatim.
 */
static int ipu_clk_setup_enable(struct ipu_soc *ipu,
		struct ipu_pltfm_data *pdata)
	char pixel_clk_0[] = "ipu1_pclk_0";
	char pixel_clk_1[] = "ipu1_pclk_1";
	char pixel_clk_0_sel[] = "ipu1_pclk0_sel";
	char pixel_clk_1_sel[] = "ipu1_pclk1_sel";
	char pixel_clk_0_div[] = "ipu1_pclk0_div";
	char pixel_clk_1_div[] = "ipu1_pclk1_div";
	char *ipu_pixel_clk_sel[] = { "ipu1", "ipu1_di0", "ipu1_di1", };
	/* patch the IPU instance digit into every clock name */
	pixel_clk_0[3] += pdata->id;
	pixel_clk_1[3] += pdata->id;
	pixel_clk_0_sel[3] += pdata->id;
	pixel_clk_1_sel[3] += pdata->id;
	pixel_clk_0_div[3] += pdata->id;
	pixel_clk_1_div[3] += pdata->id;
	for (i = 0; i < ARRAY_SIZE(ipu_pixel_clk_sel); i++) {
		pclk_sel = ipu_pixel_clk_sel[i];
		pclk_sel[3] += pdata->id;
	dev_dbg(ipu->dev, "ipu_clk = %lu\n", clk_get_rate(ipu->ipu_clk));
	/* pixel clock muxes: select among ipuN / ipuN_di0 / ipuN_di1 */
	clk = clk_register_mux_pix_clk(ipu->dev, pixel_clk_0_sel,
			(const char **)ipu_pixel_clk_sel,
			ARRAY_SIZE(ipu_pixel_clk_sel),
		dev_err(ipu->dev, "clk_register mux di0 failed");
	ipu->pixel_clk_sel[0] = clk;
	clk = clk_register_mux_pix_clk(ipu->dev, pixel_clk_1_sel,
			(const char **)ipu_pixel_clk_sel,
			ARRAY_SIZE(ipu_pixel_clk_sel),
		dev_err(ipu->dev, "clk_register mux di1 failed");
	ipu->pixel_clk_sel[1] = clk;
	/* dividers, parented on the muxes above */
	clk = clk_register_div_pix_clk(ipu->dev, pixel_clk_0_div,
			pixel_clk_0_sel, 0, pdata->id, 0, 0);
		dev_err(ipu->dev, "clk register di0 div failed");
	clk = clk_register_div_pix_clk(ipu->dev, pixel_clk_1_div,
			pixel_clk_1_sel, CLK_SET_RATE_PARENT, pdata->id, 1, 0);
		dev_err(ipu->dev, "clk register di1 div failed");
	/* gates, parented on the dividers; these are the pixel clocks */
	ipu->pixel_clk[0] = clk_register_gate_pix_clk(ipu->dev, pixel_clk_0,
			pixel_clk_0_div, CLK_SET_RATE_PARENT,
	if (IS_ERR(ipu->pixel_clk[0])) {
		dev_err(ipu->dev, "clk register di0 gate failed");
		return PTR_ERR(ipu->pixel_clk[0]);
	ipu->pixel_clk[1] = clk_register_gate_pix_clk(ipu->dev, pixel_clk_1,
			pixel_clk_1_div, CLK_SET_RATE_PARENT,
	if (IS_ERR(ipu->pixel_clk[1])) {
		dev_err(ipu->dev, "clk register di1 gate failed");
		return PTR_ERR(ipu->pixel_clk[1]);
	/* default both pixel clock muxes to the IPU bus clock */
	ret = clk_set_parent(ipu->pixel_clk_sel[0], ipu->ipu_clk);
		dev_err(ipu->dev, "clk set parent failed");
	ret = clk_set_parent(ipu->pixel_clk_sel[1], ipu->ipu_clk);
		dev_err(ipu->dev, "clk set parent failed");
	/* DI clocks and their parent selectors come from the device tree */
	ipu->di_clk[0] = devm_clk_get(ipu->dev, "di0");
	if (IS_ERR(ipu->di_clk[0])) {
		dev_err(ipu->dev, "clk_get di0 failed");
		return PTR_ERR(ipu->di_clk[0]);
	ipu->di_clk[1] = devm_clk_get(ipu->dev, "di1");
	if (IS_ERR(ipu->di_clk[1])) {
		dev_err(ipu->dev, "clk_get di1 failed");
		return PTR_ERR(ipu->di_clk[1]);
	ipu->di_clk_sel[0] = devm_clk_get(ipu->dev, "di0_sel");
	if (IS_ERR(ipu->di_clk_sel[0])) {
		dev_err(ipu->dev, "clk_get di0_sel failed");
		return PTR_ERR(ipu->di_clk_sel[0]);
	ipu->di_clk_sel[1] = devm_clk_get(ipu->dev, "di1_sel");
	if (IS_ERR(ipu->di_clk_sel[1])) {
		dev_err(ipu->dev, "clk_get di1_sel failed");
		return PTR_ERR(ipu->di_clk_sel[1]);
/*
 * Reset the IPU internal memories: write 0x807FFFFF to IPU_MEM_RST
 * (bit 31 = start, low bits select the memories) and poll until the
 * hardware clears bit 31.
 * NOTE(review): the loop body / timeout handling is not visible here.
 */
static int ipu_mem_reset(struct ipu_soc *ipu)
	ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);
	while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
258 struct ipu_soc *ipu_get_soc(int id)
260 if (id >= MXC_IPU_MAX_NUM)
261 return ERR_PTR(-ENODEV);
262 else if (!ipu_array[id].online)
263 return ERR_PTR(-ENODEV);
265 return &(ipu_array[id]);
267 EXPORT_SYMBOL_GPL(ipu_get_soc);
/*
 * Turn on the IPU bus clock.  The clock was prepared once at probe
 * time, so only clk_enable() is needed here.
 */
void _ipu_get(struct ipu_soc *ipu)
	ret = clk_enable(ipu->ipu_clk);
/* Counterpart of _ipu_get(): turn the IPU bus clock back off. */
void _ipu_put(struct ipu_soc *ipu)
	clk_disable(ipu->ipu_clk);
/*
 * Disable the IPU HSP clock.
 * NOTE(review): the function body is not visible in this listing.
 */
void ipu_disable_hsp_clk(struct ipu_soc *ipu)
EXPORT_SYMBOL(ipu_disable_hsp_clk);
/*
 * Platform-bus id table; driver_data carries the IPU hardware revision
 * consumed by ipu_probe().
 */
static struct platform_device_id imx_ipu_type[] = {
	.driver_data = IPU_V3H,
MODULE_DEVICE_TABLE(platform, imx_ipu_type);
/* Device-tree match table: maps compatibles onto imx_ipu_type entries. */
static const struct of_device_id imx_ipuv3_dt_ids[] = {
	{ .compatible = "fsl,imx6q-ipu", .data = &imx_ipu_type[IMX6Q_IPU], },
MODULE_DEVICE_TABLE(of, imx_ipuv3_dt_ids);
306 * This function is called by the driver framework to initialize the IPU
309 * @param dev The device structure for the IPU passed in by the
312 * @return Returns 0 on success or negative error code on error
/*
 * Bind an IPU instance: parse DT properties, map every submodule
 * register window, request the sync/error IRQs, set up clocks, reset
 * the block and apply default priorities/interrupt masks.
 *
 * NOTE(review): this listing elides a number of lines (local
 * declarations, error-branch bodies and early returns); the visible
 * statements are reproduced verbatim.
 */
static int ipu_probe(struct platform_device *pdev)
	struct resource *res;
	unsigned long ipu_base;
	const struct of_device_id *of_id =
			of_match_device(imx_ipuv3_dt_ids, &pdev->dev);
	struct ipu_pltfm_data *pltfm_data;
	dev_dbg(&pdev->dev, "<%s>\n", __func__);
	pltfm_data = devm_kzalloc(&pdev->dev, sizeof(struct ipu_pltfm_data),
	/* "bypass_reset" is optional; absence just logs at debug level */
	ret = of_property_read_u32(pdev->dev.of_node,
					"bypass_reset", &bypass_reset);
		dev_dbg(&pdev->dev, "can not get bypass_reset\n");
	pltfm_data->bypass_reset = (bool)bypass_reset;
	/* the "ipu" alias id selects the slot in ipu_array */
	pltfm_data->id = of_alias_get_id(pdev->dev.of_node, "ipu");
	if (pltfm_data->id < 0) {
		dev_dbg(&pdev->dev, "can not get alias id\n");
		return pltfm_data->id;
	pdev->id_entry = of_id->data;
	pltfm_data->devtype = pdev->id_entry->driver_data;
	g_ipu_hw_rev = pltfm_data->devtype;
	ipu = &ipu_array[pltfm_data->id];
	memset(ipu, 0, sizeof(struct ipu_soc));
	ipu->dev = &pdev->dev;
	ipu->pdata = pltfm_data;
	dev_dbg(ipu->dev, "IPU rev:%d\n", g_ipu_hw_rev);
	spin_lock_init(&ipu->int_reg_spin_lock);
	spin_lock_init(&ipu->rdy_reg_spin_lock);
	mutex_init(&ipu->mutex_lock);
	ipu->irq_sync = platform_get_irq(pdev, 0);
	ipu->irq_err = platform_get_irq(pdev, 1);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res || ipu->irq_sync < 0 || ipu->irq_err < 0) {
		dev_err(&pdev->dev, "can't get device resources\n");
	if (!devm_request_mem_region(&pdev->dev, res->start,
				resource_size(res), pdev->name))
	ret = devm_request_irq(&pdev->dev, ipu->irq_sync,
			ipu_sync_irq_handler, 0, pdev->name, ipu);
		dev_err(ipu->dev, "request SYNC interrupt failed\n");
	ret = devm_request_irq(&pdev->dev, ipu->irq_err,
			ipu_err_irq_handler, 0, pdev->name, ipu);
		dev_err(ipu->dev, "request ERR interrupt failed\n");
	ipu_base = res->start;
	/* the register block offset differs per IPU silicon revision */
	if (g_ipu_hw_rev == IPU_V3H)	/* IPUv3H */
		ipu_base += IPUV3H_REG_BASE;
	else if (g_ipu_hw_rev == IPU_V3M)	/* IPUv3M */
		ipu_base += IPUV3M_REG_BASE;
	else			/* IPUv3D, v3E, v3EX */
		ipu_base += IPUV3DEX_REG_BASE;
	/* map each submodule register window individually */
	ipu->cm_reg = devm_ioremap(&pdev->dev,
				ipu_base + IPU_CM_REG_BASE, PAGE_SIZE);
	ipu->ic_reg = devm_ioremap(&pdev->dev,
				ipu_base + IPU_IC_REG_BASE, PAGE_SIZE);
	ipu->idmac_reg = devm_ioremap(&pdev->dev,
				ipu_base + IPU_IDMAC_REG_BASE, PAGE_SIZE);
	/* DP Registers are accessed thru the SRM */
	ipu->dp_reg = devm_ioremap(&pdev->dev,
				ipu_base + IPU_SRM_REG_BASE, PAGE_SIZE);
	ipu->dc_reg = devm_ioremap(&pdev->dev,
				ipu_base + IPU_DC_REG_BASE, PAGE_SIZE);
	ipu->dmfc_reg = devm_ioremap(&pdev->dev,
				ipu_base + IPU_DMFC_REG_BASE, PAGE_SIZE);
	ipu->di_reg[0] = devm_ioremap(&pdev->dev,
				ipu_base + IPU_DI0_REG_BASE, PAGE_SIZE);
	ipu->di_reg[1] = devm_ioremap(&pdev->dev,
				ipu_base + IPU_DI1_REG_BASE, PAGE_SIZE);
	ipu->smfc_reg = devm_ioremap(&pdev->dev,
				ipu_base + IPU_SMFC_REG_BASE, PAGE_SIZE);
	ipu->csi_reg[0] = devm_ioremap(&pdev->dev,
				ipu_base + IPU_CSI0_REG_BASE, PAGE_SIZE);
	ipu->csi_reg[1] = devm_ioremap(&pdev->dev,
				ipu_base + IPU_CSI1_REG_BASE, PAGE_SIZE);
	ipu->cpmem_base = devm_ioremap(&pdev->dev,
				ipu_base + IPU_CPMEM_REG_BASE, SZ_128K);
	ipu->tpmem_base = devm_ioremap(&pdev->dev,
				ipu_base + IPU_TPM_REG_BASE, SZ_64K);
	ipu->dc_tmpl_reg = devm_ioremap(&pdev->dev,
				ipu_base + IPU_DC_TMPL_REG_BASE, SZ_128K);
	ipu->vdi_reg = devm_ioremap(&pdev->dev,
				ipu_base + IPU_VDI_REG_BASE, PAGE_SIZE);
	ipu->disp_base[1] = devm_ioremap(&pdev->dev,
				ipu_base + IPU_DISP1_BASE, SZ_4K);
	if (!ipu->cm_reg || !ipu->ic_reg || !ipu->idmac_reg ||
		!ipu->dp_reg || !ipu->dc_reg || !ipu->dmfc_reg ||
		!ipu->di_reg[0] || !ipu->di_reg[1] || !ipu->smfc_reg ||
		!ipu->csi_reg[0] || !ipu->csi_reg[1] || !ipu->cpmem_base ||
		!ipu->tpmem_base || !ipu->dc_tmpl_reg || !ipu->disp_base[1]
	dev_dbg(ipu->dev, "IPU CM Regs = %p\n", ipu->cm_reg);
	dev_dbg(ipu->dev, "IPU IC Regs = %p\n", ipu->ic_reg);
	dev_dbg(ipu->dev, "IPU IDMAC Regs = %p\n", ipu->idmac_reg);
	dev_dbg(ipu->dev, "IPU DP Regs = %p\n", ipu->dp_reg);
	dev_dbg(ipu->dev, "IPU DC Regs = %p\n", ipu->dc_reg);
	dev_dbg(ipu->dev, "IPU DMFC Regs = %p\n", ipu->dmfc_reg);
	dev_dbg(ipu->dev, "IPU DI0 Regs = %p\n", ipu->di_reg[0]);
	dev_dbg(ipu->dev, "IPU DI1 Regs = %p\n", ipu->di_reg[1]);
	dev_dbg(ipu->dev, "IPU SMFC Regs = %p\n", ipu->smfc_reg);
	dev_dbg(ipu->dev, "IPU CSI0 Regs = %p\n", ipu->csi_reg[0]);
	dev_dbg(ipu->dev, "IPU CSI1 Regs = %p\n", ipu->csi_reg[1]);
	dev_dbg(ipu->dev, "IPU CPMem = %p\n", ipu->cpmem_base);
	dev_dbg(ipu->dev, "IPU TPMem = %p\n", ipu->tpmem_base);
	dev_dbg(ipu->dev, "IPU DC Template Mem = %p\n", ipu->dc_tmpl_reg);
	dev_dbg(ipu->dev, "IPU Display Region 1 Mem = %p\n", ipu->disp_base[1]);
	dev_dbg(ipu->dev, "IPU VDI Regs = %p\n", ipu->vdi_reg);
	ipu->ipu_clk = devm_clk_get(ipu->dev, "bus");
	if (IS_ERR(ipu->ipu_clk)) {
		dev_err(ipu->dev, "clk_get ipu failed");
		return PTR_ERR(ipu->ipu_clk);
	/* ipu_clk is always prepared */
	ret = clk_prepare_enable(ipu->ipu_clk);
		dev_err(ipu->dev, "ipu clk enable failed\n");
	platform_set_drvdata(pdev, ipu);
	if (!pltfm_data->bypass_reset) {
		ret = device_reset(&pdev->dev);
			dev_err(&pdev->dev, "failed to reset: %d\n", ret);
	/* Set MCU_T to divide MCU access window into 2 */
	ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
	/* setup ipu clk tree after ipu reset */
	ret = ipu_clk_setup_enable(ipu, pltfm_data);
		dev_err(ipu->dev, "ipu clk setup failed\n");
	/* Set sync refresh channels and CSI->mem channel as high priority */
	ipu_idmac_write(ipu, 0x18800001L, IDMAC_CHA_PRI(0));
	/* Enable error interrupts by default */
	ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(5));
	ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(6));
	ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(9));
	ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(10));
	if (!pltfm_data->bypass_reset)
		clk_disable(ipu->ipu_clk);
	register_ipu_device(ipu, ipu->pdata->id);
	pm_runtime_enable(&pdev->dev);
/*
 * Driver unbind: unregister the child IPU device registered at probe
 * time and release the bus clock reference.
 */
int ipu_remove(struct platform_device *pdev)
	struct ipu_soc *ipu = platform_get_drvdata(pdev);
	unregister_ipu_device(ipu, ipu->pdata->id);
	clk_put(ipu->ipu_clk);
/*
 * Dump the key IPU configuration registers (CM, IDMAC, DMFC, FS flow,
 * VDI, IC) at debug log level — diagnostic aid only, no side effects
 * beyond register reads.
 */
void ipu_dump_registers(struct ipu_soc *ipu)
	dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n", ipu_cm_read(ipu, IPU_CONF));
	dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n", ipu_idmac_read(ipu, IDMAC_CONF));
	dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
	dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
	dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
	dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
	dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_BAND_EN(0)));
	dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_BAND_EN(32)));
	dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
	dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
	/* triple-buffer registers only exist on IPUv3DEX and newer */
	if (g_ipu_hw_rev >= IPU_V3DEX) {
		dev_dbg(ipu->dev, "IPU_CHA_TRB_MODE_SEL0 = \t0x%08X\n",
			ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(0)));
		dev_dbg(ipu->dev, "IPU_CHA_TRB_MODE_SEL1 = \t0x%08X\n",
			ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(32)));
	dev_dbg(ipu->dev, "DMFC_WR_CHAN = \t0x%08X\n",
		ipu_dmfc_read(ipu, DMFC_WR_CHAN));
	dev_dbg(ipu->dev, "DMFC_WR_CHAN_DEF = \t0x%08X\n",
		ipu_dmfc_read(ipu, DMFC_WR_CHAN_DEF));
	dev_dbg(ipu->dev, "DMFC_DP_CHAN = \t0x%08X\n",
		ipu_dmfc_read(ipu, DMFC_DP_CHAN));
	dev_dbg(ipu->dev, "DMFC_DP_CHAN_DEF = \t0x%08X\n",
		ipu_dmfc_read(ipu, DMFC_DP_CHAN_DEF));
	dev_dbg(ipu->dev, "DMFC_IC_CTRL = \t0x%08X\n",
		ipu_dmfc_read(ipu, DMFC_IC_CTRL));
	dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
	dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
	dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
	dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
	dev_dbg(ipu->dev, "IPU_VDIC_VDI_FSIZE = \t0x%08X\n",
		ipu_vdi_read(ipu, VDI_FSIZE));
	dev_dbg(ipu->dev, "IPU_VDIC_VDI_C = \t0x%08X\n",
		ipu_vdi_read(ipu, VDI_C));
	dev_dbg(ipu->dev, "IPU_IC_CONF = \t0x%08X\n",
		ipu_ic_read(ipu, IC_CONF));
577 * This function is called to initialize a logical IPU channel.
579 * @param ipu ipu handler
580 * @param channel Input parameter for the logical channel ID to init.
582 * @param params Input parameter containing union of channel
583 * initialization parameters.
585 * @return Returns 0 on success or negative error code on fail
/*
 * Initialize a logical IPU channel: take a runtime-PM reference,
 * re-arm error interrupts, then program the submodules (CSI, SMFC, IC,
 * VDI, DC, DP, DMFC...) required by the channel type and bump the
 * matching use counters.
 *
 * NOTE(review): this listing elides many lines (local declarations,
 * several `case` labels, error-branch bodies, `break`s and the final
 * return) — the visible statements are reproduced verbatim.
 */
int32_t ipu_init_channel(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params)
	dev_dbg(ipu->dev, "init channel = %d\n", IPU_CHAN_ID(channel));
	ret = pm_runtime_get_sync(ipu->dev);
		dev_err(ipu->dev, "ch = %d, pm_runtime_get failed:%d!\n",
			IPU_CHAN_ID(channel), ret);
	/*
	 * Here, ret could be 1 if the device's runtime PM status was
	 * already 'active', so clear it to be 0.
	 */
	mutex_lock(&ipu->mutex_lock);
	/* Re-enable error interrupts every time a channel is initialized */
	ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(5));
	ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(6));
	ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(9));
	ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(10));
	if (ipu->channel_init_mask & (1L << IPU_CHAN_ID(channel))) {
		dev_warn(ipu->dev, "Warning: channel already initialized %d\n",
			IPU_CHAN_ID(channel));
	ipu_conf = ipu_cm_read(ipu, IPU_CONF);
		/* CSI -> memory capture path (via SMFC) */
		if (params->csi_mem.csi > 1) {
		if (params->csi_mem.interlaced)
			ipu->chan_is_interlaced[channel_2_dma(channel,
				IPU_OUTPUT_BUFFER)] = true;
			ipu->chan_is_interlaced[channel_2_dma(channel,
				IPU_OUTPUT_BUFFER)] = false;
		ipu->smfc_use_count++;
		ipu->csi_channel[params->csi_mem.csi] = channel;
		if (params->csi_mem.mipi_en) {
			ipu_conf |= (1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
				params->csi_mem.csi));
			_ipu_smfc_init(ipu, channel, params->csi_mem.mipi_vc,
				params->csi_mem.csi);
			_ipu_csi_set_mipi_di(ipu, params->csi_mem.mipi_vc,
				params->csi_mem.mipi_id, params->csi_mem.csi);
			ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
				params->csi_mem.csi));
			_ipu_smfc_init(ipu, channel, 0, params->csi_mem.csi);
		/*CSI data (include compander) dest*/
		_ipu_csi_init(ipu, channel, params->csi_mem.csi);
	case CSI_PRP_ENC_MEM:
		/* CSI -> IC pre-process encoder -> memory */
		if (params->csi_prp_enc_mem.csi > 1) {
		if ((ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) ||
			(ipu->using_ic_dirct_ch == MEM_VDI_MEM)) {
		ipu->using_ic_dirct_ch = CSI_PRP_ENC_MEM;
		ipu->csi_channel[params->csi_prp_enc_mem.csi] = channel;
		if (params->csi_prp_enc_mem.mipi_en) {
			ipu_conf |= (1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
				params->csi_prp_enc_mem.csi));
			_ipu_csi_set_mipi_di(ipu,
				params->csi_prp_enc_mem.mipi_vc,
				params->csi_prp_enc_mem.mipi_id,
				params->csi_prp_enc_mem.csi);
			ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
				params->csi_prp_enc_mem.csi));
		/*CSI0/1 feed into IC*/
		ipu_conf &= ~IPU_CONF_IC_INPUT;
		if (params->csi_prp_enc_mem.csi)
			ipu_conf |= IPU_CONF_CSI_SEL;
			ipu_conf &= ~IPU_CONF_CSI_SEL;
		/*PRP skip buffer in memory, only valid when RWS_EN is true*/
		reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
		ipu_cm_write(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
		/*CSI data (include compander) dest*/
		_ipu_csi_init(ipu, channel, params->csi_prp_enc_mem.csi);
		_ipu_ic_init_prpenc(ipu, params, true);
		/* CSI -> IC pre-process viewfinder -> memory */
		if (params->csi_prp_vf_mem.csi > 1) {
		if ((ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) ||
			(ipu->using_ic_dirct_ch == MEM_VDI_MEM)) {
		ipu->using_ic_dirct_ch = CSI_PRP_VF_MEM;
		ipu->csi_channel[params->csi_prp_vf_mem.csi] = channel;
		if (params->csi_prp_vf_mem.mipi_en) {
			ipu_conf |= (1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
				params->csi_prp_vf_mem.csi));
			_ipu_csi_set_mipi_di(ipu,
				params->csi_prp_vf_mem.mipi_vc,
				params->csi_prp_vf_mem.mipi_id,
				params->csi_prp_vf_mem.csi);
			ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
				params->csi_prp_vf_mem.csi));
		/*CSI0/1 feed into IC*/
		ipu_conf &= ~IPU_CONF_IC_INPUT;
		if (params->csi_prp_vf_mem.csi)
			ipu_conf |= IPU_CONF_CSI_SEL;
			ipu_conf &= ~IPU_CONF_CSI_SEL;
		/*PRP skip buffer in memory, only valid when RWS_EN is true*/
		reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
		ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
		/*CSI data (include compander) dest*/
		_ipu_csi_init(ipu, channel, params->csi_prp_vf_mem.csi);
		_ipu_ic_init_prpvf(ipu, params, true);
		/* memory -> IC viewfinder -> memory */
		reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
		ipu_cm_write(ipu, reg | FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
		if (params->mem_prp_vf_mem.graphics_combine_en)
			ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
		if (params->mem_prp_vf_mem.alpha_chan_en)
			ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
		_ipu_ic_init_prpvf(ipu, params, false);
	case MEM_VDI_PRP_VF_MEM:
		/* memory -> VDI de-interlacer -> IC viewfinder -> memory */
		if ((ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM) ||
			(ipu->using_ic_dirct_ch == MEM_VDI_MEM) ||
			(ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)) {
		ipu->using_ic_dirct_ch = MEM_VDI_PRP_VF_MEM;
		ipu->vdi_use_count++;
		reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
		reg &= ~FS_VDI_SRC_SEL_MASK;
		ipu_cm_write(ipu, reg , IPU_FS_PROC_FLOW1);
		if (params->mem_prp_vf_mem.graphics_combine_en)
			ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
		_ipu_ic_init_prpvf(ipu, params, false);
		_ipu_vdi_init(ipu, channel, params);
	case MEM_VDI_PRP_VF_MEM_P:
	case MEM_VDI_PRP_VF_MEM_N:
		_ipu_vdi_init(ipu, channel, params);
		/* memory -> VDI -> memory */
		if ((ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM) ||
			(ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) ||
			(ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)) {
		ipu->using_ic_dirct_ch = MEM_VDI_MEM;
		ipu->vdi_use_count++;
		_ipu_ic_init_prpvf(ipu, params, false);
		_ipu_vdi_init(ipu, channel, params);
		ipu->rot_use_count++;
		_ipu_ic_init_rotate_vf(ipu, params);
	case MEM_PRP_ENC_MEM:
		reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
		ipu_cm_write(ipu, reg | FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
		_ipu_ic_init_prpenc(ipu, params, false);
	case MEM_ROT_ENC_MEM:
		ipu->rot_use_count++;
		_ipu_ic_init_rotate_enc(ipu, params);
		/* memory -> IC post-processor -> memory */
		if (params->mem_pp_mem.graphics_combine_en)
			ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
		if (params->mem_pp_mem.alpha_chan_en)
			ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
		_ipu_ic_init_pp(ipu, params);
		_ipu_ic_init_rotate_pp(ipu, params);
		ipu->rot_use_count++;
		/* memory -> DC (sync display) */
		if (params->mem_dc_sync.di > 1) {
		ipu->dc_di_assignment[1] = params->mem_dc_sync.di;
		_ipu_dc_init(ipu, 1, params->mem_dc_sync.di,
			params->mem_dc_sync.interlaced,
			params->mem_dc_sync.out_pixel_fmt);
		ipu->di_use_count[params->mem_dc_sync.di]++;
		ipu->dmfc_use_count++;
		/* memory -> DP background (sync display) */
		if (params->mem_dp_bg_sync.di > 1) {
		if (params->mem_dp_bg_sync.alpha_chan_en)
			ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
		ipu->dc_di_assignment[5] = params->mem_dp_bg_sync.di;
		_ipu_dp_init(ipu, channel, params->mem_dp_bg_sync.in_pixel_fmt,
			params->mem_dp_bg_sync.out_pixel_fmt);
		_ipu_dc_init(ipu, 5, params->mem_dp_bg_sync.di,
			params->mem_dp_bg_sync.interlaced,
			params->mem_dp_bg_sync.out_pixel_fmt);
		ipu->di_use_count[params->mem_dp_bg_sync.di]++;
		ipu->dmfc_use_count++;
		/* memory -> DP foreground (sync display) */
		_ipu_dp_init(ipu, channel, params->mem_dp_fg_sync.in_pixel_fmt,
			params->mem_dp_fg_sync.out_pixel_fmt);
		if (params->mem_dp_fg_sync.alpha_chan_en)
			ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
		ipu->dmfc_use_count++;
		/* direct async display channels use DC slots 8 and 9 */
		if (params->direct_async.di > 1) {
		ipu->dc_di_assignment[8] = params->direct_async.di;
		_ipu_dc_init(ipu, 8, params->direct_async.di, false, IPU_PIX_FMT_GENERIC);
		ipu->di_use_count[params->direct_async.di]++;
		if (params->direct_async.di > 1) {
		ipu->dc_di_assignment[9] = params->direct_async.di;
		_ipu_dc_init(ipu, 9, params->direct_async.di, false, IPU_PIX_FMT_GENERIC);
		ipu->di_use_count[params->direct_async.di]++;
		dev_err(ipu->dev, "Missing channel initialization\n");
	ipu->channel_init_mask |= 1L << IPU_CHAN_ID(channel);
	ipu_cm_write(ipu, ipu_conf, IPU_CONF);
	mutex_unlock(&ipu->mutex_lock);
EXPORT_SYMBOL(ipu_init_channel);
906 * This function is called to uninitialize a logical IPU channel.
908 * @param ipu ipu handler
909 * @param channel Input parameter for the logical channel ID to uninit.
/*
 * Tear down a logical IPU channel: clear its double/triple buffer
 * mode bits, undo the per-type submodule setup, decrement use counters
 * and gate off any submodule whose counter reached zero, then drop the
 * runtime-PM reference taken in ipu_init_channel().
 *
 * NOTE(review): this listing elides many lines (local declarations,
 * several `case` labels, `break`s and error-branch bodies); the visible
 * statements are reproduced verbatim.
 */
void ipu_uninit_channel(struct ipu_soc *ipu, ipu_channel_t channel)
	uint32_t in_dma, out_dma = 0;
	uint32_t dc_chan = 0;
	mutex_lock(&ipu->mutex_lock);
	if ((ipu->channel_init_mask & (1L << IPU_CHAN_ID(channel))) == 0) {
		dev_dbg(ipu->dev, "Channel already uninitialized %d\n",
			IPU_CHAN_ID(channel));
		mutex_unlock(&ipu->mutex_lock);
	/* Make sure channel is disabled */
	/* Get input and output dma channels */
	in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
	out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
	if (idma_is_set(ipu, IDMAC_CHA_EN, in_dma) ||
	    idma_is_set(ipu, IDMAC_CHA_EN, out_dma)) {
		"Channel %d is not disabled, disable first\n",
		IPU_CHAN_ID(channel));
		mutex_unlock(&ipu->mutex_lock);
	ipu_conf = ipu_cm_read(ipu, IPU_CONF);
	/* Reset the double buffer */
	reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(in_dma));
	ipu_cm_write(ipu, reg & ~idma_mask(in_dma), IPU_CHA_DB_MODE_SEL(in_dma));
	reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(out_dma));
	ipu_cm_write(ipu, reg & ~idma_mask(out_dma), IPU_CHA_DB_MODE_SEL(out_dma));
	/* Reset the triple buffer */
	reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(in_dma));
	ipu_cm_write(ipu, reg & ~idma_mask(in_dma), IPU_CHA_TRB_MODE_SEL(in_dma));
	reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(out_dma));
	ipu_cm_write(ipu, reg & ~idma_mask(out_dma), IPU_CHA_TRB_MODE_SEL(out_dma));
	if (_ipu_is_ic_chan(in_dma) || _ipu_is_dp_graphic_chan(in_dma)) {
		ipu->sec_chan_en[IPU_CHAN_ID(channel)] = false;
		ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = false;
		ipu->smfc_use_count--;
		if (ipu->csi_channel[0] == channel) {
			ipu->csi_channel[0] = CHAN_NONE;
		} else if (ipu->csi_channel[1] == channel) {
			ipu->csi_channel[1] = CHAN_NONE;
	case CSI_PRP_ENC_MEM:
		if (ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)
			ipu->using_ic_dirct_ch = 0;
		_ipu_ic_uninit_prpenc(ipu);
		if (ipu->csi_channel[0] == channel) {
			ipu->csi_channel[0] = CHAN_NONE;
		} else if (ipu->csi_channel[1] == channel) {
			ipu->csi_channel[1] = CHAN_NONE;
		if (ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM)
			ipu->using_ic_dirct_ch = 0;
		_ipu_ic_uninit_prpvf(ipu);
		if (ipu->csi_channel[0] == channel) {
			ipu->csi_channel[0] = CHAN_NONE;
		} else if (ipu->csi_channel[1] == channel) {
			ipu->csi_channel[1] = CHAN_NONE;
		_ipu_ic_uninit_prpvf(ipu);
		reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
		ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
	case MEM_VDI_PRP_VF_MEM:
		ipu->ic_use_count--;
		ipu->vdi_use_count--;
		if (ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM)
			ipu->using_ic_dirct_ch = 0;
		_ipu_ic_uninit_prpvf(ipu);
		_ipu_vdi_uninit(ipu);
		reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
		ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
		ipu->ic_use_count--;
		ipu->vdi_use_count--;
		if (ipu->using_ic_dirct_ch == MEM_VDI_MEM)
			ipu->using_ic_dirct_ch = 0;
		_ipu_ic_uninit_prpvf(ipu);
		_ipu_vdi_uninit(ipu);
	case MEM_VDI_PRP_VF_MEM_P:
	case MEM_VDI_PRP_VF_MEM_N:
	case MEM_ROT_VF_MEM:
		ipu->rot_use_count--;
		ipu->ic_use_count--;
		_ipu_ic_uninit_rotate_vf(ipu);
	case MEM_PRP_ENC_MEM:
		ipu->ic_use_count--;
		_ipu_ic_uninit_prpenc(ipu);
		reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
		ipu_cm_write(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
	case MEM_ROT_ENC_MEM:
		ipu->rot_use_count--;
		ipu->ic_use_count--;
		_ipu_ic_uninit_rotate_enc(ipu);
		ipu->ic_use_count--;
		_ipu_ic_uninit_pp(ipu);
	case MEM_ROT_PP_MEM:
		ipu->rot_use_count--;
		ipu->ic_use_count--;
		_ipu_ic_uninit_rotate_pp(ipu);
		_ipu_dc_uninit(ipu, 1);
		ipu->di_use_count[ipu->dc_di_assignment[1]]--;
		ipu->dc_use_count--;
		ipu->dmfc_use_count--;
		_ipu_dp_uninit(ipu, channel);
		_ipu_dc_uninit(ipu, 5);
		ipu->di_use_count[ipu->dc_di_assignment[5]]--;
		ipu->dc_use_count--;
		ipu->dp_use_count--;
		ipu->dmfc_use_count--;
		_ipu_dp_uninit(ipu, channel);
		ipu->dc_use_count--;
		ipu->dp_use_count--;
		ipu->dmfc_use_count--;
		_ipu_dc_uninit(ipu, 8);
		ipu->di_use_count[ipu->dc_di_assignment[8]]--;
		ipu->dc_use_count--;
		_ipu_dc_uninit(ipu, 9);
		ipu->di_use_count[ipu->dc_di_assignment[9]]--;
		ipu->dc_use_count--;
	/* gate off every submodule whose use counter dropped to zero */
	if (ipu->ic_use_count == 0)
		ipu_conf &= ~IPU_CONF_IC_EN;
	if (ipu->vdi_use_count == 0) {
		ipu_conf &= ~IPU_CONF_ISP_EN;
		ipu_conf &= ~IPU_CONF_VDI_EN;
		ipu_conf &= ~IPU_CONF_IC_INPUT;
	if (ipu->rot_use_count == 0)
		ipu_conf &= ~IPU_CONF_ROT_EN;
	if (ipu->dc_use_count == 0)
		ipu_conf &= ~IPU_CONF_DC_EN;
	if (ipu->dp_use_count == 0)
		ipu_conf &= ~IPU_CONF_DP_EN;
	if (ipu->dmfc_use_count == 0)
		ipu_conf &= ~IPU_CONF_DMFC_EN;
	if (ipu->di_use_count[0] == 0) {
		ipu_conf &= ~IPU_CONF_DI0_EN;
	if (ipu->di_use_count[1] == 0) {
		ipu_conf &= ~IPU_CONF_DI1_EN;
	if (ipu->smfc_use_count == 0)
		ipu_conf &= ~IPU_CONF_SMFC_EN;
	ipu_cm_write(ipu, ipu_conf, IPU_CONF);
	ipu->channel_init_mask &= ~(1L << IPU_CHAN_ID(channel));
	/*
	 * Disable pixel clk and its parent clock(if the parent clock
	 * usecount is 1) after clearing DC/DP/DI bits in IPU_CONF
	 * register to prevent LVDS display channel starvation.
	 */
	if (_ipu_is_primary_disp_chan(in_dma))
		clk_disable_unprepare(ipu->pixel_clk[ipu->dc_di_assignment[dc_chan]]);
	mutex_unlock(&ipu->mutex_lock);
	ret = pm_runtime_put_sync_suspend(ipu->dev);
		dev_err(ipu->dev, "ch = %d, pm_runtime_put failed:%d!\n",
			IPU_CHAN_ID(channel), ret);
	/* counters must never go negative; flag accounting bugs loudly */
	WARN_ON(ipu->ic_use_count < 0);
	WARN_ON(ipu->vdi_use_count < 0);
	WARN_ON(ipu->rot_use_count < 0);
	WARN_ON(ipu->dc_use_count < 0);
	WARN_ON(ipu->dp_use_count < 0);
	WARN_ON(ipu->dmfc_use_count < 0);
	WARN_ON(ipu->smfc_use_count < 0);
EXPORT_SYMBOL(ipu_uninit_channel);
1145 * This function is called to initialize buffer(s) for logical IPU channel.
1147 * @param ipu ipu handler
1149 * @param channel Input parameter for the logical channel ID.
1151 * @param type Input parameter which buffer to initialize.
1153 * @param pixel_fmt Input parameter for pixel format of buffer.
1154 * Pixel format is a FOURCC ASCII code.
1156 * @param width Input parameter for width of buffer in pixels.
1158 * @param height Input parameter for height of buffer in pixels.
1160 * @param stride Input parameter for stride length of buffer
1163 * @param rot_mode Input parameter for rotation setting of buffer.
1164 * A rotation setting other than
1165 * IPU_ROTATE_VERT_FLIP
1166 * should only be used for input buffers of
1167 * rotation channels.
1169 * @param phyaddr_0 Input parameter buffer 0 physical address.
1171 * @param phyaddr_1 Input parameter buffer 1 physical address.
1172 * Setting this to a value other than NULL enables
1173 * double buffering mode.
1175 * @param phyaddr_2 Input parameter buffer 2 physical address.
1176 * Setting this to a value other than NULL enables
1177 * triple buffering mode, phyaddr_1 should not be
1180 * @param u private u offset for additional cropping,
1183 * @param v private v offset for additional cropping,
1186 * @return Returns 0 on success or negative error code on fail
1188 int32_t ipu_init_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
1191 uint16_t width, uint16_t height,
1193 ipu_rotate_mode_t rot_mode,
1194 dma_addr_t phyaddr_0, dma_addr_t phyaddr_1,
1195 dma_addr_t phyaddr_2,
1196 uint32_t u, uint32_t v)
1200 uint32_t burst_size;
/* Resolve the logical channel + buffer type to an IDMAC channel number. */
1202 dma_chan = channel_2_dma(channel, type);
1203 if (!idma_is_valid(dma_chan))
/* Clamp the stride up to at least one full line of pixels. */
1206 if (stride < width * bytes_per_pixel(pixel_fmt))
1207 stride = width * bytes_per_pixel(pixel_fmt);
1211 "Stride not 32-bit aligned, stride = %d\n", stride);
1214 /* IC & IRT channels' width must be multiple of 8 pixels */
1215 if ((_ipu_is_ic_chan(dma_chan) || _ipu_is_irt_chan(dma_chan))
1217 dev_err(ipu->dev, "Width must be 8 pixel multiple\n");
/* VDI output has its own hardware limits on frame geometry. */
1221 if (_ipu_is_vdi_out_chan(dma_chan) &&
1222 ((width < 16) || (height < 16) || (width % 2) || (height % 4))) {
1223 dev_err(ipu->dev, "vdi width/height limited err\n");
1227 /* IPUv3EX and IPUv3M support triple buffer */
1228 if ((!_ipu_is_trb_chan(dma_chan)) && phyaddr_2) {
1229 dev_err(ipu->dev, "Chan%d doesn't support triple buffer "
1230 "mode\n", dma_chan);
/* Triple buffering requires buffer 1 to be populated as well. */
1233 if (!phyaddr_1 && phyaddr_2) {
1234 dev_err(ipu->dev, "Chan%d's buf1 physical addr is NULL for "
1235 "triple buffer mode\n", dma_chan);
1239 mutex_lock(&ipu->mutex_lock);
1241 /* Build parameter memory data for DMA channel */
1242 _ipu_ch_param_init(ipu, dma_chan, pixel_fmt, width, height, stride, u, v, 0,
1243 phyaddr_0, phyaddr_1, phyaddr_2);
1245 /* Set correlative channel parameter of local alpha channel */
1246 if ((_ipu_is_ic_graphic_chan(dma_chan) ||
1247 _ipu_is_dp_graphic_chan(dma_chan)) &&
1248 (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] == true)) {
1249 _ipu_ch_param_set_alpha_use_separate_channel(ipu, dma_chan, true);
1250 _ipu_ch_param_set_alpha_buffer_memory(ipu, dma_chan);
1251 _ipu_ch_param_set_alpha_condition_read(ipu, dma_chan);
1252 /* fix alpha width as 8 and burst size as 16*/
1253 _ipu_ch_params_set_alpha_width(ipu, dma_chan, 8);
1254 _ipu_ch_param_set_burst_size(ipu, dma_chan, 16);
1255 } else if (_ipu_is_ic_graphic_chan(dma_chan) &&
1256 ipu_pixel_format_has_alpha(pixel_fmt))
1257 _ipu_ch_param_set_alpha_use_separate_channel(ipu, dma_chan, false);
1260 _ipu_ch_param_set_rotation(ipu, dma_chan, rot_mode);
1262 /* IC and ROT channels have restriction of 8 or 16 pix burst length */
1263 if (_ipu_is_ic_chan(dma_chan) || _ipu_is_vdi_out_chan(dma_chan)) {
1264 if ((width % 16) == 0)
1265 _ipu_ch_param_set_burst_size(ipu, dma_chan, 16);
1267 _ipu_ch_param_set_burst_size(ipu, dma_chan, 8);
1268 } else if (_ipu_is_irt_chan(dma_chan)) {
1269 _ipu_ch_param_set_burst_size(ipu, dma_chan, 8);
1270 _ipu_ch_param_set_block_mode(ipu, dma_chan);
1271 } else if (_ipu_is_dmfc_chan(dma_chan)) {
/* DMFC configuration reuses the burst size already programmed in CPMEM. */
1272 burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
1273 _ipu_dmfc_set_wait4eot(ipu, dma_chan, width);
1274 _ipu_dmfc_set_burst_size(ipu, dma_chan, burst_size);
1277 if (_ipu_disp_chan_is_interlaced(ipu, channel) ||
1278 ipu->chan_is_interlaced[dma_chan])
1279 _ipu_ch_param_set_interlaced_scan(ipu, dma_chan);
1281 if (_ipu_is_ic_chan(dma_chan) || _ipu_is_irt_chan(dma_chan) ||
1282 _ipu_is_vdi_out_chan(dma_chan)) {
1283 burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
1284 _ipu_ic_idma_init(ipu, dma_chan, width, height, burst_size,
1286 } else if (_ipu_is_smfc_chan(dma_chan)) {
1287 burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
1289 * This is different from IPUv3 spec, but it is confirmed
1290 * in IPUforum that SMFC burst size should be NPB[6:3]
1291 * when IDMAC works in 16-bit generic data mode.
1293 if (pixel_fmt == IPU_PIX_FMT_GENERIC)
1294 /* 8 bits per pixel */
1295 burst_size = burst_size >> 4;
1296 else if (pixel_fmt == IPU_PIX_FMT_GENERIC_16)
1297 /* 16 bits per pixel */
1298 burst_size = burst_size >> 3;
1300 burst_size = burst_size >> 2;
1301 _ipu_smfc_set_burst_size(ipu, channel, burst_size-1);
/* NOTE(review): the lock-enable / AXI-id selection below appears
 * devtype-dependent (IPU_V3H special-cased) — confirm against the
 * full source, as several branches are not visible here. */
1305 if (idma_is_set(ipu, IDMAC_CHA_PRI, dma_chan)) {
1306 unsigned reg = IDMAC_CH_LOCK_EN_1;
1308 if (ipu->pdata->devtype == IPU_V3H) {
1309 _ipu_ch_param_set_axi_id(ipu, dma_chan, 0);
1345 reg = IDMAC_CH_LOCK_EN_2;
1349 reg = IDMAC_CH_LOCK_EN_2;
1353 reg = IDMAC_CH_LOCK_EN_2;
1357 reg = IDMAC_CH_LOCK_EN_2;
1361 reg = IDMAC_CH_LOCK_EN_2;
1365 reg = IDMAC_CH_LOCK_EN_2;
1371 value |= ipu_idmac_read(ipu, reg);
1372 ipu_idmac_write(ipu, value, reg);
1374 _ipu_ch_param_set_axi_id(ipu, dma_chan, 1);
1376 if (ipu->pdata->devtype == IPU_V3H)
1377 _ipu_ch_param_set_axi_id(ipu, dma_chan, 1);
1380 _ipu_ch_param_dump(ipu, dma_chan);
/* Triple-buffer setup: leave double-buffer mode, enter TRB mode. */
1382 if (phyaddr_2 && g_ipu_hw_rev >= IPU_V3DEX) {
1383 reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(dma_chan));
1384 reg &= ~idma_mask(dma_chan);
1385 ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(dma_chan));
1387 reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
1388 reg |= idma_mask(dma_chan);
1389 ipu_cm_write(ipu, reg, IPU_CHA_TRB_MODE_SEL(dma_chan));
1391 /* Set IDMAC third buffer's cpmem number */
1392 /* See __ipu_ch_get_third_buf_cpmem_num() for mapping */
1393 ipu_idmac_write(ipu, 0x00444047L, IDMAC_SUB_ADDR_4);
1394 ipu_idmac_write(ipu, 0x46004241L, IDMAC_SUB_ADDR_3);
1395 ipu_idmac_write(ipu, 0x00000045L, IDMAC_SUB_ADDR_1);
1397 /* Reset to buffer 0 */
1398 ipu_cm_write(ipu, tri_cur_buf_mask(dma_chan),
1399 IPU_CHA_TRIPLE_CUR_BUF(dma_chan));
/* Double- (or single-) buffer setup: leave TRB mode, program DB mode. */
1401 reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
1402 reg &= ~idma_mask(dma_chan);
1403 ipu_cm_write(ipu, reg, IPU_CHA_TRB_MODE_SEL(dma_chan));
1405 reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(dma_chan));
1407 reg |= idma_mask(dma_chan);
1409 reg &= ~idma_mask(dma_chan);
1410 ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(dma_chan));
1412 /* Reset to buffer 0 */
1413 ipu_cm_write(ipu, idma_mask(dma_chan),
1414 IPU_CHA_CUR_BUF(dma_chan));
1418 mutex_unlock(&ipu->mutex_lock);
1422 EXPORT_SYMBOL(ipu_init_channel_buffer);
1425 * This function is called to update the physical address of a buffer for
1426 * a logical IPU channel.
1428 * @param ipu ipu handler
1429 * @param channel Input parameter for the logical channel ID.
1431 * @param type Input parameter which buffer to initialize.
1433 * @param bufNum Input parameter for buffer number to update.
1434 * 0 or 1 are the only valid values.
1436 * @param phyaddr Input parameter buffer physical address.
1438 * @return This function returns 0 on success or negative error code on
1439 * fail. This function will fail if the buffer is set to ready.
1441 int32_t ipu_update_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
1442 ipu_buffer_t type, uint32_t bufNum, dma_addr_t phyaddr)
1446 uint32_t dma_chan = channel_2_dma(channel, type);
1447 unsigned long lock_flags;
1449 if (dma_chan == IDMA_CHAN_INVALID)
/* Ready-bit registers are shared with IRQ context; take the IRQ-safe lock. */
1452 spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
1454 reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan));
1455 else if (bufNum == 1)
1456 reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan));
1458 reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan));
/* Only retarget the buffer when it is not currently marked ready,
 * otherwise the DMA engine could still be using the old address. */
1460 if ((reg & idma_mask(dma_chan)) == 0)
1461 _ipu_ch_param_set_buffer(ipu, dma_chan, bufNum, phyaddr);
1464 spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
1468 EXPORT_SYMBOL(ipu_update_channel_buffer);
1471 * This function is called to update the band mode setting for
1472 * a logical IPU channel.
1474 * @param ipu ipu handler
1476 * @param channel Input parameter for the logical channel ID.
1478 * @param type Input parameter which buffer to initialize.
1480 * @param band_height Input parameter for band lines:
1481 * should be log2(4/8/16/32/64/128/256).
1483 * @return This function returns 0 on success or negative error code on
1486 int32_t ipu_set_channel_bandmode(struct ipu_soc *ipu, ipu_channel_t channel,
1487 ipu_buffer_t type, uint32_t band_height)
1491 uint32_t dma_chan = channel_2_dma(channel, type);
/* band_height is log2 of the band line count; only 2..8 (4..256 lines)
 * are accepted. */
1493 if ((2 > band_height) || (8 < band_height))
1496 mutex_lock(&ipu->mutex_lock);
/* Enable band mode for this DMA channel in the IDMAC, then program the
 * band height into the channel's parameter memory. */
1498 reg = ipu_idmac_read(ipu, IDMAC_BAND_EN(dma_chan));
1499 reg |= 1 << (dma_chan % 32);
1500 ipu_idmac_write(ipu, reg, IDMAC_BAND_EN(dma_chan));
1502 _ipu_ch_param_set_bandmode(ipu, dma_chan, band_height);
1503 dev_dbg(ipu->dev, "dma_chan:%d, band_height:%d.\n\n",
1504 dma_chan, 1 << band_height);
1505 mutex_unlock(&ipu->mutex_lock);
1509 EXPORT_SYMBOL(ipu_set_channel_bandmode);
1512 * This function is called to initialize a buffer for logical IPU channel.
1514 * @param ipu ipu handler
1515 * @param channel Input parameter for the logical channel ID.
1517 * @param type Input parameter which buffer to initialize.
1519 * @param pixel_fmt Input parameter for pixel format of buffer.
1520 * Pixel format is a FOURCC ASCII code.
1522 * @param width Input parameter for width of buffer in pixels.
1524 * @param height Input parameter for height of buffer in pixels.
1526 * @param stride Input parameter for stride length of buffer
1529 * @param u predefined private u offset for additional cropping,
1532 * @param v predefined private v offset for additional cropping,
1535 * @param vertical_offset vertical offset for Y coordinate
1536 * in the existing frame
1539 * @param horizontal_offset horizontal offset for X coordinate
1540 * in the existing frame
1543 * @return Returns 0 on success or negative error code on fail
1544 * This function will fail if any buffer is set to ready.
1547 int32_t ipu_update_channel_offset(struct ipu_soc *ipu,
1548 ipu_channel_t channel, ipu_buffer_t type,
1550 uint16_t width, uint16_t height,
1552 uint32_t u, uint32_t v,
1553 uint32_t vertical_offset, uint32_t horizontal_offset)
1556 uint32_t dma_chan = channel_2_dma(channel, type);
1557 unsigned long lock_flags;
1559 if (dma_chan == IDMA_CHAN_INVALID)
1562 spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
/* Refuse the update while any of the channel's buffers is marked ready:
 * buf0/buf1 always, and buf2 only when the channel is actually in
 * triple-buffer (TRB) mode. */
1563 if ((ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan)) & idma_mask(dma_chan)) ||
1564 (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan)) & idma_mask(dma_chan)) ||
1565 ((ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan)) & idma_mask(dma_chan)) &&
1566 (ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan)) & idma_mask(dma_chan)) &&
1567 _ipu_is_trb_chan(dma_chan)))
1570 _ipu_ch_offset_update(ipu, dma_chan, pixel_fmt, width, height, stride,
1571 u, v, 0, vertical_offset, horizontal_offset);
1572 spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
1576 EXPORT_SYMBOL(ipu_update_channel_offset);
1580 * This function is called to set a channel's buffer as ready.
1582 * @param ipu ipu handler
1583 * @param channel Input parameter for the logical channel ID.
1585 * @param type Input parameter which buffer to initialize.
1587 * @param bufNum Input parameter for which buffer number set to
1590 * @return Returns 0 on success or negative error code on fail
1592 int32_t ipu_select_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
1593 ipu_buffer_t type, uint32_t bufNum)
1595 uint32_t dma_chan = channel_2_dma(channel, type);
1596 unsigned long lock_flags;
1598 if (dma_chan == IDMA_CHAN_INVALID)
1601 spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
1602 /* Mark buffer to be ready. */
1604 ipu_cm_write(ipu, idma_mask(dma_chan),
1605 IPU_CHA_BUF0_RDY(dma_chan));
1606 else if (bufNum == 1)
1607 ipu_cm_write(ipu, idma_mask(dma_chan),
1608 IPU_CHA_BUF1_RDY(dma_chan));
/* Any other bufNum falls through to buffer 2 (triple-buffer mode). */
1610 ipu_cm_write(ipu, idma_mask(dma_chan),
1611 IPU_CHA_BUF2_RDY(dma_chan));
1612 spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
1616 EXPORT_SYMBOL(ipu_select_buffer);
1619 * This function is called to set the buffers of the multi-field VDI input channels as ready.
1621 * @param ipu ipu handler
1622 * @param bufNum Input parameter for which buffer number set to
1625 * @return Returns 0 on success or negative error code on fail
1627 int32_t ipu_select_multi_vdi_buffer(struct ipu_soc *ipu, uint32_t bufNum)
/* Build a combined ready mask covering the previous (_P), current and
 * next (_N) VDI de-interlacer input channels so all three fields are
 * marked ready in one register write. */
1630 uint32_t dma_chan = channel_2_dma(MEM_VDI_PRP_VF_MEM, IPU_INPUT_BUFFER);
1632 idma_mask(channel_2_dma(MEM_VDI_PRP_VF_MEM_P, IPU_INPUT_BUFFER))|
1633 idma_mask(dma_chan)|
1634 idma_mask(channel_2_dma(MEM_VDI_PRP_VF_MEM_N, IPU_INPUT_BUFFER));
1635 unsigned long lock_flags;
1637 spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
1638 /* Mark buffers to be ready. */
1640 ipu_cm_write(ipu, mask_bit, IPU_CHA_BUF0_RDY(dma_chan));
1642 ipu_cm_write(ipu, mask_bit, IPU_CHA_BUF1_RDY(dma_chan));
1643 spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
1647 EXPORT_SYMBOL(ipu_select_multi_vdi_buffer);
/* Routing tables for the IPU Frame Synchronization (FS) unit, indexed by
 * IPU_CHAN_ID(): selector values programmed into the FS_PROC_FLOW*/
/* and FS_DISP_FLOW1 registers by ipu_link_channels(). NA marks channels
 * that cannot be used as a source/destination for that table. */
1650 static int proc_dest_sel[] = {
1651 0, 1, 1, 3, 5, 5, 4, 7, 8, 9, 10, 11, 12, 14, 15, 16,
1652 0, 1, 1, 5, 5, 5, 5, 5, 7, 8, 9, 10, 11, 12, 14, 31 };
1653 static int proc_src_sel[] = { 0, 6, 7, 6, 7, 8, 5, NA, NA, NA,
1654 NA, NA, NA, NA, NA, 1, 2, 3, 4, 7, 8, NA, 8, NA };
1655 static int disp_src_sel[] = { 0, 6, 7, 8, 3, 4, 5, NA, NA, NA,
1656 NA, NA, NA, NA, NA, 1, NA, 2, NA, 3, 4, 4, 4, 4 };
1660 * This function links 2 channels together for automatic frame
1661 * synchronization. The output of the source channel is linked to the input of
1662 * the destination channel.
1664 * @param ipu ipu handler
1665 * @param src_ch Input parameter for the logical channel ID of
1666 * the source channel.
1668 * @param dest_ch Input parameter for the logical channel ID of
1669 * the destination channel.
1671 * @return This function returns 0 on success or negative error code on
1674 int32_t ipu_link_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch)
1677 uint32_t fs_proc_flow1;
1678 uint32_t fs_proc_flow2;
1679 uint32_t fs_proc_flow3;
1680 uint32_t fs_disp_flow1;
1682 mutex_lock(&ipu->mutex_lock);
/* Read-modify-write of all four FS routing registers: first pick the
 * destination selector based on src_ch, then the source selector based
 * on dest_ch, then write everything back. */
1684 fs_proc_flow1 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
1685 fs_proc_flow2 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW2);
1686 fs_proc_flow3 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW3);
1687 fs_disp_flow1 = ipu_cm_read(ipu, IPU_FS_DISP_FLOW1);
/* Program the destination selector field for the source channel. */
1691 fs_proc_flow3 &= ~FS_SMFC0_DEST_SEL_MASK;
1693 proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1694 FS_SMFC0_DEST_SEL_OFFSET;
1697 fs_proc_flow3 &= ~FS_SMFC1_DEST_SEL_MASK;
1699 proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1700 FS_SMFC1_DEST_SEL_OFFSET;
1703 fs_proc_flow3 &= ~FS_SMFC2_DEST_SEL_MASK;
1705 proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1706 FS_SMFC2_DEST_SEL_OFFSET;
1709 fs_proc_flow3 &= ~FS_SMFC3_DEST_SEL_MASK;
1711 proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1712 FS_SMFC3_DEST_SEL_OFFSET;
1714 case CSI_PRP_ENC_MEM:
1715 fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
1717 proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1718 FS_PRPENC_DEST_SEL_OFFSET;
1720 case CSI_PRP_VF_MEM:
1721 fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1723 proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1724 FS_PRPVF_DEST_SEL_OFFSET;
1727 fs_proc_flow2 &= ~FS_PP_DEST_SEL_MASK;
1729 proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1730 FS_PP_DEST_SEL_OFFSET;
1732 case MEM_ROT_PP_MEM:
1733 fs_proc_flow2 &= ~FS_PP_ROT_DEST_SEL_MASK;
1735 proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1736 FS_PP_ROT_DEST_SEL_OFFSET;
1738 case MEM_PRP_ENC_MEM:
1739 fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
1741 proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1742 FS_PRPENC_DEST_SEL_OFFSET;
1744 case MEM_ROT_ENC_MEM:
1745 fs_proc_flow2 &= ~FS_PRPENC_ROT_DEST_SEL_MASK;
1747 proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1748 FS_PRPENC_ROT_DEST_SEL_OFFSET;
1750 case MEM_PRP_VF_MEM:
1751 fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1753 proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1754 FS_PRPVF_DEST_SEL_OFFSET;
1756 case MEM_VDI_PRP_VF_MEM:
1757 fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1759 proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1760 FS_PRPVF_DEST_SEL_OFFSET;
1762 case MEM_ROT_VF_MEM:
1763 fs_proc_flow2 &= ~FS_PRPVF_ROT_DEST_SEL_MASK;
1765 proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1766 FS_PRPVF_ROT_DEST_SEL_OFFSET;
/* VDOA destination is not table-driven: it feeds either the VDI or
 * the IC (post-processor) depending on dest_ch. */
1769 fs_proc_flow3 &= ~FS_VDOA_DEST_SEL_MASK;
1770 if (MEM_VDI_MEM == dest_ch)
1771 fs_proc_flow3 |= FS_VDOA_DEST_SEL_VDI;
1772 else if (MEM_PP_MEM == dest_ch)
1773 fs_proc_flow3 |= FS_VDOA_DEST_SEL_IC;
/* Program the source selector field for the destination channel. */
1786 fs_proc_flow1 &= ~FS_PP_SRC_SEL_MASK;
1787 if (MEM_VDOA_MEM == src_ch)
1788 fs_proc_flow1 |= FS_PP_SRC_SEL_VDOA;
1790 fs_proc_flow1 |= proc_src_sel[IPU_CHAN_ID(src_ch)] <<
1791 FS_PP_SRC_SEL_OFFSET;
1793 case MEM_ROT_PP_MEM:
1794 fs_proc_flow1 &= ~FS_PP_ROT_SRC_SEL_MASK;
1796 proc_src_sel[IPU_CHAN_ID(src_ch)] <<
1797 FS_PP_ROT_SRC_SEL_OFFSET;
1799 case MEM_PRP_ENC_MEM:
1800 fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1802 proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
1804 case MEM_ROT_ENC_MEM:
1805 fs_proc_flow1 &= ~FS_PRPENC_ROT_SRC_SEL_MASK;
1807 proc_src_sel[IPU_CHAN_ID(src_ch)] <<
1808 FS_PRPENC_ROT_SRC_SEL_OFFSET;
1810 case MEM_PRP_VF_MEM:
1811 fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1813 proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
1815 case MEM_VDI_PRP_VF_MEM:
1816 fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1818 proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
1820 case MEM_ROT_VF_MEM:
1821 fs_proc_flow1 &= ~FS_PRPVF_ROT_SRC_SEL_MASK;
1823 proc_src_sel[IPU_CHAN_ID(src_ch)] <<
1824 FS_PRPVF_ROT_SRC_SEL_OFFSET;
1827 fs_disp_flow1 &= ~FS_DC1_SRC_SEL_MASK;
1829 disp_src_sel[IPU_CHAN_ID(src_ch)] << FS_DC1_SRC_SEL_OFFSET;
1832 fs_disp_flow1 &= ~FS_DP_SYNC0_SRC_SEL_MASK;
1834 disp_src_sel[IPU_CHAN_ID(src_ch)] <<
1835 FS_DP_SYNC0_SRC_SEL_OFFSET;
1838 fs_disp_flow1 &= ~FS_DP_SYNC1_SRC_SEL_MASK;
1840 disp_src_sel[IPU_CHAN_ID(src_ch)] <<
1841 FS_DP_SYNC1_SRC_SEL_OFFSET;
1844 fs_disp_flow1 &= ~FS_DC2_SRC_SEL_MASK;
1846 disp_src_sel[IPU_CHAN_ID(src_ch)] << FS_DC2_SRC_SEL_OFFSET;
1849 fs_disp_flow1 &= ~FS_DP_ASYNC0_SRC_SEL_MASK;
1851 disp_src_sel[IPU_CHAN_ID(src_ch)] <<
1852 FS_DP_ASYNC0_SRC_SEL_OFFSET;
1855 fs_disp_flow1 &= ~FS_DP_ASYNC1_SRC_SEL_MASK;
1857 disp_src_sel[IPU_CHAN_ID(src_ch)] <<
1858 FS_DP_ASYNC1_SRC_SEL_OFFSET;
1861 fs_proc_flow1 &= ~FS_VDI_SRC_SEL_MASK;
1862 if (MEM_VDOA_MEM == src_ch)
1863 fs_proc_flow1 |= FS_VDI_SRC_SEL_VDOA;
/* Commit all four flow registers in one pass. */
1874 ipu_cm_write(ipu, fs_proc_flow1, IPU_FS_PROC_FLOW1);
1875 ipu_cm_write(ipu, fs_proc_flow2, IPU_FS_PROC_FLOW2);
1876 ipu_cm_write(ipu, fs_proc_flow3, IPU_FS_PROC_FLOW3);
1877 ipu_cm_write(ipu, fs_disp_flow1, IPU_FS_DISP_FLOW1);
1880 mutex_unlock(&ipu->mutex_lock);
1883 EXPORT_SYMBOL(ipu_link_channels);
1886 * This function unlinks 2 channels and disables automatic frame
1889 * @param ipu ipu handler
1890 * @param src_ch Input parameter for the logical channel ID of
1891 * the source channel.
1893 * @param dest_ch Input parameter for the logical channel ID of
1894 * the destination channel.
1896 * @return This function returns 0 on success or negative error code on
1899 int32_t ipu_unlink_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch)
1902 uint32_t fs_proc_flow1;
1903 uint32_t fs_proc_flow2;
1904 uint32_t fs_proc_flow3;
1905 uint32_t fs_disp_flow1;
1907 mutex_lock(&ipu->mutex_lock);
/* Mirror of ipu_link_channels(): clear (rather than set) the selector
 * fields for src_ch's destination and dest_ch's source, then write the
 * four FS routing registers back. */
1909 fs_proc_flow1 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
1910 fs_proc_flow2 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW2);
1911 fs_proc_flow3 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW3);
1912 fs_disp_flow1 = ipu_cm_read(ipu, IPU_FS_DISP_FLOW1);
/* Clear the destination selector for the source channel. */
1916 fs_proc_flow3 &= ~FS_SMFC0_DEST_SEL_MASK;
1919 fs_proc_flow3 &= ~FS_SMFC1_DEST_SEL_MASK;
1922 fs_proc_flow3 &= ~FS_SMFC2_DEST_SEL_MASK;
1925 fs_proc_flow3 &= ~FS_SMFC3_DEST_SEL_MASK;
1927 case CSI_PRP_ENC_MEM:
1928 fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
1930 case CSI_PRP_VF_MEM:
1931 fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1934 fs_proc_flow2 &= ~FS_PP_DEST_SEL_MASK;
1936 case MEM_ROT_PP_MEM:
1937 fs_proc_flow2 &= ~FS_PP_ROT_DEST_SEL_MASK;
1939 case MEM_PRP_ENC_MEM:
1940 fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
1942 case MEM_ROT_ENC_MEM:
1943 fs_proc_flow2 &= ~FS_PRPENC_ROT_DEST_SEL_MASK;
1945 case MEM_PRP_VF_MEM:
1946 fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1948 case MEM_VDI_PRP_VF_MEM:
1949 fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1951 case MEM_ROT_VF_MEM:
1952 fs_proc_flow2 &= ~FS_PRPVF_ROT_DEST_SEL_MASK;
1955 fs_proc_flow3 &= ~FS_VDOA_DEST_SEL_MASK;
/* Clear the source selector for the destination channel. */
1964 fs_proc_flow1 &= ~FS_PP_SRC_SEL_MASK;
1966 case MEM_ROT_PP_MEM:
1967 fs_proc_flow1 &= ~FS_PP_ROT_SRC_SEL_MASK;
1969 case MEM_PRP_ENC_MEM:
1970 fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1972 case MEM_ROT_ENC_MEM:
1973 fs_proc_flow1 &= ~FS_PRPENC_ROT_SRC_SEL_MASK;
1975 case MEM_PRP_VF_MEM:
1976 fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1978 case MEM_VDI_PRP_VF_MEM:
1979 fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1981 case MEM_ROT_VF_MEM:
1982 fs_proc_flow1 &= ~FS_PRPVF_ROT_SRC_SEL_MASK;
1985 fs_disp_flow1 &= ~FS_DC1_SRC_SEL_MASK;
1988 fs_disp_flow1 &= ~FS_DP_SYNC0_SRC_SEL_MASK;
1991 fs_disp_flow1 &= ~FS_DP_SYNC1_SRC_SEL_MASK;
1994 fs_disp_flow1 &= ~FS_DC2_SRC_SEL_MASK;
1997 fs_disp_flow1 &= ~FS_DP_ASYNC0_SRC_SEL_MASK;
2000 fs_disp_flow1 &= ~FS_DP_ASYNC1_SRC_SEL_MASK;
2003 fs_proc_flow1 &= ~FS_VDI_SRC_SEL_MASK;
/* Commit the cleared routing. */
2010 ipu_cm_write(ipu, fs_proc_flow1, IPU_FS_PROC_FLOW1);
2011 ipu_cm_write(ipu, fs_proc_flow2, IPU_FS_PROC_FLOW2);
2012 ipu_cm_write(ipu, fs_proc_flow3, IPU_FS_PROC_FLOW3);
2013 ipu_cm_write(ipu, fs_disp_flow1, IPU_FS_DISP_FLOW1);
2016 mutex_unlock(&ipu->mutex_lock);
2019 EXPORT_SYMBOL(ipu_unlink_channels);
2022 * This function checks whether a logical channel is enabled.
2024 * @param ipu ipu handler
2025 * @param channel Input parameter for the logical channel ID.
2027 * @return This function returns 1 while request channel is enabled or
2028 * 0 for not enabled.
2030 int32_t ipu_is_channel_busy(struct ipu_soc *ipu, ipu_channel_t channel)
/* A channel is "busy" when either its input or its output IDMAC channel
 * is still enabled in the IDMAC_CHA_EN register. */
2036 out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
2037 in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
2039 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
2040 if (reg & idma_mask(in_dma))
2042 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
2043 if (reg & idma_mask(out_dma))
2047 EXPORT_SYMBOL(ipu_is_channel_busy);
2050 * This function enables a logical channel.
2052 * @param ipu ipu handler
2053 * @param channel Input parameter for the logical channel ID.
2055 * @return This function returns 0 on success or negative error code on
2058 int32_t ipu_enable_channel(struct ipu_soc *ipu, ipu_channel_t channel)
2067 mutex_lock(&ipu->mutex_lock);
/* Reject double-enable; the enabled state is tracked in a per-IPU bitmask. */
2069 if (ipu->channel_enable_mask & (1L << IPU_CHAN_ID(channel))) {
2070 dev_err(ipu->dev, "Warning: channel already enabled %d\n",
2071 IPU_CHAN_ID(channel));
2072 mutex_unlock(&ipu->mutex_lock);
2076 /* Get input and output dma channels */
2077 out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
2078 in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
/* Turn on every IPU submodule that currently has a non-zero use count. */
2080 ipu_conf = ipu_cm_read(ipu, IPU_CONF);
2081 if (ipu->di_use_count[0] > 0) {
2082 ipu_conf |= IPU_CONF_DI0_EN;
2084 if (ipu->di_use_count[1] > 0) {
2085 ipu_conf |= IPU_CONF_DI1_EN;
2087 if (ipu->dp_use_count > 0)
2088 ipu_conf |= IPU_CONF_DP_EN;
2089 if (ipu->dc_use_count > 0)
2090 ipu_conf |= IPU_CONF_DC_EN;
2091 if (ipu->dmfc_use_count > 0)
2092 ipu_conf |= IPU_CONF_DMFC_EN;
2093 if (ipu->ic_use_count > 0)
2094 ipu_conf |= IPU_CONF_IC_EN;
2095 if (ipu->vdi_use_count > 0) {
2096 ipu_conf |= IPU_CONF_ISP_EN;
2097 ipu_conf |= IPU_CONF_VDI_EN;
2098 ipu_conf |= IPU_CONF_IC_INPUT;
2100 if (ipu->rot_use_count > 0)
2101 ipu_conf |= IPU_CONF_ROT_EN;
2102 if (ipu->smfc_use_count > 0)
2103 ipu_conf |= IPU_CONF_SMFC_EN;
2104 ipu_cm_write(ipu, ipu_conf, IPU_CONF);
/* Enable the main input/output IDMAC channels where valid. */
2106 if (idma_is_valid(in_dma)) {
2107 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
2108 ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_CHA_EN(in_dma));
2110 if (idma_is_valid(out_dma)) {
2111 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
2112 ipu_idmac_write(ipu, reg | idma_mask(out_dma), IDMAC_CHA_EN(out_dma));
/* Optionally enable the secondary (graphics) and third (alpha) input
 * channels for the IC/DP channels that support them. */
2115 if ((ipu->sec_chan_en[IPU_CHAN_ID(channel)]) &&
2116 ((channel == MEM_PP_MEM) || (channel == MEM_PRP_VF_MEM) ||
2117 (channel == MEM_VDI_PRP_VF_MEM))) {
2118 sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
2119 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(sec_dma));
2120 ipu_idmac_write(ipu, reg | idma_mask(sec_dma), IDMAC_CHA_EN(sec_dma));
2122 if ((ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) &&
2123 ((channel == MEM_PP_MEM) || (channel == MEM_PRP_VF_MEM))) {
2124 thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
2125 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
2126 ipu_idmac_write(ipu, reg | idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
2128 sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
2129 reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
2130 ipu_idmac_write(ipu, reg | idma_mask(sec_dma), IDMAC_SEP_ALPHA);
2131 } else if ((ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) &&
2132 ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC))) {
2133 thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
2134 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
2135 ipu_idmac_write(ipu, reg | idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
2136 reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
2137 ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_SEP_ALPHA);
/* Display channels additionally need watermarking and the DP/DC path. */
2140 if ((channel == MEM_DC_SYNC) || (channel == MEM_BG_SYNC) ||
2141 (channel == MEM_FG_SYNC)) {
2142 reg = ipu_idmac_read(ipu, IDMAC_WM_EN(in_dma));
2143 ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_WM_EN(in_dma));
2145 _ipu_dp_dc_enable(ipu, channel);
2148 if (_ipu_is_ic_chan(in_dma) || _ipu_is_ic_chan(out_dma) ||
2149 _ipu_is_irt_chan(in_dma) || _ipu_is_irt_chan(out_dma) ||
2150 _ipu_is_vdi_out_chan(out_dma))
2151 _ipu_ic_enable_task(ipu, channel);
2153 ipu->channel_enable_mask |= 1L << IPU_CHAN_ID(channel);
2155 mutex_unlock(&ipu->mutex_lock);
2159 EXPORT_SYMBOL(ipu_enable_channel);
2162 * This function checks whether a buffer is ready for a logical channel.
2164 * @param ipu ipu handler
2165 * @param channel Input parameter for the logical channel ID.
2167 * @param type Input parameter which buffer to clear.
2169 * @param bufNum Input parameter for which buffer number clear
2173 int32_t ipu_check_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
2176 uint32_t dma_chan = channel_2_dma(channel, type);
2178 unsigned long lock_flags;
2180 if (dma_chan == IDMA_CHAN_INVALID)
/* Sample the ready register for the requested buffer under the IRQ-safe
 * lock; the actual bit test happens after the lock is dropped. */
2183 spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
2185 reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan));
2186 else if (bufNum == 1)
2187 reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan));
2189 reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan));
2190 spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
2192 if (reg & idma_mask(dma_chan))
2197 EXPORT_SYMBOL(ipu_check_buffer_ready);
2200 * This function clears the buffer-ready flag for a logical channel.
2202 * @param ipu ipu handler
2203 * @param channel Input parameter for the logical channel ID.
2205 * @param type Input parameter which buffer to clear.
2207 * @param bufNum Input parameter for which buffer number clear
/* Lockless core of ipu_clear_buffer_ready(); caller must hold
 * rdy_reg_spin_lock. Uses the GPR write-one-to-clear mode so that a
 * write to the BUFn_RDY register clears the bit instead of setting it. */
2211 void _ipu_clear_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
2214 uint32_t dma_ch = channel_2_dma(channel, type);
2216 if (!idma_is_valid(dma_ch))
2219 ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* write one to clear */
2221 ipu_cm_write(ipu, idma_mask(dma_ch),
2222 IPU_CHA_BUF0_RDY(dma_ch));
2223 else if (bufNum == 1)
2224 ipu_cm_write(ipu, idma_mask(dma_ch),
2225 IPU_CHA_BUF1_RDY(dma_ch));
2227 ipu_cm_write(ipu, idma_mask(dma_ch),
2228 IPU_CHA_BUF2_RDY(dma_ch));
2229 ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
/* Public wrapper: clear a buffer's ready flag with the IRQ-safe ready
 * register lock held. */
2232 void ipu_clear_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
2235 unsigned long lock_flags;
2237 spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
2238 _ipu_clear_buffer_ready(ipu, channel, type, bufNum);
2239 spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
2241 EXPORT_SYMBOL(ipu_clear_buffer_ready);
2244 * This function disables a logical channel.
2246 * @param ipu ipu handler
2247 * @param channel Input parameter for the logical channel ID.
2249 * @param wait_for_stop Flag to set whether to wait for channel end
2250 * of frame or return immediately.
2252 * @return This function returns 0 on success or negative error code on
2255 int32_t ipu_disable_channel(struct ipu_soc *ipu, ipu_channel_t channel, bool wait_for_stop)
2260 uint32_t sec_dma = NO_DMA;
2261 uint32_t thrd_dma = NO_DMA;
2262 uint16_t fg_pos_x, fg_pos_y;
2263 unsigned long lock_flags;
2265 mutex_lock(&ipu->mutex_lock);
/* Ignore requests for channels that are not currently enabled. */
2267 if ((ipu->channel_enable_mask & (1L << IPU_CHAN_ID(channel))) == 0) {
2268 dev_dbg(ipu->dev, "Channel already disabled %d\n",
2269 IPU_CHAN_ID(channel));
2270 mutex_unlock(&ipu->mutex_lock);
2274 /* Get input and output dma channels */
2275 out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
2276 in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
/* Nothing to do if neither DMA channel is enabled in the IDMAC. */
2278 if ((idma_is_valid(in_dma) &&
2279 !idma_is_set(ipu, IDMAC_CHA_EN, in_dma))
2280 && (idma_is_valid(out_dma) &&
2281 !idma_is_set(ipu, IDMAC_CHA_EN, out_dma))) {
2282 mutex_unlock(&ipu->mutex_lock);
2286 if (ipu->sec_chan_en[IPU_CHAN_ID(channel)])
2287 sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER)
2288 if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) {
2289 sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
2290 thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
/* Display channels: park the FG window at (0,0), shut down the DP/DC
 * path, and for FG wait for a BG EOF so FG does not hit NFB4EOF. */
2293 if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC) ||
2294 (channel == MEM_DC_SYNC)) {
2295 if (channel == MEM_FG_SYNC) {
2296 _ipu_disp_get_window_pos(ipu, channel, &fg_pos_x, &fg_pos_y);
2297 _ipu_disp_set_window_pos(ipu, channel, 0, 0);
2300 _ipu_dp_dc_disable(ipu, channel, false);
2303 * wait for BG channel EOF then disable FG-IDMAC,
2304 * it avoid FG NFB4EOF error.
2306 if ((channel == MEM_FG_SYNC) && (ipu_is_channel_busy(ipu, MEM_BG_SYNC))) {
2309 ipu_cm_write(ipu, IPUIRQ_2_MASK(IPU_IRQ_BG_SYNC_EOF),
2310 IPUIRQ_2_STATREG(IPU_IRQ_BG_SYNC_EOF));
2311 while ((ipu_cm_read(ipu, IPUIRQ_2_STATREG(IPU_IRQ_BG_SYNC_EOF)) &
2312 IPUIRQ_2_MASK(IPU_IRQ_BG_SYNC_EOF)) == 0) {
2316 dev_err(ipu->dev, "warning: wait for bg sync eof timeout\n");
/* Non-display channels: optionally busy-wait for each active DMA
 * channel's EOF before disabling (SMFC/CSI capture paths excluded). */
2321 } else if (wait_for_stop && !_ipu_is_smfc_chan(out_dma) &&
2322 channel != CSI_PRP_VF_MEM && channel != CSI_PRP_ENC_MEM) {
2323 while (idma_is_set(ipu, IDMAC_CHA_BUSY, in_dma) ||
2324 idma_is_set(ipu, IDMAC_CHA_BUSY, out_dma) ||
2325 (ipu->sec_chan_en[IPU_CHAN_ID(channel)] &&
2326 idma_is_set(ipu, IDMAC_CHA_BUSY, sec_dma)) ||
2327 (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] &&
2328 idma_is_set(ipu, IDMAC_CHA_BUSY, thrd_dma))) {
2329 uint32_t irq = 0xffffffff;
2330 int timeout = 50000;
2332 if (idma_is_set(ipu, IDMAC_CHA_BUSY, out_dma))
2334 if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] &&
2335 idma_is_set(ipu, IDMAC_CHA_BUSY, sec_dma))
2337 if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] &&
2338 idma_is_set(ipu, IDMAC_CHA_BUSY, thrd_dma))
2340 if (idma_is_set(ipu, IDMAC_CHA_BUSY, in_dma))
2343 if (irq == 0xffffffff) {
2344 dev_dbg(ipu->dev, "warning: no channel busy, break\n");
2348 ipu_cm_write(ipu, IPUIRQ_2_MASK(irq),
2349 IPUIRQ_2_STATREG(irq));
2351 dev_dbg(ipu->dev, "warning: channel %d busy, need wait\n", irq);
2353 while (((ipu_cm_read(ipu, IPUIRQ_2_STATREG(irq))
2354 & IPUIRQ_2_MASK(irq)) == 0) &&
2355 (idma_is_set(ipu, IDMAC_CHA_BUSY, irq))) {
2359 ipu_dump_registers(ipu);
2360 dev_err(ipu->dev, "warning: disable ipu dma channel %d during its busy state\n", irq);
2364 dev_dbg(ipu->dev, "wait_time:%d\n", 50000 - timeout);
/* Disable watermarking for display channels. */
2369 if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC) ||
2370 (channel == MEM_DC_SYNC)) {
2371 reg = ipu_idmac_read(ipu, IDMAC_WM_EN(in_dma));
2372 ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_WM_EN(in_dma));
2375 /* Disable IC task */
2376 if (_ipu_is_ic_chan(in_dma) || _ipu_is_ic_chan(out_dma) ||
2377 _ipu_is_irt_chan(in_dma) || _ipu_is_irt_chan(out_dma) ||
2378 _ipu_is_vdi_out_chan(out_dma))
2379 _ipu_ic_disable_task(ipu, channel);
2381 /* Disable DMA channel(s) */
2382 if (idma_is_valid(in_dma)) {
2383 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
2384 ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_CHA_EN(in_dma));
2385 ipu_cm_write(ipu, idma_mask(in_dma), IPU_CHA_CUR_BUF(in_dma));
2386 ipu_cm_write(ipu, tri_cur_buf_mask(in_dma),
2387 IPU_CHA_TRIPLE_CUR_BUF(in_dma));
2389 if (idma_is_valid(out_dma)) {
2390 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
2391 ipu_idmac_write(ipu, reg & ~idma_mask(out_dma), IDMAC_CHA_EN(out_dma));
2392 ipu_cm_write(ipu, idma_mask(out_dma), IPU_CHA_CUR_BUF(out_dma));
2393 ipu_cm_write(ipu, tri_cur_buf_mask(out_dma),
2394 IPU_CHA_TRIPLE_CUR_BUF(out_dma));
2396 if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(sec_dma)) {
2397 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(sec_dma));
2398 ipu_idmac_write(ipu, reg & ~idma_mask(sec_dma), IDMAC_CHA_EN(sec_dma));
2399 ipu_cm_write(ipu, idma_mask(sec_dma), IPU_CHA_CUR_BUF(sec_dma));
2401 if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(thrd_dma)) {
2402 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
2403 ipu_idmac_write(ipu, reg & ~idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
2404 if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC) {
2405 reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
2406 ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_SEP_ALPHA);
2408 reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
2409 ipu_idmac_write(ipu, reg & ~idma_mask(sec_dma), IDMAC_SEP_ALPHA);
2411 ipu_cm_write(ipu, idma_mask(thrd_dma), IPU_CHA_CUR_BUF(thrd_dma));
/* Restore the FG window position saved before the disable. */
2414 if (channel == MEM_FG_SYNC)
2415 _ipu_disp_set_window_pos(ipu, channel, fg_pos_x, fg_pos_y);
2417 spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
2418 /* Set channel buffers NOT to be ready */
2419 if (idma_is_valid(in_dma)) {
2420 _ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 0);
2421 _ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 1);
2422 _ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 2);
2424 if (idma_is_valid(out_dma)) {
2425 _ipu_clear_buffer_ready(ipu, channel, IPU_OUTPUT_BUFFER, 0);
2426 _ipu_clear_buffer_ready(ipu, channel, IPU_OUTPUT_BUFFER, 1);
2428 if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(sec_dma)) {
2429 _ipu_clear_buffer_ready(ipu, channel, IPU_GRAPH_IN_BUFFER, 0);
2430 _ipu_clear_buffer_ready(ipu, channel, IPU_GRAPH_IN_BUFFER, 1);
2432 if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(thrd_dma)) {
2433 _ipu_clear_buffer_ready(ipu, channel, IPU_ALPHA_IN_BUFFER, 0);
2434 _ipu_clear_buffer_ready(ipu, channel, IPU_ALPHA_IN_BUFFER, 1);
2436 spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
2438 ipu->channel_enable_mask &= ~(1L << IPU_CHAN_ID(channel));
2440 mutex_unlock(&ipu->mutex_lock);
2444 EXPORT_SYMBOL(ipu_disable_channel);
2447 * This function enables CSI.
2449 * @param ipu ipu handler
2450 * @param csi csi num 0 or 1
2452 * @return This function returns 0 on success or negative error code on
/* NOTE(review): this listing elides lines (gaps in the inline numbering);
 * the range-check guarding the dev_err below is on an elided line. */
2455 int32_t ipu_enable_csi(struct ipu_soc *ipu, uint32_t csi)
/* Error path: only CSI indices 0 and 1 exist. */
2460 dev_err(ipu->dev, "Wrong csi num_%d\n", csi);
/* Reference-count CSI users under the IPU mutex. */
2465 mutex_lock(&ipu->mutex_lock);
2466 ipu->csi_use_count[csi]++;
/* First user: set the matching CSI enable bit in IPU_CONF
 * (the csi==0/csi==1 selection is on an elided line). */
2468 if (ipu->csi_use_count[csi] == 1) {
2469 reg = ipu_cm_read(ipu, IPU_CONF);
2471 ipu_cm_write(ipu, reg | IPU_CONF_CSI0_EN, IPU_CONF);
2473 ipu_cm_write(ipu, reg | IPU_CONF_CSI1_EN, IPU_CONF);
2475 mutex_unlock(&ipu->mutex_lock);
2479 EXPORT_SYMBOL(ipu_enable_csi);
2482 * This function disables CSI.
2484 * @param ipu ipu handler
2485 * @param csi csi num 0 or 1
2487 * @return This function returns 0 on success or negative error code on
/* NOTE(review): counterpart to ipu_enable_csi; the range check guarding
 * the dev_err below is on an elided line. */
2490 int32_t ipu_disable_csi(struct ipu_soc *ipu, uint32_t csi)
2495 dev_err(ipu->dev, "Wrong csi num_%d\n", csi);
/* Drop one reference; on the last user, wait for the channel's EOF
 * before clearing the CSI enable bit so a frame is not cut short. */
2499 mutex_lock(&ipu->mutex_lock);
2500 ipu->csi_use_count[csi]--;
2501 if (ipu->csi_use_count[csi] == 0) {
2502 _ipu_csi_wait4eof(ipu, ipu->csi_channel[csi]);
2503 reg = ipu_cm_read(ipu, IPU_CONF);
/* csi==0/csi==1 selection is on an elided line. */
2505 ipu_cm_write(ipu, reg & ~IPU_CONF_CSI0_EN, IPU_CONF);
2507 ipu_cm_write(ipu, reg & ~IPU_CONF_CSI1_EN, IPU_CONF);
2509 mutex_unlock(&ipu->mutex_lock);
2513 EXPORT_SYMBOL(ipu_disable_csi);
/* Top-half handler for the IPU "sync" interrupt: walks the sync status
 * registers (int_reg[] below, 0-terminated), dispatches each pending bit
 * to the per-line handler registered via ipu_request_irq(), and disables
 * one-shot lines after the first fire.
 * NOTE(review): several lines are elided in this listing (e.g. the
 * original code between 2530 and 2532 — presumably a "line--" to convert
 * ffs()'s 1-based result, and the assignment of "bit" used at 2539).
 * Verify against the full source before relying on the bit arithmetic
 * shown here. */
2515 static irqreturn_t ipu_sync_irq_handler(int irq, void *desc)
2517 struct ipu_soc *ipu = desc;
2519 uint32_t line, bit, int_stat, int_ctrl;
2520 irqreturn_t result = IRQ_NONE;
/* Sync interrupt status/control register indices; 0 terminates. */
2521 const int int_reg[] = { 1, 2, 3, 4, 11, 12, 13, 14, 15, 0 };
/* Plain spin_lock is sufficient: we are already in hard-irq context. */
2523 spin_lock(&ipu->int_reg_spin_lock);
2525 for (i = 0; int_reg[i] != 0; i++) {
2526 int_stat = ipu_cm_read(ipu, IPU_INT_STAT(int_reg[i]));
2527 int_ctrl = ipu_cm_read(ipu, IPU_INT_CTRL(int_reg[i]));
/* Only consider lines that are both asserted and enabled, then ack them. */
2528 int_stat &= int_ctrl;
2529 ipu_cm_write(ipu, int_stat, IPU_INT_STAT(int_reg[i]));
2530 while ((line = ffs(int_stat)) != 0) {
2532 int_stat &= ~(1UL << line);
/* Translate register-local bit to the global IPU irq line number. */
2533 line += (int_reg[i] - 1) * 32;
2535 ipu->irq_list[line].handler(line,
2536 ipu->irq_list[line].
/* One-shot lines are masked after the first delivery. */
2538 if (ipu->irq_list[line].flags & IPU_IRQF_ONESHOT) {
2539 int_ctrl &= ~(1UL << bit);
2540 ipu_cm_write(ipu, int_ctrl,
2541 IPU_INT_CTRL(int_reg[i]));
2546 spin_unlock(&ipu->int_reg_spin_lock);
/* Top-half handler for the IPU "error" interrupt: logs any pending,
 * enabled error status bits and then masks them so each error is
 * reported only once. No per-line handlers are dispatched here. */
2551 static irqreturn_t ipu_err_irq_handler(int irq, void *desc)
2553 struct ipu_soc *ipu = desc;
/* Error interrupt status/control register indices; 0 terminates. */
2556 const int err_reg[] = { 5, 6, 9, 10, 0 };
2558 spin_lock(&ipu->int_reg_spin_lock);
2560 for (i = 0; err_reg[i] != 0; i++) {
2561 int_stat = ipu_cm_read(ipu, IPU_INT_STAT(err_reg[i]));
/* Consider only enabled error lines, then ack them. */
2562 int_stat &= ipu_cm_read(ipu, IPU_INT_CTRL(err_reg[i]));
2564 ipu_cm_write(ipu, int_stat, IPU_INT_STAT(err_reg[i]));
2566 "IPU Warning - IPU_INT_STAT_%d = 0x%08X\n",
2567 err_reg[i], int_stat);
2568 /* Disable interrupts so we only get error once */
2569 int_stat = ipu_cm_read(ipu, IPU_INT_CTRL(err_reg[i])) &
2571 ipu_cm_write(ipu, int_stat, IPU_INT_CTRL(err_reg[i]));
2575 spin_unlock(&ipu->int_reg_spin_lock);
2581 * This function enables the interrupt for the specified interrupt line.
2582 * The interrupt lines are defined in \b ipu_irq_line enum.
2584 * @param ipu ipu handler
2585 * @param irq Interrupt line to enable interrupt for.
2587 * @return This function returns 0 on success or negative error code on
2590 int ipu_enable_irq(struct ipu_soc *ipu, uint32_t irq)
2593 unsigned long lock_flags;
2598 spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
2601 * Check sync interrupt handler only, since we do nothing for
2602 * error interrupts other than print out register values in the
2603 * error interrupt source handler.
/* Refuse to unmask a sync line nobody has registered a handler for —
 * otherwise ipu_sync_irq_handler would call a NULL pointer. */
2605 if (_ipu_is_sync_irq(irq) && (ipu->irq_list[irq].handler == NULL)) {
2606 dev_err(ipu->dev, "handler hasn't been registered on sync "
/* Set the line's enable bit in its control register. */
2612 reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
2613 reg |= IPUIRQ_2_MASK(irq);
2614 ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
2616 spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
2622 EXPORT_SYMBOL(ipu_enable_irq);
2625 * This function disables the interrupt for the specified interrupt line.
2626 * The interrupt lines are defined in \b ipu_irq_line enum.
2628 * @param ipu ipu handler
2629 * @param irq Interrupt line to disable interrupt for.
2632 void ipu_disable_irq(struct ipu_soc *ipu, uint32_t irq)
2635 unsigned long lock_flags;
2639 spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
/* Clear the line's enable bit in its control register. */
2641 reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
2642 reg &= ~IPUIRQ_2_MASK(irq);
2643 ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
2645 spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
2649 EXPORT_SYMBOL(ipu_disable_irq);
2652 * This function clears the interrupt for the specified interrupt line.
2653 * The interrupt lines are defined in \b ipu_irq_line enum.
2655 * @param ipu ipu handler
2656 * @param irq Interrupt line to clear interrupt for.
2659 void ipu_clear_irq(struct ipu_soc *ipu, uint32_t irq)
2661 unsigned long lock_flags;
2665 spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
/* Status registers are write-1-to-clear: writing the mask acks the line. */
2667 ipu_cm_write(ipu, IPUIRQ_2_MASK(irq), IPUIRQ_2_STATREG(irq));
2669 spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
2673 EXPORT_SYMBOL(ipu_clear_irq);
2676 * This function returns the current interrupt status for the specified
2677 * interrupt line. The interrupt lines are defined in \b ipu_irq_line enum.
2679 * @param ipu ipu handler
2680 * @param irq Interrupt line to get status for.
2682 * @return Returns true if the interrupt is pending/asserted or false if
2683 * the interrupt is not pending.
2685 bool ipu_get_irq_status(struct ipu_soc *ipu, uint32_t irq)
2688 unsigned long lock_flags;
/* Snapshot the status register under the interrupt-register lock. */
2692 spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
2693 reg = ipu_cm_read(ipu, IPUIRQ_2_STATREG(irq));
2694 spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
/* True when this line's bit is set (return statements elided in listing). */
2698 if (reg & IPUIRQ_2_MASK(irq))
2703 EXPORT_SYMBOL(ipu_get_irq_status);
2706 * This function registers an interrupt handler function for the specified
2707 * interrupt line. The interrupt lines are defined in \b ipu_irq_line enum.
2709 * @param ipu ipu handler
2710 * @param irq Interrupt line to get status for.
2712 * @param handler Input parameter for address of the handler
2715 * @param irq_flags Flags for interrupt mode. Currently not used.
2717 * @param devname Input parameter for string name of driver
2718 * registering the handler.
2720 * @param dev_id Input parameter for pointer of data to be
2721 * passed to the handler.
2723 * @return This function returns 0 on success or negative error code on
2726 int ipu_request_irq(struct ipu_soc *ipu, uint32_t irq,
2727 irqreturn_t(*handler) (int, void *),
2728 uint32_t irq_flags, const char *devname, void *dev_id)
2731 unsigned long lock_flags;
/* Programming error to request a line outside the table. */
2734 BUG_ON(irq >= IPU_IRQ_COUNT);
2738 spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
/* Each line holds at most one handler; reject double registration. */
2740 if (ipu->irq_list[irq].handler != NULL) {
2742 "handler already installed on irq %d\n", irq);
2748 * Check sync interrupt handler only, since we do nothing for
2749 * error interrupts other than print out register values in the
2750 * error interrupt source handler.
2752 if (_ipu_is_sync_irq(irq) && (handler == NULL)) {
2753 dev_err(ipu->dev, "handler is NULL for sync irq %d\n", irq);
/* Record the registration while still holding the lock. */
2758 ipu->irq_list[irq].handler = handler;
2759 ipu->irq_list[irq].flags = irq_flags;
2760 ipu->irq_list[irq].dev_id = dev_id;
2761 ipu->irq_list[irq].name = devname;
2763 /* clear irq stat for previous use */
2764 ipu_cm_write(ipu, IPUIRQ_2_MASK(irq), IPUIRQ_2_STATREG(irq));
2765 /* enable the interrupt */
2766 reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
2767 reg |= IPUIRQ_2_MASK(irq);
2768 ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
2770 spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
2776 EXPORT_SYMBOL(ipu_request_irq);
2779 * This function unregisters an interrupt handler for the specified interrupt
2780 * line. The interrupt lines are defined in \b ipu_irq_line enum.
2782 * @param ipu ipu handler
2783 * @param irq Interrupt line to get status for.
2785 * @param dev_id Input parameter for pointer of data to be passed
2786 * to the handler. This must match value passed to
2787 * ipu_request_irq().
2790 void ipu_free_irq(struct ipu_soc *ipu, uint32_t irq, void *dev_id)
2793 unsigned long lock_flags;
2797 spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
2799 /* disable the interrupt */
2800 reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
2801 reg &= ~IPUIRQ_2_MASK(irq);
2802 ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
/* Only wipe the registration when the caller owns it (dev_id matches);
 * note the line is masked above even on a dev_id mismatch. */
2803 if (ipu->irq_list[irq].dev_id == dev_id)
2804 memset(&ipu->irq_list[irq], 0, sizeof(ipu->irq_list[irq]));
2806 spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
2810 EXPORT_SYMBOL(ipu_free_irq);
/* Return the index of the buffer the IDMAC is currently using for the
 * given logical channel/buffer type. Triple-buffered channels read the
 * triple-buffer register; otherwise the double-buffer register decides
 * between index 0 and 1 (exact return values are on elided lines). */
2812 uint32_t ipu_get_cur_buffer_idx(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type)
2814 uint32_t reg, dma_chan;
2816 dma_chan = channel_2_dma(channel, type);
/* Invalid mapping: early-out (return on an elided line). */
2817 if (!idma_is_valid(dma_chan))
/* Triple-buffer mode: current index lives in IPU_CHA_TRIPLE_CUR_BUF. */
2820 reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
2821 if ((reg & idma_mask(dma_chan)) && _ipu_is_trb_chan(dma_chan)) {
2822 reg = ipu_cm_read(ipu, IPU_CHA_TRIPLE_CUR_BUF(dma_chan));
2823 return (reg & tri_cur_buf_mask(dma_chan)) >>
2824 tri_cur_buf_shift(dma_chan);
/* Double-buffer mode: a set bit selects one buffer, clear the other. */
2826 reg = ipu_cm_read(ipu, IPU_CHA_CUR_BUF(dma_chan));
2827 if (reg & idma_mask(dma_chan))
2833 EXPORT_SYMBOL(ipu_get_cur_buffer_idx);
/* Extract the hardware task-status field for the given logical channel
 * from IPU_PROC_TASK_STAT. Channels with no task mapping fall through
 * to the default and report TASK_STAT_IDLE. */
2835 uint32_t _ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel)
2838 uint32_t task_stat_reg = ipu_cm_read(ipu, IPU_PROC_TASK_STAT);
2841 case MEM_PRP_VF_MEM:
2842 stat = (task_stat_reg & TSTAT_VF_MASK) >> TSTAT_VF_OFFSET;
/* VDI and plain PRP VF share the same VF status field. */
2844 case MEM_VDI_PRP_VF_MEM:
2845 stat = (task_stat_reg & TSTAT_VF_MASK) >> TSTAT_VF_OFFSET;
2847 case MEM_ROT_VF_MEM:
2849 (task_stat_reg & TSTAT_VF_ROT_MASK) >> TSTAT_VF_ROT_OFFSET;
2851 case MEM_PRP_ENC_MEM:
2852 stat = (task_stat_reg & TSTAT_ENC_MASK) >> TSTAT_ENC_OFFSET;
2854 case MEM_ROT_ENC_MEM:
2856 (task_stat_reg & TSTAT_ENC_ROT_MASK) >>
2857 TSTAT_ENC_ROT_OFFSET;
2860 stat = (task_stat_reg & TSTAT_PP_MASK) >> TSTAT_PP_OFFSET;
2862 case MEM_ROT_PP_MEM:
2864 (task_stat_reg & TSTAT_PP_ROT_MASK) >> TSTAT_PP_ROT_OFFSET;
/* Unknown channel: report idle. */
2868 stat = TASK_STAT_IDLE;
2875 * This function check for a logical channel status
2877 * @param ipu ipu handler
2878 * @param channel Input parameter for the logical channel ID.
2880 * @return This function returns 0 on idle and 1 on busy.
2883 uint32_t ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel)
2885 uint32_t dma_status;
/* Query IDMAC busy state under the IPU mutex to serialize with
 * channel enable/disable paths. */
2888 mutex_lock(&ipu->mutex_lock);
2889 dma_status = ipu_is_channel_busy(ipu, channel);
2890 mutex_unlock(&ipu->mutex_lock);
2893 dev_dbg(ipu->dev, "%s, dma_status:%d.\n", __func__, dma_status);
2897 EXPORT_SYMBOL(ipu_channel_status);
/* Atomically (w.r.t. the IPU mutex) hand the display path over from one
 * logical channel to another: enable the target channel's IDMAC, tear
 * down the DP/DC path of the source, disable the source IDMAC, reset its
 * current-buffer pointers and clear its ready bits. */
2899 int32_t ipu_swap_channel(struct ipu_soc *ipu, ipu_channel_t from_ch, ipu_channel_t to_ch)
2902 unsigned long lock_flags;
2903 int from_dma = channel_2_dma(from_ch, IPU_INPUT_BUFFER);
2904 int to_dma = channel_2_dma(to_ch, IPU_INPUT_BUFFER);
2906 mutex_lock(&ipu->mutex_lock);
2908 /* enable target channel */
2909 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(to_dma));
2910 ipu_idmac_write(ipu, reg | idma_mask(to_dma), IDMAC_CHA_EN(to_dma));
2912 ipu->channel_enable_mask |= 1L << IPU_CHAN_ID(to_ch);
/* Tear down the source channel's display (DP/DC) path first. */
2915 _ipu_dp_dc_disable(ipu, from_ch, true);
2917 /* disable source channel */
2918 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(from_dma));
2919 ipu_idmac_write(ipu, reg & ~idma_mask(from_dma), IDMAC_CHA_EN(from_dma));
/* Writing the mask resets the current-buffer pointers (double and
 * triple buffer variants). */
2920 ipu_cm_write(ipu, idma_mask(from_dma), IPU_CHA_CUR_BUF(from_dma));
2921 ipu_cm_write(ipu, tri_cur_buf_mask(from_dma),
2922 IPU_CHA_TRIPLE_CUR_BUF(from_dma));
2924 ipu->channel_enable_mask &= ~(1L << IPU_CHAN_ID(from_ch));
/* Ready-bit registers have their own spinlock (shared with irq paths). */
2926 spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
2927 _ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 0);
2928 _ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 1);
2929 _ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 2);
2930 spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
2932 mutex_unlock(&ipu->mutex_lock);
2936 EXPORT_SYMBOL(ipu_swap_channel);
/* Map an IPU pixel-format fourcc to its bytes-per-pixel (per plane-0 for
 * planar formats). The return statements are on elided lines; based on
 * the grouping and the GENERIC_16/GENERIC_32 names, the four groups
 * presumably return 1, 2, 3 and 4 respectively — TODO confirm against
 * the full source. */
2938 uint32_t bytes_per_pixel(uint32_t fmt)
/* 1 byte/pixel group (planar YUV counted per luma sample). */
2941 case IPU_PIX_FMT_GENERIC: /*generic data */
2942 case IPU_PIX_FMT_RGB332:
2943 case IPU_PIX_FMT_YUV420P:
2944 case IPU_PIX_FMT_YVU420P:
2945 case IPU_PIX_FMT_YUV422P:
2946 case IPU_PIX_FMT_YUV444P:
/* 2 bytes/pixel group. */
2949 case IPU_PIX_FMT_GENERIC_16: /* generic data */
2950 case IPU_PIX_FMT_RGB565:
2951 case IPU_PIX_FMT_YUYV:
2952 case IPU_PIX_FMT_UYVY:
/* 3 bytes/pixel group. */
2955 case IPU_PIX_FMT_BGR24:
2956 case IPU_PIX_FMT_RGB24:
2957 case IPU_PIX_FMT_YUV444:
/* 4 bytes/pixel group. */
2960 case IPU_PIX_FMT_GENERIC_32: /*generic data */
2961 case IPU_PIX_FMT_BGR32:
2962 case IPU_PIX_FMT_BGRA32:
2963 case IPU_PIX_FMT_RGB32:
2964 case IPU_PIX_FMT_RGBA32:
2965 case IPU_PIX_FMT_ABGR32:
2974 EXPORT_SYMBOL(bytes_per_pixel);
/* Classify an IPU pixel format as RGB or YUV color space. The listed
 * cases are all RGB-family formats; the return statements (and the
 * default/YUV branch) are on elided lines — TODO confirm against the
 * full source. */
2976 ipu_color_space_t format_to_colorspace(uint32_t fmt)
2979 case IPU_PIX_FMT_RGB666:
2980 case IPU_PIX_FMT_RGB565:
2981 case IPU_PIX_FMT_BGR24:
2982 case IPU_PIX_FMT_RGB24:
2983 case IPU_PIX_FMT_GBR24:
2984 case IPU_PIX_FMT_BGR32:
2985 case IPU_PIX_FMT_BGRA32:
2986 case IPU_PIX_FMT_RGB32:
2987 case IPU_PIX_FMT_RGBA32:
2988 case IPU_PIX_FMT_ABGR32:
2989 case IPU_PIX_FMT_LVDS666:
2990 case IPU_PIX_FMT_LVDS888:
/* True for formats carrying a per-pixel alpha component (the return
 * statements and default case are on elided lines). */
3001 bool ipu_pixel_format_has_alpha(uint32_t fmt)
3004 case IPU_PIX_FMT_RGBA32:
3005 case IPU_PIX_FMT_BGRA32:
3006 case IPU_PIX_FMT_ABGR32:
/* System-sleep suspend callback (dev_pm_ops). By this point all IDMAC
 * channels and IPU clocks are expected to be off already, so only a
 * debug trace remains here. */
3017 static int ipu_suspend(struct device *dev)
3019 struct ipu_soc *ipu = dev_get_drvdata(dev);
3021 /* All IDMAC channel and IPU clock should be disabled.*/
3025 dev_dbg(dev, "ipu suspend.\n");
/* System-sleep resume callback: when the platform power-gates the IPU
 * (pdata->pg), re-initialize DMFC FIFO setup and restore IDMAC channel
 * priorities lost across the power-down. */
3029 static int ipu_resume(struct device *dev)
3031 struct ipu_soc *ipu = dev_get_drvdata(dev);
3033 if (ipu->pdata->pg) {
3037 _ipu_dmfc_init(ipu, dmfc_type_setup, 1);
3038 /* Set sync refresh channels as high priority */
3039 ipu_idmac_write(ipu, 0x18800001L, IDMAC_CHA_PRI(0));
3042 dev_dbg(dev, "ipu resume.\n");
/* Runtime-PM suspend: drop the high bus-frequency request taken in
 * ipu_runtime_resume(). */
3046 int ipu_runtime_suspend(struct device *dev)
3049 release_bus_freq(BUS_FREQ_HIGH);
3050 dev_dbg(dev, "ipu busfreq high release.\n");
/* Runtime-PM resume: request high bus frequency while the IPU is active. */
3055 int ipu_runtime_resume(struct device *dev)
3058 request_bus_freq(BUS_FREQ_HIGH);
3059 dev_dbg(dev, "ipu busfreq high requst.\n");
/* Power-management callbacks: runtime PM pair plus system sleep pair. */
3064 static const struct dev_pm_ops ipu_pm_ops = {
3065 SET_RUNTIME_PM_OPS(ipu_runtime_suspend, ipu_runtime_resume, NULL)
3066 SET_SYSTEM_SLEEP_PM_OPS(ipu_suspend, ipu_resume)
3071 * This structure contains pointers to the power management callback functions.
/* Platform driver definition; matched via device tree ("imx-ipuv3") or
 * the legacy platform id table. */
3073 static struct platform_driver mxcipu_driver = {
3075 .name = "imx-ipuv3",
3076 .of_match_table = imx_ipuv3_dt_ids,
3082 .id_table = imx_ipu_type,
3083 .remove = ipu_remove,
/* Module entry point: register the platform driver. Hooked via
 * subsys_initcall so the IPU is available before client drivers probe. */
3086 int32_t __init ipu_gen_init(void)
3090 ret = platform_driver_register(&mxcipu_driver);
3094 subsys_initcall(ipu_gen_init);
/* Module exit point: unregister the platform driver. */
3096 static void __exit ipu_gen_uninit(void)
3098 platform_driver_unregister(&mxcipu_driver);
3101 module_exit(ipu_gen_uninit);