2 * Copyright 2005-2011 Freescale Semiconductor, Inc. All Rights Reserved.
6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations:
10 * http://www.opensource.org/licenses/gpl-license.html
11 * http://www.gnu.org/copyleft/gpl.html
17 * @brief This file contains the IPU driver common API functions.
21 #include <linux/types.h>
22 #include <linux/init.h>
23 #include <linux/platform_device.h>
24 #include <linux/err.h>
25 #include <linux/spinlock.h>
26 #include <linux/delay.h>
27 #include <linux/interrupt.h>
29 #include <linux/irq.h>
30 #include <linux/irqdesc.h>
31 #include <linux/ipu.h>
32 #include <linux/clk.h>
33 #include <linux/clkdev.h>
34 #include <mach/clock.h>
35 #include <mach/hardware.h>
36 #include <mach/ipu-v3.h>
37 #include <mach/devices-common.h>
41 #include "ipu_param_mem.h"
43 static struct ipu_soc ipu_array[MXC_IPU_MAX_NUM];
46 /* Static functions */
47 static irqreturn_t ipu_irq_handler(int irq, void *desc);
49 static inline uint32_t channel_2_dma(ipu_channel_t ch, ipu_buffer_t type)
51 return ((uint32_t) ch >> (6 * type)) & 0x3F;
/* True when @dma_chan is an IC channel: 11..22, except 17 and 18. */
static inline int _ipu_is_ic_chan(uint32_t dma_chan)
{
	if (dma_chan < 11 || dma_chan > 22)
		return 0;
	if (dma_chan == 17 || dma_chan == 18)
		return 0;
	return 1;
}
/* True when @dma_chan is one of the IC graphics-plane channels (14, 15). */
static inline int _ipu_is_ic_graphic_chan(uint32_t dma_chan)
{
	switch (dma_chan) {
	case 14:
	case 15:
		return 1;
	default:
		return 0;
	}
}
/* Either DP BG (channel 23) or DP FG (channel 27) can be the graphic window. */
static inline int _ipu_is_dp_graphic_chan(uint32_t dma_chan)
{
	if (dma_chan == 23)
		return 1;
	return dma_chan == 27;
}
/*
 * True when @dma_chan is an IRT (rotator) channel, i.e. in 45..50.
 * The unsigned subtraction folds both range checks into one compare:
 * values below 45 wrap to a huge number and fail the <= test.
 */
static inline int _ipu_is_irt_chan(uint32_t dma_chan)
{
	return (dma_chan - 45u) <= 5u;
}
/* True when @dma_chan is a DMFC channel (23..29). */
static inline int _ipu_is_dmfc_chan(uint32_t dma_chan)
{
	if (dma_chan < 23)
		return 0;
	return dma_chan <= 29;
}
/*
 * True when @dma_chan is an SMFC channel (0..3).
 *
 * Fix vs. original: "dma_chan >= 0" is always true for an unsigned
 * operand (tautological-compare warning with -Wextra); only the upper
 * bound is meaningful, so test just that.
 */
static inline int _ipu_is_smfc_chan(uint32_t dma_chan)
{
	return dma_chan <= 3;
}
/*
 * True when @dma_chan supports triple-buffer (TRB) mode.
 * NOTE(review): the condition continues on a line not visible in this
 * view (the trailing "&&" below); the elided part appears to gate on
 * the IPU hardware revision -- confirm against the complete source.
 */
static inline int _ipu_is_trb_chan(uint32_t dma_chan)
return (((dma_chan == 8) || (dma_chan == 9) ||
	(dma_chan == 10) || (dma_chan == 13) ||
	(dma_chan == 21) || (dma_chan == 23) ||
	(dma_chan == 27) || (dma_chan == 28)) &&
/*
 * IDMAC channel helper macros.
 *
 * Fix vs. original: macro parameters are now fully parenthesized so the
 * expansions stay correct for any argument expression (the original
 * relied on operator precedence, e.g. "ch & 0x1F" and "ch*2" inside
 * idma_mask(), which breaks for arguments like "a | b").
 */
#define idma_is_valid(ch)	((ch) != NO_DMA)
/* Per-channel bit inside the 32-bit bank the channel lives in; 0 for NO_DMA. */
#define idma_mask(ch)		(idma_is_valid(ch) ? (1UL << ((ch) & 0x1F)) : 0)
/* Test a channel's bit in a banked IDMAC register (reg selects the bank). */
#define idma_is_set(ipu, reg, dma)	(ipu_idmac_read(ipu, reg(dma)) & idma_mask(dma))
/* Triple-buffer "current buffer" field: 2 bits per channel at bit 2*ch,
 * hence mask = (bit at 2*ch) * 3 and shift = position of that bit. */
#define tri_cur_buf_mask(ch)	(idma_mask((ch) * 2) * 3)
#define tri_cur_buf_shift(ch)	(ffs(idma_mask((ch) * 2)) - 1)
/*
 * Soft-reset the IPU submodules by writing the reset mask to
 * IPU_MEM_RST, then poll until the self-clearing trigger bit (bit 31)
 * drops.
 * NOTE(review): the loop body / timeout handling and the return
 * statement are on lines not visible in this view -- confirm against
 * the complete source.
 */
static int ipu_reset(struct ipu_soc *ipu)
ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);
while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
/*
 * Recover the owning ipu_soc from one of its pixel clocks.
 * clk->id is this clock's index in ipu->pixel_clk[], so "clk - clk->id"
 * steps back to &pixel_clk[0], from which container_of() yields the
 * enclosing ipu_soc.
 */
static inline struct ipu_soc *pixelclk2ipu(struct clk *clk)
struct clk *base = clk - clk->id;
ipu = container_of(base, struct ipu_soc, pixel_clk[0]);
/*
 * Pixel clock rate = parent rate / divider, where DI_BS_CLKGEN0 holds
 * the divider with a 4-bit fractional part (hence parent * 16 / div).
 * NOTE(review): a div == 0 guard is presumably on a line not visible
 * here -- confirm, otherwise this divides by zero.
 */
static unsigned long _ipu_pixel_clk_get_rate(struct clk *clk)
struct ipu_soc *ipu = pixelclk2ipu(clk);
u32 div = ipu_di_read(ipu, clk->id, DI_BS_CLKGEN0);
return (clk_get_rate(clk->parent) * 16) / div;
/*
 * Round @rate to the nearest rate achievable with the DI clock
 * generator's fixed-point divider.
 * NOTE(review): the statements inside the two if-bodies (clamping div
 * and the round-up adjustment) are on lines not visible in this view.
 */
static unsigned long _ipu_pixel_clk_round_rate(struct clk *clk, unsigned long rate)
u32 parent_rate = clk_get_rate(clk->parent) * 16;
/*
 * Fractional part is 4 bits,
 * so simply multiply by 2^4 to get fractional part.
 */
div = parent_rate / rate;
if (div < 0x10)		/* Min DI disp clock divider is 1 */
/* Round up divider if it gets us closer to desired pix clk */
if ((div & 0xC) == 0xC) {
return parent_rate / div;
/*
 * Program the DI clock generator divider for @rate (divider is stored
 * in 4-bit fixed point, hence the * 16).
 * NOTE(review): the round-up branch body and the return statement are
 * on lines not visible in this view.
 */
static int _ipu_pixel_clk_set_rate(struct clk *clk, unsigned long rate)
struct ipu_soc *ipu = pixelclk2ipu(clk);
u32 div = (clk_get_rate(clk->parent) * 16) / rate;
unsigned long lock_flags;
/* Round up divider if it gets us closer to desired pix clk */
if ((div & 0xC) == 0xC) {
spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
ipu_di_write(ipu, clk->id, div, DI_BS_CLKGEN0);
/* Setup pixel clock timing */
/* FIXME: needs to be more flexible */
/* Down time is half of period */
ipu_di_write(ipu, clk->id, (div / 16) << 16, DI_BS_CLKGEN1);
spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
/*
 * Enable this pixel clock by releasing the DI counter for the display
 * interface selected by clk->id (DI0 or DI1) in IPU_DISP_GEN.
 * NOTE(review): the return statement is on a line not visible here.
 */
static int _ipu_pixel_clk_enable(struct clk *clk)
struct ipu_soc *ipu = pixelclk2ipu(clk);
u32 disp_gen = ipu_cm_read(ipu, IPU_DISP_GEN);
disp_gen |= clk->id ? DI1_COUNTER_RELEASE : DI0_COUNTER_RELEASE;
ipu_cm_write(ipu, disp_gen, IPU_DISP_GEN);
/*
 * Disable this pixel clock by holding the DI counter for the display
 * interface selected by clk->id (inverse of _ipu_pixel_clk_enable).
 */
static void _ipu_pixel_clk_disable(struct clk *clk)
struct ipu_soc *ipu = pixelclk2ipu(clk);
u32 disp_gen = ipu_cm_read(ipu, IPU_DISP_GEN);
disp_gen &= clk->id ? ~DI1_COUNTER_RELEASE : ~DI0_COUNTER_RELEASE;
ipu_cm_write(ipu, disp_gen, IPU_DISP_GEN);
/*
 * Select the pixel clock source: the internal IPU clock or the external
 * per-DI clock, via the DI_GEN_DI_CLK_EXT bit in DI_GENERAL.
 * NOTE(review): the first unlock below is presumably inside a hidden
 * "else { unlock; return -EINVAL; }" branch for an unsupported parent,
 * and the final return is also elided -- confirm against the complete
 * source (as shown, the lock would be released twice).
 */
static int _ipu_pixel_clk_set_parent(struct clk *clk, struct clk *parent)
struct ipu_soc *ipu = pixelclk2ipu(clk);
unsigned long lock_flags;
spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
di_gen = ipu_di_read(ipu, clk->id, DI_GENERAL);
if (parent == ipu->ipu_clk)
	di_gen &= ~DI_GEN_DI_CLK_EXT;
else if (!IS_ERR(ipu->di_clk[clk->id]) && parent == ipu->di_clk[clk->id])
	di_gen |= DI_GEN_DI_CLK_EXT;
spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
ipu_di_write(ipu, clk->id, di_gen, DI_GENERAL);
spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
/*
 * Optionally stamp struct clk initializers with a .name field when
 * clock debugging is configured.
 * NOTE(review): the "#else" between the two definitions and the
 * closing "#endif" are on lines not visible in this view.
 */
#ifdef CONFIG_CLK_DEBUG
#define __INIT_CLK_DEBUG(n)	.name = #n,
#define __INIT_CLK_DEBUG(n)
/*
 * Set up and enable the clocks for one IPU instance: acquire the bus
 * clock, register the two DI pixel clocks (with clkdev lookups), parent
 * them to the bus clock, and fetch the external DI and CSI clocks.
 * NOTE(review): numerous structural lines (braces, array-element
 * delimiters, .dev_id fields, the final return) are not visible in
 * this view.
 */
static int __devinit ipu_clk_setup_enable(struct ipu_soc *ipu,
		struct platform_device *pdev)
struct imx_ipuv3_platform_data *plat_data = pdev->dev.platform_data;
/* Template clk objects for the two per-DI pixel clocks; the shared
 * op table points at the _ipu_pixel_clk_* helpers above. */
static struct clk ipu_pixel_clk[] = {
__INIT_CLK_DEBUG(pixel_clk_0)
.get_rate = _ipu_pixel_clk_get_rate,
.set_rate = _ipu_pixel_clk_set_rate,
.round_rate = _ipu_pixel_clk_round_rate,
.set_parent = _ipu_pixel_clk_set_parent,
.enable = _ipu_pixel_clk_enable,
.disable = _ipu_pixel_clk_disable,
__INIT_CLK_DEBUG(pixel_clk_1)
.get_rate = _ipu_pixel_clk_get_rate,
.set_rate = _ipu_pixel_clk_set_rate,
.round_rate = _ipu_pixel_clk_round_rate,
.set_parent = _ipu_pixel_clk_set_parent,
.enable = _ipu_pixel_clk_enable,
.disable = _ipu_pixel_clk_disable,
static struct clk_lookup ipu_lookups[] = {
.con_id = "pixel_clk_0",
.con_id = "pixel_clk_1",
/* Clock-name templates; index 3 is the instance digit ('1' in
 * "ipu1_clk"), patched below by pdev->id. */
char ipu_clk[] = "ipu1_clk";
char di0_clk[] = "ipu1_di0_clk";
char di1_clk[] = "ipu1_di1_clk";
ipu_clk[3] += pdev->id;
di0_clk[3] += pdev->id;
di1_clk[3] += pdev->id;
ipu->ipu_clk = clk_get(ipu->dev, ipu_clk);
if (IS_ERR(ipu->ipu_clk)) {
dev_err(ipu->dev, "clk_get failed");
return PTR_ERR(ipu->ipu_clk);
dev_dbg(ipu->dev, "ipu_clk = %lu\n", clk_get_rate(ipu->ipu_clk));
/* Copy the templates into this instance and hook up the lookups. */
ipu->pixel_clk[0] = ipu_pixel_clk[0];
ipu->pixel_clk[1] = ipu_pixel_clk[1];
ipu_lookups[0].clk = &ipu->pixel_clk[0];
ipu_lookups[1].clk = &ipu->pixel_clk[1];
clkdev_add(&ipu_lookups[0]);
clkdev_add(&ipu_lookups[1]);
clk_debug_register(&ipu->pixel_clk[0]);
clk_debug_register(&ipu->pixel_clk[1]);
clk_enable(ipu->ipu_clk);
/* Default both pixel clocks to the internal IPU clock. */
clk_set_parent(&ipu->pixel_clk[0], ipu->ipu_clk);
clk_set_parent(&ipu->pixel_clk[1], ipu->ipu_clk);
ipu->di_clk[0] = clk_get(ipu->dev, di0_clk);
ipu->di_clk[1] = clk_get(ipu->dev, di1_clk);
/* CSI clock names come from platform data. */
ipu->csi_clk[0] = clk_get(ipu->dev, plat_data->csi_clk[0]);
ipu->csi_clk[1] = clk_get(ipu->dev, plat_data->csi_clk[1]);
/*
 * Demultiplexing handler for the IPU "sync" interrupt: walks the listed
 * IPU_INT_STAT banks, masks out disabled sources via IPU_INT_CTRL, and
 * dispatches each pending bit to its per-bit virtual IRQ.
 * NOTE(review): the forward declaration near the top of the file gives
 * this function an (int, void *)/irqreturn_t signature and ipu_probe()
 * passes it to request_irq(); the definition here is the chained-handler
 * form -- confirm which variant the complete source actually compiles.
 * NOTE(review): ffs() returns a 1-based bit index; a "line--" adjustment
 * appears to be on a line not visible here, otherwise the bit-clear and
 * IRQ-number math below would be off by one.
 */
static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
/* Status register banks serviced by the sync interrupt; 0-terminated. */
const int int_reg[] = { 1, 2, 3, 4, 11, 12, 13, 14, 15, 0 };
status = ipu_cm_read(ipu, IPU_INT_STAT(int_reg[i]));
/* Only dispatch sources that are both pending and enabled. */
status &= ipu_cm_read(ipu, IPU_INT_CTRL(int_reg[i]));
while ((line = ffs(status))) {
status &= ~(1UL << line);
/* Translate bank-relative bit to this IPU's virtual IRQ number. */
line += ipu->irq_start + (int_reg[i] - 1) * 32;
generic_handle_irq(line);
/*
 * Demultiplexing handler for the IPU "error" interrupt; identical in
 * structure to ipu_irq_handler() but serves the error status banks.
 * NOTE(review): as above, the 1-based ffs() result appears to be
 * decremented on a line not visible in this view.
 */
static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
/* Error status register banks; 0-terminated. */
const int int_reg[] = { 5, 6, 9, 10, 0 };
status = ipu_cm_read(ipu, IPU_INT_STAT(int_reg[i]));
status &= ipu_cm_read(ipu, IPU_INT_CTRL(int_reg[i]));
while ((line = ffs(status))) {
status &= ~(1UL << line);
line += ipu->irq_start + (int_reg[i] - 1) * 32;
generic_handle_irq(line);
/*
 * irq_chip .irq_ack: acknowledge one IPU virtual interrupt by writing
 * its bit to the corresponding IPU_INT_STAT bank (status registers are
 * banked 32 bits wide and 1-indexed, hence irq/32 + 1).
 */
static void ipu_ack_irq(struct irq_data *d)
struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
/* Convert the Linux IRQ back to an IPU-relative interrupt number. */
unsigned int irq = d->irq - ipu->irq_start;
spin_lock_irqsave(&ipu->ipu_lock, flags);
ipu_cm_write(ipu, 1 << (irq % 32), IPU_INT_STAT(irq / 32 + 1));
spin_unlock_irqrestore(&ipu->ipu_lock, flags);
/*
 * irq_chip .irq_unmask: enable one IPU virtual interrupt by setting its
 * bit in the matching IPU_INT_CTRL bank (read-modify-write under the
 * IPU lock).
 */
static void ipu_unmask_irq(struct irq_data *d)
struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
unsigned int irq = d->irq - ipu->irq_start;
spin_lock_irqsave(&ipu->ipu_lock, flags);
reg = ipu_cm_read(ipu, IPU_INT_CTRL(irq / 32 + 1));
reg |= 1 << (irq % 32);
ipu_cm_write(ipu, reg, IPU_INT_CTRL(irq / 32 + 1));
spin_unlock_irqrestore(&ipu->ipu_lock, flags);
/*
 * irq_chip .irq_mask: disable one IPU virtual interrupt by clearing its
 * bit in the matching IPU_INT_CTRL bank (read-modify-write under the
 * IPU lock).
 */
static void ipu_mask_irq(struct irq_data *d)
struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
unsigned int irq = d->irq - ipu->irq_start;
spin_lock_irqsave(&ipu->ipu_lock, flags);
reg = ipu_cm_read(ipu, IPU_INT_CTRL(irq / 32 + 1));
reg &= ~(1 << (irq % 32));
ipu_cm_write(ipu, reg, IPU_INT_CTRL(irq / 32 + 1));
spin_unlock_irqrestore(&ipu->ipu_lock, flags);
/*
 * irq_chip backing the per-bit IPU virtual interrupts installed by
 * ipu_irq_setup().
 * NOTE(review): the .name initializer line is not visible in this view.
 */
static struct irq_chip ipu_irq_chip = {
.irq_ack = ipu_ack_irq,
.irq_mask = ipu_mask_irq,
.irq_unmask = ipu_unmask_irq,
/*
 * Install the per-bit virtual IRQs for this IPU and chain the top-level
 * sync/error interrupts to the demultiplexing handlers above.
 * NOTE(review): ipu_probe() also claims irq_sync/irq_err directly via
 * request_irq(); whether this chained setup or the request_irq() path
 * is compiled likely depends on configuration not visible here.
 */
static void __devinit ipu_irq_setup(struct ipu_soc *ipu)
for (i = ipu->irq_start; i < ipu->irq_start + MX5_IPU_IRQS; i++) {
irq_set_chip_and_handler(i, &ipu_irq_chip, handle_level_irq);
set_irq_flags(i, IRQF_VALID);
irq_set_chip_data(i, ipu);
irq_set_chained_handler(ipu->irq_sync, ipu_irq_handler);
irq_set_handler_data(ipu->irq_sync, ipu);
irq_set_chained_handler(ipu->irq_err, ipu_err_irq_handler);
irq_set_handler_data(ipu->irq_err, ipu);
416 int ipu_request_irq(struct ipu_soc *ipu, unsigned int irq,
417 irq_handler_t handler, unsigned long flags,
418 const char *name, void *dev)
420 return request_irq(ipu->irq_start + irq, handler, flags, name, dev);
422 EXPORT_SYMBOL_GPL(ipu_request_irq);
424 void ipu_enable_irq(struct ipu_soc *ipu, unsigned int irq)
426 return enable_irq(ipu->irq_start + irq);
428 EXPORT_SYMBOL_GPL(ipu_disable_irq);
430 void ipu_disable_irq(struct ipu_soc *ipu, unsigned int irq)
432 return disable_irq(ipu->irq_start + irq);
434 EXPORT_SYMBOL_GPL(ipu_disable_irq);
436 void ipu_free_irq(struct ipu_soc *ipu, unsigned int irq, void *dev_id)
438 free_irq(ipu->irq_start + irq, dev_id);
440 EXPORT_SYMBOL_GPL(ipu_free_irq);
442 static irqreturn_t ipu_completion_handler(int irq, void *dev)
444 struct completion *completion = dev;
446 complete(completion);
/*
 * Block until IPU interrupt @interrupt fires or @timeout_ms elapses.
 * Temporarily claims the interrupt with a completion-signalling
 * handler, waits, then releases it.
 * Returns 0 if the interrupt arrived, -ETIMEDOUT otherwise.
 * NOTE(review): the declaration of ret and the early-return on
 * ipu_request_irq() failure are on lines not visible in this view.
 */
int ipu_wait_for_interrupt(struct ipu_soc *ipu, int interrupt, int timeout_ms)
DECLARE_COMPLETION_ONSTACK(completion);
ret = ipu_request_irq(ipu, interrupt, ipu_completion_handler,
	0, NULL, &completion);
"ipu request irq %d fail\n", interrupt);
/* wait_for_completion_timeout() returns 0 on timeout, >0 otherwise. */
ret = wait_for_completion_timeout(&completion,
	msecs_to_jiffies(timeout_ms));
ipu_free_irq(ipu, interrupt, &completion);
return ret > 0 ? 0 : -ETIMEDOUT;
473 struct ipu_soc *ipu_get_soc(int id)
475 if (id >= MXC_IPU_MAX_NUM)
476 return ERR_PTR(-ENODEV);
478 return &(ipu_array[id]);
482 * This function is called by the driver framework to initialize the IPU
485 * @param dev The device structure for the IPU passed in by the
488 * @return Returns 0 on success or negative error code on error
/*
 * Platform-driver probe: bind the ipu_array slot selected by pdev->id,
 * claim interrupts, map every submodule register window, set up clocks,
 * program default IDMAC priorities, and register the IPU device.
 * NOTE(review): many structural lines (braces, declarations of ipu/ret,
 * return statements, goto labels, parts of conditions) are not visible
 * in this view; comments describe only what the visible lines show.
 */
static int __devinit ipu_probe(struct platform_device *pdev)
struct imx_ipuv3_platform_data *plat_data = pdev->dev.platform_data;
struct resource *res;
unsigned long ipu_base;
/* pdev->id selects the static ipu_array slot; reject out-of-range ids. */
if (pdev->id >= MXC_IPU_MAX_NUM)
ipu = &ipu_array[pdev->id];
memset(ipu, 0, sizeof(struct ipu_soc));
spin_lock_init(&ipu->ipu_lock);
/* Hardware revision from platform data drives the register-base
 * selection below. */
g_ipu_hw_rev = plat_data->rev;
ipu->dev = &pdev->dev;
/* Board-specific init hook from platform data. */
plat_data->init(pdev->id);
ipu->irq_sync = platform_get_irq(pdev, 0);
ipu->irq_err = platform_get_irq(pdev, 1);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res || ipu->irq_sync < 0 || ipu->irq_err < 0) {
if (request_irq(ipu->irq_sync, ipu_irq_handler, 0, pdev->name, ipu) != 0) {
dev_err(ipu->dev, "request SYNC interrupt failed\n");
goto failed_req_irq_sync;
/* Some platforms have 2 IPU interrupts */
/* NOTE(review): irq_err < 0 already failed the check above, so this
 * guard looks redundant as shown -- confirm against the full source. */
if (ipu->irq_err >= 0) {
(ipu->irq_err, ipu_irq_handler, 0, pdev->name, ipu) != 0) {
dev_err(ipu->dev, "request ERR interrupt failed\n");
goto failed_req_irq_err;
/* Pick the register window offset for this silicon revision. */
ipu_base = res->start;
if (g_ipu_hw_rev == 4) /* IPUv3H */
ipu_base += IPUV3H_REG_BASE;
else if (g_ipu_hw_rev == 3) /* IPUv3M */
ipu_base += IPUV3M_REG_BASE;
else /* IPUv3D, v3E, v3EX */
ipu_base += IPUV3DEX_REG_BASE;
/* Map each submodule's register/memory block. */
ipu->cm_reg = ioremap(ipu_base + IPU_CM_REG_BASE, PAGE_SIZE);
ipu->ic_reg = ioremap(ipu_base + IPU_IC_REG_BASE, PAGE_SIZE);
ipu->idmac_reg = ioremap(ipu_base + IPU_IDMAC_REG_BASE, PAGE_SIZE);
/* DP Registers are accessed thru the SRM */
ipu->dp_reg = ioremap(ipu_base + IPU_SRM_REG_BASE, PAGE_SIZE);
ipu->dc_reg = ioremap(ipu_base + IPU_DC_REG_BASE, PAGE_SIZE);
ipu->dmfc_reg = ioremap(ipu_base + IPU_DMFC_REG_BASE, PAGE_SIZE);
ipu->di_reg[0] = ioremap(ipu_base + IPU_DI0_REG_BASE, PAGE_SIZE);
ipu->di_reg[1] = ioremap(ipu_base + IPU_DI1_REG_BASE, PAGE_SIZE);
ipu->smfc_reg = ioremap(ipu_base + IPU_SMFC_REG_BASE, PAGE_SIZE);
ipu->csi_reg[0] = ioremap(ipu_base + IPU_CSI0_REG_BASE, PAGE_SIZE);
ipu->csi_reg[1] = ioremap(ipu_base + IPU_CSI1_REG_BASE, PAGE_SIZE);
ipu->cpmem_base = ioremap(ipu_base + IPU_CPMEM_REG_BASE, SZ_128K);
ipu->tpmem_base = ioremap(ipu_base + IPU_TPM_REG_BASE, SZ_64K);
ipu->dc_tmpl_reg = ioremap(ipu_base + IPU_DC_TMPL_REG_BASE, SZ_128K);
ipu->vdi_reg = ioremap(ipu_base + IPU_VDI_REG_BASE, PAGE_SIZE);
ipu->disp_base[1] = ioremap(ipu_base + IPU_DISP1_BASE, SZ_4K);
/* Bail out if any mapping failed.
 * NOTE(review): vdi_reg is not in the visible part of this check --
 * confirm whether it is tested on an elided line. */
if (!ipu->cm_reg || !ipu->ic_reg || !ipu->idmac_reg ||
	!ipu->dp_reg || !ipu->dc_reg || !ipu->dmfc_reg ||
	!ipu->di_reg[0] || !ipu->di_reg[1] || !ipu->smfc_reg ||
	!ipu->csi_reg[0] || !ipu->csi_reg[1] || !ipu->cpmem_base ||
	!ipu->tpmem_base || !ipu->dc_tmpl_reg || !ipu->disp_base[1]
dev_dbg(ipu->dev, "IPU CM Regs = %p\n", ipu->cm_reg);
dev_dbg(ipu->dev, "IPU IC Regs = %p\n", ipu->ic_reg);
dev_dbg(ipu->dev, "IPU IDMAC Regs = %p\n", ipu->idmac_reg);
dev_dbg(ipu->dev, "IPU DP Regs = %p\n", ipu->dp_reg);
dev_dbg(ipu->dev, "IPU DC Regs = %p\n", ipu->dc_reg);
dev_dbg(ipu->dev, "IPU DMFC Regs = %p\n", ipu->dmfc_reg);
dev_dbg(ipu->dev, "IPU DI0 Regs = %p\n", ipu->di_reg[0]);
dev_dbg(ipu->dev, "IPU DI1 Regs = %p\n", ipu->di_reg[1]);
dev_dbg(ipu->dev, "IPU SMFC Regs = %p\n", ipu->smfc_reg);
dev_dbg(ipu->dev, "IPU CSI0 Regs = %p\n", ipu->csi_reg[0]);
dev_dbg(ipu->dev, "IPU CSI1 Regs = %p\n", ipu->csi_reg[1]);
dev_dbg(ipu->dev, "IPU CPMem = %p\n", ipu->cpmem_base);
dev_dbg(ipu->dev, "IPU TPMem = %p\n", ipu->tpmem_base);
dev_dbg(ipu->dev, "IPU DC Template Mem = %p\n", ipu->dc_tmpl_reg);
dev_dbg(ipu->dev, "IPU Display Region 1 Mem = %p\n", ipu->disp_base[1]);
dev_dbg(ipu->dev, "IPU VDI Regs = %p\n", ipu->vdi_reg);
ret = ipu_clk_setup_enable(ipu, pdev);
dev_err(ipu->dev, "ipu clk setup failed\n");
goto failed_clk_setup;
platform_set_drvdata(pdev, ipu);
/* Set sync refresh channels and CSI->mem channel as high priority */
ipu_idmac_write(ipu, 0x18800001L, IDMAC_CHA_PRI(0));
/* Set MCU_T to divide MCU access window into 2 */
ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18), IPU_DISP_GEN);
/* Clock stays off until the first channel is initialized. */
clk_disable(ipu->ipu_clk);
register_ipu_device(ipu, pdev->id);
/* Error unwind: unmap everything, release the IRQs. */
iounmap(ipu->cm_reg);
iounmap(ipu->ic_reg);
iounmap(ipu->idmac_reg);
iounmap(ipu->dc_reg);
iounmap(ipu->dp_reg);
iounmap(ipu->dmfc_reg);
iounmap(ipu->di_reg[0]);
iounmap(ipu->di_reg[1]);
iounmap(ipu->smfc_reg);
iounmap(ipu->csi_reg[0]);
iounmap(ipu->csi_reg[1]);
iounmap(ipu->cpmem_base);
iounmap(ipu->tpmem_base);
iounmap(ipu->dc_tmpl_reg);
iounmap(ipu->disp_base[1]);
iounmap(ipu->vdi_reg);
free_irq(ipu->irq_err, ipu);
free_irq(ipu->irq_sync, ipu);
/*
 * Platform-driver remove: unregister the IPU device, release both
 * interrupts, drop the bus clock reference and unmap every register
 * window mapped in ipu_probe().
 * NOTE(review): braces and the return statement are on lines not
 * visible in this view.
 */
int __devexit ipu_remove(struct platform_device *pdev)
struct ipu_soc *ipu = platform_get_drvdata(pdev);
unregister_ipu_device(ipu, pdev->id);
free_irq(ipu->irq_sync, ipu);
free_irq(ipu->irq_err, ipu);
clk_put(ipu->ipu_clk);
iounmap(ipu->cm_reg);
iounmap(ipu->ic_reg);
iounmap(ipu->idmac_reg);
iounmap(ipu->dc_reg);
iounmap(ipu->dp_reg);
iounmap(ipu->dmfc_reg);
iounmap(ipu->di_reg[0]);
iounmap(ipu->di_reg[1]);
iounmap(ipu->smfc_reg);
iounmap(ipu->csi_reg[0]);
iounmap(ipu->csi_reg[1]);
iounmap(ipu->cpmem_base);
iounmap(ipu->tpmem_base);
iounmap(ipu->dc_tmpl_reg);
iounmap(ipu->disp_base[1]);
iounmap(ipu->vdi_reg);
/*
 * Dump the main IPU configuration/flow registers via dev_dbg() for
 * debugging.  TRB mode-select registers exist only on IPU hardware
 * revision >= 2, hence the guard.
 */
void ipu_dump_registers(struct ipu_soc *ipu)
dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n", ipu_cm_read(ipu, IPU_CONF));
dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n", ipu_idmac_read(ipu, IDMAC_CONF));
dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
	ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
	ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
	ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
	ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
	ipu_idmac_read(ipu, IDMAC_BAND_EN(0)));
dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
	ipu_idmac_read(ipu, IDMAC_BAND_EN(32)));
dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
	ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
	ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
if (g_ipu_hw_rev >= 2) {
dev_dbg(ipu->dev, "IPU_CHA_TRB_MODE_SEL0 = \t0x%08X\n",
	ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(0)));
dev_dbg(ipu->dev, "IPU_CHA_TRB_MODE_SEL1 = \t0x%08X\n",
	ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(32)));
dev_dbg(ipu->dev, "DMFC_WR_CHAN = \t0x%08X\n",
	ipu_dmfc_read(ipu, DMFC_WR_CHAN));
dev_dbg(ipu->dev, "DMFC_WR_CHAN_DEF = \t0x%08X\n",
	ipu_dmfc_read(ipu, DMFC_WR_CHAN_DEF));
dev_dbg(ipu->dev, "DMFC_DP_CHAN = \t0x%08X\n",
	ipu_dmfc_read(ipu, DMFC_DP_CHAN));
dev_dbg(ipu->dev, "DMFC_DP_CHAN_DEF = \t0x%08X\n",
	ipu_dmfc_read(ipu, DMFC_DP_CHAN_DEF));
dev_dbg(ipu->dev, "DMFC_IC_CTRL = \t0x%08X\n",
	ipu_dmfc_read(ipu, DMFC_IC_CTRL));
dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
	ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
	ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
	ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
	ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
722 * This function is called to initialize a logical IPU channel.
724 * @param ipu ipu handler
725 * @param channel Input parameter for the logical channel ID to init.
727 * @param params Input parameter containing union of channel
728 * initialization parameters.
730 * @return Returns 0 on success or negative error code on fail
/*
 * Initialize a logical IPU channel: enable the bus clock on first use,
 * program the flow/config registers for the channel type, bump the
 * per-submodule use counters and mark the channel initialized.
 * NOTE(review): the switch (channel) statement, most case labels,
 * braces, break statements, error paths and the final return are on
 * lines not visible in this view; the case bodies below are annotated
 * from the visible lines only.
 */
int32_t ipu_init_channel(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params)
unsigned long lock_flags;
dev_dbg(ipu->dev, "init channel = %d\n", IPU_CHAN_ID(channel));
/* Lazily enable the IPU bus clock when the first channel comes up. */
if (ipu->clk_enabled == false) {
ipu->clk_enabled = true;
clk_enable(ipu->ipu_clk);
spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
/* Refuse to initialize the same logical channel twice. */
if (ipu->channel_init_mask & (1L << IPU_CHAN_ID(channel))) {
dev_err(ipu->dev, "Warning: channel already initialized %d\n",
	IPU_CHAN_ID(channel));
ipu_conf = ipu_cm_read(ipu, IPU_CONF);
/* --- CSI_MEM path: CSI capture straight to memory via SMFC --- */
if (params->csi_mem.csi > 1) {
if (params->csi_mem.interlaced)
ipu->chan_is_interlaced[channel_2_dma(channel,
	IPU_OUTPUT_BUFFER)] = true;
ipu->chan_is_interlaced[channel_2_dma(channel,
	IPU_OUTPUT_BUFFER)] = false;
ipu->smfc_use_count++;
ipu->csi_channel[params->csi_mem.csi] = channel;
/* Select MIPI vs. parallel data source for this CSI. */
if (params->csi_mem.mipi_en) {
ipu_conf |= (1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
	params->csi_mem.csi));
_ipu_smfc_init(ipu, channel, params->csi_mem.mipi_id,
	params->csi_mem.csi);
ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
	params->csi_mem.csi));
_ipu_smfc_init(ipu, channel, 0, params->csi_mem.csi);
/*CSI data (include compander) dest*/
_ipu_csi_init(ipu, channel, params->csi_mem.csi);
case CSI_PRP_ENC_MEM:
if (params->csi_prp_enc_mem.csi > 1) {
/* The IC direct path is exclusive with VDI deinterlacing. */
if (ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) {
ipu->using_ic_dirct_ch = CSI_PRP_ENC_MEM;
ipu->csi_channel[params->csi_prp_enc_mem.csi] = channel;
/*Without SMFC, CSI only support parallel data source*/
ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
	params->csi_prp_enc_mem.csi));
/*CSI0/1 feed into IC*/
ipu_conf &= ~IPU_CONF_IC_INPUT;
if (params->csi_prp_enc_mem.csi)
ipu_conf |= IPU_CONF_CSI_SEL;
ipu_conf &= ~IPU_CONF_CSI_SEL;
/*PRP skip buffer in memory, only valid when RWS_EN is true*/
reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
ipu_cm_write(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
/*CSI data (include compander) dest*/
_ipu_csi_init(ipu, channel, params->csi_prp_enc_mem.csi);
_ipu_ic_init_prpenc(ipu, params, true);
/* --- CSI_PRP_VF_MEM path (mirror of the ENC path above) --- */
if (params->csi_prp_vf_mem.csi > 1) {
if (ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) {
ipu->using_ic_dirct_ch = CSI_PRP_VF_MEM;
ipu->csi_channel[params->csi_prp_vf_mem.csi] = channel;
/*Without SMFC, CSI only support parallel data source*/
ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
	params->csi_prp_vf_mem.csi));
/*CSI0/1 feed into IC*/
ipu_conf &= ~IPU_CONF_IC_INPUT;
if (params->csi_prp_vf_mem.csi)
ipu_conf |= IPU_CONF_CSI_SEL;
ipu_conf &= ~IPU_CONF_CSI_SEL;
/*PRP skip buffer in memory, only valid when RWS_EN is true*/
reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
/*CSI data (include compander) dest*/
_ipu_csi_init(ipu, channel, params->csi_prp_vf_mem.csi);
_ipu_ic_init_prpvf(ipu, params, true);
/* --- MEM_PRP_VF_MEM: memory-fed viewfinder preprocessing --- */
reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
ipu_cm_write(ipu, reg | FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
if (params->mem_prp_vf_mem.graphics_combine_en)
ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
if (params->mem_prp_vf_mem.alpha_chan_en)
ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
_ipu_ic_init_prpvf(ipu, params, false);
case MEM_VDI_PRP_VF_MEM:
/* VDI deinterlacing is exclusive with CSI->IC direct paths. */
if ((ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM) ||
	(ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)) {
ipu->using_ic_dirct_ch = MEM_VDI_PRP_VF_MEM;
ipu->vdi_use_count++;
reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
reg &= ~FS_VDI_SRC_SEL_MASK;
ipu_cm_write(ipu, reg , IPU_FS_PROC_FLOW1);
if (params->mem_prp_vf_mem.graphics_combine_en)
ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
_ipu_ic_init_prpvf(ipu, params, false);
_ipu_vdi_init(ipu, channel, params);
case MEM_VDI_PRP_VF_MEM_P:
_ipu_vdi_init(ipu, channel, params);
case MEM_VDI_PRP_VF_MEM_N:
_ipu_vdi_init(ipu, channel, params);
/* --- rotation channels bump the rotator use count --- */
ipu->rot_use_count++;
_ipu_ic_init_rotate_vf(ipu, params);
case MEM_PRP_ENC_MEM:
reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
ipu_cm_write(ipu, reg | FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
_ipu_ic_init_prpenc(ipu, params, false);
case MEM_ROT_ENC_MEM:
ipu->rot_use_count++;
_ipu_ic_init_rotate_enc(ipu, params);
/* --- MEM_PP_MEM: post-processing --- */
if (params->mem_pp_mem.graphics_combine_en)
ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
if (params->mem_pp_mem.alpha_chan_en)
ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
_ipu_ic_init_pp(ipu, params);
_ipu_ic_init_rotate_pp(ipu, params);
ipu->rot_use_count++;
/* --- MEM_DC_SYNC: DC display path (template slot 1) --- */
if (params->mem_dc_sync.di > 1) {
ipu->dc_di_assignment[1] = params->mem_dc_sync.di;
_ipu_dc_init(ipu, 1, params->mem_dc_sync.di,
	params->mem_dc_sync.interlaced,
	params->mem_dc_sync.out_pixel_fmt);
ipu->di_use_count[params->mem_dc_sync.di]++;
ipu->dmfc_use_count++;
/* --- MEM_BG_SYNC: DP background plane (DC slot 5) --- */
if (params->mem_dp_bg_sync.di > 1) {
if (params->mem_dp_bg_sync.alpha_chan_en)
ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
ipu->dc_di_assignment[5] = params->mem_dp_bg_sync.di;
_ipu_dp_init(ipu, channel, params->mem_dp_bg_sync.in_pixel_fmt,
	params->mem_dp_bg_sync.out_pixel_fmt);
_ipu_dc_init(ipu, 5, params->mem_dp_bg_sync.di,
	params->mem_dp_bg_sync.interlaced,
	params->mem_dp_bg_sync.out_pixel_fmt);
ipu->di_use_count[params->mem_dp_bg_sync.di]++;
ipu->dmfc_use_count++;
/* --- MEM_FG_SYNC: DP foreground plane --- */
_ipu_dp_init(ipu, channel, params->mem_dp_fg_sync.in_pixel_fmt,
	params->mem_dp_fg_sync.out_pixel_fmt);
if (params->mem_dp_fg_sync.alpha_chan_en)
ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
ipu->dmfc_use_count++;
/* --- DIRECT_ASYNC channels: async DC slots 8 and 9 --- */
if (params->direct_async.di > 1) {
ipu->dc_di_assignment[8] = params->direct_async.di;
_ipu_dc_init(ipu, 8, params->direct_async.di, false, IPU_PIX_FMT_GENERIC);
ipu->di_use_count[params->direct_async.di]++;
if (params->direct_async.di > 1) {
ipu->dc_di_assignment[9] = params->direct_async.di;
_ipu_dc_init(ipu, 9, params->direct_async.di, false, IPU_PIX_FMT_GENERIC);
ipu->di_use_count[params->direct_async.di]++;
dev_err(ipu->dev, "Missing channel initialization\n");
/* Enable IPU sub module */
ipu->channel_init_mask |= 1L << IPU_CHAN_ID(channel);
ipu_cm_write(ipu, ipu_conf, IPU_CONF);
spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1005 * This function is called to uninitialize a logical IPU channel.
1007 * @param ipu ipu handler
1008 * @param channel Input parameter for the logical channel ID to uninit.
/*
 * Uninitialize a logical IPU channel: refuse if its DMA channels are
 * still enabled, clear its double/triple-buffer mode bits, undo the
 * per-type submodule setup, decrement use counters and gate off any
 * submodule whose counter reached zero.  The bus clock is released once
 * IPU_CONF reads back as fully disabled.
 * NOTE(review): the switch (channel) statement, case labels, braces and
 * break statements are largely on lines not visible in this view.
 */
void ipu_uninit_channel(struct ipu_soc *ipu, ipu_channel_t channel)
unsigned long lock_flags;
uint32_t in_dma, out_dma = 0;
spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
if ((ipu->channel_init_mask & (1L << IPU_CHAN_ID(channel))) == 0) {
dev_err(ipu->dev, "Channel already uninitialized %d\n",
	IPU_CHAN_ID(channel));
spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
/* Make sure channel is disabled */
/* Get input and output dma channels */
in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
if (idma_is_set(ipu, IDMAC_CHA_EN, in_dma) ||
	idma_is_set(ipu, IDMAC_CHA_EN, out_dma)) {
"Channel %d is not disabled, disable first\n",
IPU_CHAN_ID(channel));
spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
ipu_conf = ipu_cm_read(ipu, IPU_CONF);
/* Reset the double buffer */
reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(in_dma));
ipu_cm_write(ipu, reg & ~idma_mask(in_dma), IPU_CHA_DB_MODE_SEL(in_dma));
reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(out_dma));
ipu_cm_write(ipu, reg & ~idma_mask(out_dma), IPU_CHA_DB_MODE_SEL(out_dma));
/* Reset the triple buffer */
reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(in_dma));
ipu_cm_write(ipu, reg & ~idma_mask(in_dma), IPU_CHA_TRB_MODE_SEL(in_dma));
reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(out_dma));
ipu_cm_write(ipu, reg & ~idma_mask(out_dma), IPU_CHA_TRB_MODE_SEL(out_dma));
/* Drop any secondary/alpha plane flags for IC or DP graphic inputs. */
if (_ipu_is_ic_chan(in_dma) || _ipu_is_dp_graphic_chan(in_dma)) {
ipu->sec_chan_en[IPU_CHAN_ID(channel)] = false;
ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = false;
/* --- per-channel-type teardown (mirrors ipu_init_channel) --- */
ipu->smfc_use_count--;
if (ipu->csi_channel[0] == channel) {
ipu->csi_channel[0] = CHAN_NONE;
} else if (ipu->csi_channel[1] == channel) {
ipu->csi_channel[1] = CHAN_NONE;
case CSI_PRP_ENC_MEM:
ipu->ic_use_count--;
if (ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)
ipu->using_ic_dirct_ch = 0;
_ipu_ic_uninit_prpenc(ipu);
if (ipu->csi_channel[0] == channel) {
ipu->csi_channel[0] = CHAN_NONE;
} else if (ipu->csi_channel[1] == channel) {
ipu->csi_channel[1] = CHAN_NONE;
case CSI_PRP_VF_MEM:
ipu->ic_use_count--;
if (ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM)
ipu->using_ic_dirct_ch = 0;
_ipu_ic_uninit_prpvf(ipu);
if (ipu->csi_channel[0] == channel) {
ipu->csi_channel[0] = CHAN_NONE;
} else if (ipu->csi_channel[1] == channel) {
ipu->csi_channel[1] = CHAN_NONE;
case MEM_PRP_VF_MEM:
ipu->ic_use_count--;
_ipu_ic_uninit_prpvf(ipu);
reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
case MEM_VDI_PRP_VF_MEM:
ipu->ic_use_count--;
ipu->vdi_use_count--;
if (ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM)
ipu->using_ic_dirct_ch = 0;
_ipu_ic_uninit_prpvf(ipu);
_ipu_vdi_uninit(ipu);
reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
case MEM_VDI_PRP_VF_MEM_P:
case MEM_VDI_PRP_VF_MEM_N:
case MEM_ROT_VF_MEM:
ipu->rot_use_count--;
ipu->ic_use_count--;
_ipu_ic_uninit_rotate_vf(ipu);
case MEM_PRP_ENC_MEM:
ipu->ic_use_count--;
_ipu_ic_uninit_prpenc(ipu);
reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
ipu_cm_write(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
case MEM_ROT_ENC_MEM:
ipu->rot_use_count--;
ipu->ic_use_count--;
_ipu_ic_uninit_rotate_enc(ipu);
ipu->ic_use_count--;
_ipu_ic_uninit_pp(ipu);
case MEM_ROT_PP_MEM:
ipu->rot_use_count--;
ipu->ic_use_count--;
_ipu_ic_uninit_rotate_pp(ipu);
/* DC/DP display paths: slots mirror those used at init time. */
_ipu_dc_uninit(ipu, 1);
ipu->di_use_count[ipu->dc_di_assignment[1]]--;
ipu->dc_use_count--;
ipu->dmfc_use_count--;
_ipu_dp_uninit(ipu, channel);
_ipu_dc_uninit(ipu, 5);
ipu->di_use_count[ipu->dc_di_assignment[5]]--;
ipu->dc_use_count--;
ipu->dp_use_count--;
ipu->dmfc_use_count--;
_ipu_dp_uninit(ipu, channel);
ipu->dc_use_count--;
ipu->dp_use_count--;
ipu->dmfc_use_count--;
_ipu_dc_uninit(ipu, 8);
ipu->di_use_count[ipu->dc_di_assignment[8]]--;
ipu->dc_use_count--;
_ipu_dc_uninit(ipu, 9);
ipu->di_use_count[ipu->dc_di_assignment[9]]--;
ipu->dc_use_count--;
ipu->channel_init_mask &= ~(1L << IPU_CHAN_ID(channel));
/* Gate off every submodule whose last user just went away. */
if (ipu->ic_use_count == 0)
ipu_conf &= ~IPU_CONF_IC_EN;
if (ipu->vdi_use_count == 0) {
ipu_conf &= ~IPU_CONF_ISP_EN;
ipu_conf &= ~IPU_CONF_VDI_EN;
ipu_conf &= ~IPU_CONF_IC_INPUT;
if (ipu->rot_use_count == 0)
ipu_conf &= ~IPU_CONF_ROT_EN;
if (ipu->dc_use_count == 0)
ipu_conf &= ~IPU_CONF_DC_EN;
if (ipu->dp_use_count == 0)
ipu_conf &= ~IPU_CONF_DP_EN;
if (ipu->dmfc_use_count == 0)
ipu_conf &= ~IPU_CONF_DMFC_EN;
if (ipu->di_use_count[0] == 0) {
ipu_conf &= ~IPU_CONF_DI0_EN;
if (ipu->di_use_count[1] == 0) {
ipu_conf &= ~IPU_CONF_DI1_EN;
if (ipu->smfc_use_count == 0)
ipu_conf &= ~IPU_CONF_SMFC_EN;
ipu_cm_write(ipu, ipu_conf, IPU_CONF);
spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
/* All submodules off: release the bus clock until next init. */
if (ipu_conf == 0) {
clk_disable(ipu->ipu_clk);
ipu->clk_enabled = false;
/* Catch refcount underflows from mismatched init/uninit pairs. */
WARN_ON(ipu->ic_use_count < 0);
WARN_ON(ipu->vdi_use_count < 0);
WARN_ON(ipu->rot_use_count < 0);
WARN_ON(ipu->dc_use_count < 0);
WARN_ON(ipu->dp_use_count < 0);
WARN_ON(ipu->dmfc_use_count < 0);
WARN_ON(ipu->smfc_use_count < 0);
1217 * This function is called to initialize buffer(s) for logical IPU channel.
1219 * @param ipu ipu handler
1221 * @param channel Input parameter for the logical channel ID.
1223 * @param type Input parameter which buffer to initialize.
1225 * @param pixel_fmt Input parameter for pixel format of buffer.
1226 * Pixel format is a FOURCC ASCII code.
1228 * @param width Input parameter for width of buffer in pixels.
1230 * @param height Input parameter for height of buffer in pixels.
1232 * @param stride Input parameter for stride length of buffer
1235 * @param rot_mode Input parameter for rotation setting of buffer.
1236 * A rotation setting other than
1237 * IPU_ROTATE_VERT_FLIP
1238 * should only be used for input buffers of
1239 * rotation channels.
1241 * @param phyaddr_0 Input parameter buffer 0 physical address.
1243 * @param phyaddr_1 Input parameter buffer 1 physical address.
1244 * Setting this to a value other than NULL enables
1245 * double buffering mode.
1247 * @param phyaddr_2 Input parameter buffer 2 physical address.
1248 * Setting this to a value other than NULL enables
1249 * triple buffering mode, phyaddr_1 should not be
1252 * @param u private u offset for additional cropping,
1255 * @param v private v offset for additional cropping,
1258 * @return Returns 0 on success or negative error code on fail
/*
 * Configure the IDMAC buffer(s) for a logical channel: validates stride and
 * width, programs the channel-parameter memory (CPMEM), applies per-engine
 * burst-size restrictions, then selects single/double/triple buffering.
 * NOTE(review): this excerpt elides some original lines (returns, closing
 * braces) - comments below describe only what is visible here.
 */
1260 int32_t ipu_init_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
1263 			uint16_t width, uint16_t height,
1265 			ipu_rotate_mode_t rot_mode,
1266 			dma_addr_t phyaddr_0, dma_addr_t phyaddr_1,
1267 			dma_addr_t phyaddr_2,
1268 			uint32_t u, uint32_t v)
1270 	unsigned long lock_flags;
1273 	uint32_t burst_size;
1275 	dma_chan = channel_2_dma(channel, type);
1276 	if (!idma_is_valid(dma_chan))
/* Stride may not be smaller than one line of pixels; clamp it up. */
1279 	if (stride < width * bytes_per_pixel(pixel_fmt))
1280 		stride = width * bytes_per_pixel(pixel_fmt);
1284 			"Stride not 32-bit aligned, stride = %d\n", stride);
1287 	/* IC & IRT channels' width must be multiple of 8 pixels */
1288 	if ((_ipu_is_ic_chan(dma_chan) || _ipu_is_irt_chan(dma_chan))
1290 		dev_err(ipu->dev, "Width must be 8 pixel multiple\n");
1294 	/* IPUv3EX and IPUv3M support triple buffer */
1295 	if ((!_ipu_is_trb_chan(dma_chan)) && phyaddr_2) {
1296 		dev_err(ipu->dev, "Chan%d doesn't support triple buffer "
1297 				   "mode\n", dma_chan);
/* Triple buffering requires buf1 as well - reject buf2-without-buf1. */
1300 	if (!phyaddr_1 && phyaddr_2) {
1301 		dev_err(ipu->dev, "Chan%d's buf1 physical addr is NULL for "
1302 				   "triple buffer mode\n", dma_chan);
1306 	/* Build parameter memory data for DMA channel */
1307 	_ipu_ch_param_init(ipu, dma_chan, pixel_fmt, width, height, stride, u, v, 0,
1308 			   phyaddr_0, phyaddr_1, phyaddr_2);
1310 	/* Set correlative channel parameter of local alpha channel */
1311 	if ((_ipu_is_ic_graphic_chan(dma_chan) ||
1312 	     _ipu_is_dp_graphic_chan(dma_chan)) &&
1313 	    (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] == true)) {
1314 		_ipu_ch_param_set_alpha_use_separate_channel(ipu, dma_chan, true);
1315 		_ipu_ch_param_set_alpha_buffer_memory(ipu, dma_chan);
1316 		_ipu_ch_param_set_alpha_condition_read(ipu, dma_chan);
1317 		/* fix alpha width as 8 and burst size as 16*/
1318 		_ipu_ch_params_set_alpha_width(ipu, dma_chan, 8);
1319 		_ipu_ch_param_set_burst_size(ipu, dma_chan, 16);
1320 	} else if (_ipu_is_ic_graphic_chan(dma_chan) &&
1321 		   ipu_pixel_format_has_alpha(pixel_fmt))
1322 		_ipu_ch_param_set_alpha_use_separate_channel(ipu, dma_chan, false);
1325 		_ipu_ch_param_set_rotation(ipu, dma_chan, rot_mode);
1327 	/* IC and ROT channels have restriction of 8 or 16 pix burst length */
1328 	if (_ipu_is_ic_chan(dma_chan)) {
1329 		if ((width % 16) == 0)
1330 			_ipu_ch_param_set_burst_size(ipu, dma_chan, 16);
1332 			_ipu_ch_param_set_burst_size(ipu, dma_chan, 8);
1333 	} else if (_ipu_is_irt_chan(dma_chan)) {
1334 		_ipu_ch_param_set_burst_size(ipu, dma_chan, 8);
1335 		_ipu_ch_param_set_block_mode(ipu, dma_chan);
1336 	} else if (_ipu_is_dmfc_chan(dma_chan)) {
/* DMFC setup reads the burst size chosen by _ipu_ch_param_init(). */
1337 		burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
1338 		spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
1339 		_ipu_dmfc_set_wait4eot(ipu, dma_chan, width);
1340 		_ipu_dmfc_set_burst_size(ipu, dma_chan, burst_size);
1341 		spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1344 	if (_ipu_disp_chan_is_interlaced(ipu, channel) ||
1345 		ipu->chan_is_interlaced[dma_chan])
1346 		_ipu_ch_param_set_interlaced_scan(ipu, dma_chan);
1348 	if (_ipu_is_ic_chan(dma_chan) || _ipu_is_irt_chan(dma_chan)) {
1349 		burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
1350 		_ipu_ic_idma_init(ipu, dma_chan, width, height, burst_size,
1352 	} else if (_ipu_is_smfc_chan(dma_chan)) {
1353 		burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
/* Generic (raw) data with bpp code 3 or 5 gets a reduced SMFC burst;
 * the >>4 / >>2 scaling converts pixel burst to SMFC units -
 * NOTE(review): exact unit semantics not visible here, confirm vs RM. */
1354 		if ((pixel_fmt == IPU_PIX_FMT_GENERIC) &&
1355 			((_ipu_ch_param_get_bpp(ipu, dma_chan) == 5) ||
1356 			(_ipu_ch_param_get_bpp(ipu, dma_chan) == 3)))
1357 			burst_size = burst_size >> 4;
1359 			burst_size = burst_size >> 2;
1360 		_ipu_smfc_set_burst_size(ipu, channel, burst_size-1);
1363 	if (idma_is_set(ipu, IDMAC_CHA_PRI, dma_chan) && !cpu_is_mx53()
1365 		_ipu_ch_param_set_high_priority(ipu, dma_chan);
1367 	_ipu_ch_param_dump(ipu, dma_chan);
1369 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
/* Triple-buffer mode (rev >= 2 only): clear double-buffer select, set
 * triple-buffer select, and program the sub-CPMEM numbers for buf 2. */
1370 	if (phyaddr_2 && g_ipu_hw_rev >= 2) {
1371 		reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(dma_chan));
1372 		reg &= ~idma_mask(dma_chan);
1373 		ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(dma_chan));
1375 		reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
1376 		reg |= idma_mask(dma_chan);
1377 		ipu_cm_write(ipu, reg, IPU_CHA_TRB_MODE_SEL(dma_chan));
1379 		/* Set IDMAC third buffer's cpmem number */
1380 		/* See __ipu_ch_get_third_buf_cpmem_num() for mapping */
1381 		ipu_idmac_write(ipu, 0x00444047L, IDMAC_SUB_ADDR_4);
1382 		ipu_idmac_write(ipu, 0x46004241L, IDMAC_SUB_ADDR_3);
1383 		ipu_idmac_write(ipu, 0x00000045L, IDMAC_SUB_ADDR_1);
1385 		/* Reset to buffer 0 */
1386 		ipu_cm_write(ipu, tri_cur_buf_mask(dma_chan),
1387 				IPU_CHA_TRIPLE_CUR_BUF(dma_chan));
/* else: double- or single-buffer path - clear triple-select, then set or
 * clear the double-buffer select bit (the deciding condition is elided
 * from this excerpt; presumably phyaddr_1 != 0 - TODO confirm). */
1389 		reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
1390 		reg &= ~idma_mask(dma_chan);
1391 		ipu_cm_write(ipu, reg, IPU_CHA_TRB_MODE_SEL(dma_chan));
1393 		reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(dma_chan));
1395 			reg |= idma_mask(dma_chan);
1397 			reg &= ~idma_mask(dma_chan);
1398 		ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(dma_chan));
1400 		/* Reset to buffer 0 */
1401 		ipu_cm_write(ipu, idma_mask(dma_chan),
1402 				IPU_CHA_CUR_BUF(dma_chan));
1405 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1409 EXPORT_SYMBOL(ipu_init_channel_buffer);
1412 * This function is called to update the physical address of a buffer for
1413 * a logical IPU channel.
1415 * @param ipu ipu handler
1416 * @param channel Input parameter for the logical channel ID.
1418 * @param type Input parameter which buffer to initialize.
1420 * @param bufNum Input parameter for buffer number to update.
1421 * 0 or 1 are the only valid values.
1423 * @param phyaddr Input parameter buffer physical address.
1425 * @return This function returns 0 on success or negative error code on
1426 * fail. This function will fail if the buffer is set to ready.
/*
 * Update a buffer's physical address in CPMEM, but only while that buffer
 * is NOT marked ready (the IDMAC may be fetching a ready buffer).
 * Performed under ipu_lock; bufNum selects which BUFx_RDY register to test.
 */
1428 int32_t ipu_update_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
1429 				ipu_buffer_t type, uint32_t bufNum, dma_addr_t phyaddr)
1433 	unsigned long lock_flags;
1434 	uint32_t dma_chan = channel_2_dma(channel, type);
1435 	if (dma_chan == IDMA_CHAN_INVALID)
1438 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
/* Read the ready flag for the requested buffer number (0/1/2). */
1441 		reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan));
1442 	else if (bufNum == 1)
1443 		reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan));
1445 		reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan));
/* Only rewrite the address while the buffer is not ready; the failure
 * path (buffer busy) is elided from this excerpt. */
1447 	if ((reg & idma_mask(dma_chan)) == 0)
1448 		_ipu_ch_param_set_buffer(ipu, dma_chan, bufNum, phyaddr);
1452 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1455 EXPORT_SYMBOL(ipu_update_channel_buffer);
1459 * This function is called to initialize a buffer for logical IPU channel.
1461 * @param ipu ipu handler
1462 * @param channel Input parameter for the logical channel ID.
1464 * @param type Input parameter which buffer to initialize.
1466 * @param pixel_fmt Input parameter for pixel format of buffer.
1467 * Pixel format is a FOURCC ASCII code.
1469 * @param width Input parameter for width of buffer in pixels.
1471 * @param height Input parameter for height of buffer in pixels.
1473 * @param stride Input parameter for stride length of buffer
1476 * @param u predefined private u offset for additional cropping,
1479 * @param v predefined private v offset for additional cropping,
1482 * @param vertical_offset vertical offset for Y coordinate
1483 * in the existed frame
1486 * @param horizontal_offset horizontal offset for X coordinate
1487 * in the existed frame
1490 * @return Returns 0 on success or negative error code on fail
1491 * This function will fail if any buffer is set to ready.
/*
 * Update the crop offsets (vertical/horizontal, plus private u/v offsets)
 * of an already-initialized channel buffer. Refuses the update while any
 * relevant buffer-ready flag is set: buf0, buf1, or - for triple-buffer
 * capable channels actually in TRB mode - buf2.
 */
1494 int32_t ipu_update_channel_offset(struct ipu_soc *ipu,
1495 				ipu_channel_t channel, ipu_buffer_t type,
1497 				uint16_t width, uint16_t height,
1499 				uint32_t u, uint32_t v,
1500 				uint32_t vertical_offset, uint32_t horizontal_offset)
1503 	unsigned long lock_flags;
1504 	uint32_t dma_chan = channel_2_dma(channel, type);
1506 	if (dma_chan == IDMA_CHAN_INVALID)
1509 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
/* Busy check: buf2 only matters when the channel supports triple
 * buffering AND TRB mode is currently selected for it. */
1511 	if ((ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan)) & idma_mask(dma_chan)) ||
1512 		(ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan)) & idma_mask(dma_chan)) ||
1513 		((ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan)) & idma_mask(dma_chan)) &&
1514 		(ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan)) & idma_mask(dma_chan)) &&
1515 		_ipu_is_trb_chan(dma_chan)))
1518 		_ipu_ch_offset_update(ipu, dma_chan, pixel_fmt, width, height, stride,
1519 				      u, v, 0, vertical_offset, horizontal_offset);
1521 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1524 EXPORT_SYMBOL(ipu_update_channel_offset);
1528 * This function is called to set a channel's buffer as ready.
1530 * @param ipu ipu handler
1531 * @param channel Input parameter for the logical channel ID.
1533 * @param type Input parameter which buffer to initialize.
1535 * @param bufNum Input parameter for which buffer number set to
1538 * @return Returns 0 on success or negative error code on fail
/*
 * Mark one buffer (bufNum 0, 1, or 2) of a logical channel as ready so the
 * IDMAC will fetch it. Writing the channel's mask bit to the matching
 * BUFx_RDY register sets the ready flag; done under ipu_lock.
 */
1540 int32_t ipu_select_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
1541 			ipu_buffer_t type, uint32_t bufNum)
1543 	uint32_t dma_chan = channel_2_dma(channel, type);
1544 	unsigned long lock_flags;
1546 	if (dma_chan == IDMA_CHAN_INVALID)
1549 	/* Mark buffer to be ready. */
1550 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
1552 		ipu_cm_write(ipu, idma_mask(dma_chan),
1553 			     IPU_CHA_BUF0_RDY(dma_chan));
1554 	else if (bufNum == 1)
1555 		ipu_cm_write(ipu, idma_mask(dma_chan),
1556 			     IPU_CHA_BUF1_RDY(dma_chan));
1558 		ipu_cm_write(ipu, idma_mask(dma_chan),
1559 			     IPU_CHA_BUF2_RDY(dma_chan));
1560 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1563 EXPORT_SYMBOL(ipu_select_buffer);
1566  * This function marks the input buffers of all three VDI PRP VF channels as ready.
1568 * @param ipu ipu handler
1569 * @param bufNum Input parameter for which buffer number set to
1572 * @return Returns 0 on success or negative error code on fail
/*
 * Mark the input buffers of all three VDI PRP VF channels (previous /
 * current / next field) as ready in one register write, so the de-interlacer
 * consumes a coherent field triplet. mask_bit ORs the three channels' masks.
 */
1574 int32_t ipu_select_multi_vdi_buffer(struct ipu_soc *ipu, uint32_t bufNum)
1577 	uint32_t dma_chan = channel_2_dma(MEM_VDI_PRP_VF_MEM, IPU_INPUT_BUFFER);
1579 		idma_mask(channel_2_dma(MEM_VDI_PRP_VF_MEM_P, IPU_INPUT_BUFFER))|
1580 		idma_mask(dma_chan)|
1581 		idma_mask(channel_2_dma(MEM_VDI_PRP_VF_MEM_N, IPU_INPUT_BUFFER));
1582 	unsigned long lock_flags;
1584 	/* Mark buffers to be ready. */
1585 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
/* bufNum selects BUF0 vs BUF1; the conditional is elided in this excerpt. */
1587 		ipu_cm_write(ipu, mask_bit, IPU_CHA_BUF0_RDY(dma_chan));
1589 		ipu_cm_write(ipu, mask_bit, IPU_CHA_BUF1_RDY(dma_chan));
1590 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1593 EXPORT_SYMBOL(ipu_select_multi_vdi_buffer);
/*
 * Frame Synchronization Unit (FSU) routing tables, indexed by
 * IPU_CHAN_ID(channel). Each entry is the selector value programmed into
 * the FS_*_SEL fields of the IPU_FS_PROC_FLOW*/IPU_FS_DISP_FLOW1 registers
 * by ipu_link_channels(); NA marks channels that cannot serve that role.
 * Values come from the i.MX IPUv3 reference manual - TODO confirm against RM.
 */
1596 static int proc_dest_sel[] = {
1597 	0, 1, 1, 3, 5, 5, 4, 7, 8, 9, 10, 11, 12, 14, 15, 16,
1598 	0, 1, 1, 5, 5, 5, 5, 5, 7, 8, 9, 10, 11, 12, 14, 31 };
1599 static int proc_src_sel[] = { 0, 6, 7, 6, 7, 8, 5, NA, NA, NA,
1600 			      NA, NA, NA, NA, NA, 1, 2, 3, 4, 7, 8, NA, 8, NA };
1601 static int disp_src_sel[] = { 0, 6, 7, 8, 3, 4, 5, NA, NA, NA,
1602 			      NA, NA, NA, NA, NA, 1, NA, 2, NA, 3, 4, 4, 4, 4 };
1606 * This function links 2 channels together for automatic frame
1607 * synchronization. The output of the source channel is linked to the input of
1608 * the destination channel.
1610 * @param ipu ipu handler
1611 * @param src_ch Input parameter for the logical channel ID of
1612 * the source channel.
1614 * @param dest_ch Input parameter for the logical channel ID of
1615 * the destination channel.
1617 * @return This function returns 0 on success or negative error code on
/*
 * Link src_ch's output to dest_ch's input in the FSU for automatic frame
 * synchronization. Two switch statements (their `switch`/`case`/`break`
 * framing is partially elided in this excerpt): the first keys on src_ch
 * and programs a *_DEST_SEL field from proc_dest_sel[], the second keys on
 * dest_ch and programs a *_SRC_SEL field from proc_src_sel[]/disp_src_sel[].
 * All four FS flow registers are read-modify-written under ipu_lock.
 */
1620 int32_t ipu_link_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch)
1623 	unsigned long lock_flags;
1624 	uint32_t fs_proc_flow1;
1625 	uint32_t fs_proc_flow2;
1626 	uint32_t fs_proc_flow3;
1627 	uint32_t fs_disp_flow1;
1629 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
1631 	fs_proc_flow1 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
1632 	fs_proc_flow2 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW2);
1633 	fs_proc_flow3 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW3);
1634 	fs_disp_flow1 = ipu_cm_read(ipu, IPU_FS_DISP_FLOW1);
/* --- switch (src_ch): set the destination selector for the source --- */
/* SMFC0..SMFC3 destinations (case labels elided in this excerpt) */
1638 		fs_proc_flow3 &= ~FS_SMFC0_DEST_SEL_MASK;
1640 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1641 			FS_SMFC0_DEST_SEL_OFFSET;
1644 		fs_proc_flow3 &= ~FS_SMFC1_DEST_SEL_MASK;
1646 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1647 			FS_SMFC1_DEST_SEL_OFFSET;
1650 		fs_proc_flow3 &= ~FS_SMFC2_DEST_SEL_MASK;
1652 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1653 			FS_SMFC2_DEST_SEL_OFFSET;
1656 		fs_proc_flow3 &= ~FS_SMFC3_DEST_SEL_MASK;
1658 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1659 			FS_SMFC3_DEST_SEL_OFFSET;
1661 	case CSI_PRP_ENC_MEM:
1662 		fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
1664 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1665 			FS_PRPENC_DEST_SEL_OFFSET;
1667 	case CSI_PRP_VF_MEM:
1668 		fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1670 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1671 			FS_PRPVF_DEST_SEL_OFFSET;
1674 		fs_proc_flow2 &= ~FS_PP_DEST_SEL_MASK;
1676 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1677 		    FS_PP_DEST_SEL_OFFSET;
1679 	case MEM_ROT_PP_MEM:
1680 		fs_proc_flow2 &= ~FS_PP_ROT_DEST_SEL_MASK;
1682 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1683 			FS_PP_ROT_DEST_SEL_OFFSET;
1685 	case MEM_PRP_ENC_MEM:
1686 		fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
1688 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1689 			FS_PRPENC_DEST_SEL_OFFSET;
1691 	case MEM_ROT_ENC_MEM:
1692 		fs_proc_flow2 &= ~FS_PRPENC_ROT_DEST_SEL_MASK;
1694 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1695 			FS_PRPENC_ROT_DEST_SEL_OFFSET;
1697 	case MEM_PRP_VF_MEM:
1698 		fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1700 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1701 			FS_PRPVF_DEST_SEL_OFFSET;
1703 	case MEM_VDI_PRP_VF_MEM:
1704 		fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1706 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1707 			FS_PRPVF_DEST_SEL_OFFSET;
1709 	case MEM_ROT_VF_MEM:
1710 		fs_proc_flow2 &= ~FS_PRPVF_ROT_DEST_SEL_MASK;
1712 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1713 			FS_PRPVF_ROT_DEST_SEL_OFFSET;
/* --- switch (dest_ch): set the source selector for the destination --- */
1722 		fs_proc_flow1 &= ~FS_PP_SRC_SEL_MASK;
1724 		    proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PP_SRC_SEL_OFFSET;
1726 	case MEM_ROT_PP_MEM:
1727 		fs_proc_flow1 &= ~FS_PP_ROT_SRC_SEL_MASK;
1729 		    proc_src_sel[IPU_CHAN_ID(src_ch)] <<
1730 			FS_PP_ROT_SRC_SEL_OFFSET;
1732 	case MEM_PRP_ENC_MEM:
1733 		fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1735 		    proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
1737 	case MEM_ROT_ENC_MEM:
1738 		fs_proc_flow1 &= ~FS_PRPENC_ROT_SRC_SEL_MASK;
1740 		    proc_src_sel[IPU_CHAN_ID(src_ch)] <<
1741 			FS_PRPENC_ROT_SRC_SEL_OFFSET;
1743 	case MEM_PRP_VF_MEM:
1744 		fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1746 		    proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
1748 	case MEM_VDI_PRP_VF_MEM:
1749 		fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1751 		    proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
1753 	case MEM_ROT_VF_MEM:
1754 		fs_proc_flow1 &= ~FS_PRPVF_ROT_SRC_SEL_MASK;
1756 		    proc_src_sel[IPU_CHAN_ID(src_ch)] <<
1757 			FS_PRPVF_ROT_SRC_SEL_OFFSET;
/* Display-side destinations use disp_src_sel[] and FS_DISP_FLOW1
 * (case labels for the DC/DP sync & async channels elided here). */
1760 		fs_disp_flow1 &= ~FS_DC1_SRC_SEL_MASK;
1762 		    disp_src_sel[IPU_CHAN_ID(src_ch)] << FS_DC1_SRC_SEL_OFFSET;
1765 		fs_disp_flow1 &= ~FS_DP_SYNC0_SRC_SEL_MASK;
1767 		    disp_src_sel[IPU_CHAN_ID(src_ch)] <<
1768 			FS_DP_SYNC0_SRC_SEL_OFFSET;
1771 		fs_disp_flow1 &= ~FS_DP_SYNC1_SRC_SEL_MASK;
1773 		    disp_src_sel[IPU_CHAN_ID(src_ch)] <<
1774 			FS_DP_SYNC1_SRC_SEL_OFFSET;
1777 		fs_disp_flow1 &= ~FS_DC2_SRC_SEL_MASK;
1779 		    disp_src_sel[IPU_CHAN_ID(src_ch)] << FS_DC2_SRC_SEL_OFFSET;
1782 		fs_disp_flow1 &= ~FS_DP_ASYNC0_SRC_SEL_MASK;
1784 		    disp_src_sel[IPU_CHAN_ID(src_ch)] <<
1785 			FS_DP_ASYNC0_SRC_SEL_OFFSET;
1788 		fs_disp_flow1 &= ~FS_DP_ASYNC1_SRC_SEL_MASK;
1790 		    disp_src_sel[IPU_CHAN_ID(src_ch)] <<
1791 			FS_DP_ASYNC1_SRC_SEL_OFFSET;
/* Commit all four modified flow registers back to hardware. */
1798 	ipu_cm_write(ipu, fs_proc_flow1, IPU_FS_PROC_FLOW1);
1799 	ipu_cm_write(ipu, fs_proc_flow2, IPU_FS_PROC_FLOW2);
1800 	ipu_cm_write(ipu, fs_proc_flow3, IPU_FS_PROC_FLOW3);
1801 	ipu_cm_write(ipu, fs_disp_flow1, IPU_FS_DISP_FLOW1);
1804 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1807 EXPORT_SYMBOL(ipu_link_channels);
1810 * This function unlinks 2 channels and disables automatic frame
1813 * @param ipu ipu handler
1814 * @param src_ch Input parameter for the logical channel ID of
1815 * the source channel.
1817 * @param dest_ch Input parameter for the logical channel ID of
1818 * the destination channel.
1820 * @return This function returns 0 on success or negative error code on
/*
 * Undo ipu_link_channels(): clear the FSU destination selector for src_ch
 * and the source selector for dest_ch (mirror structure of the link
 * function - two switches, many case labels elided in this excerpt),
 * then write the four flow registers back under ipu_lock.
 */
1823 int32_t ipu_unlink_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch)
1826 	unsigned long lock_flags;
1827 	uint32_t fs_proc_flow1;
1828 	uint32_t fs_proc_flow2;
1829 	uint32_t fs_proc_flow3;
1830 	uint32_t fs_disp_flow1;
1832 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
1834 	fs_proc_flow1 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
1835 	fs_proc_flow2 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW2);
1836 	fs_proc_flow3 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW3);
1837 	fs_disp_flow1 = ipu_cm_read(ipu, IPU_FS_DISP_FLOW1);
/* --- switch (src_ch): clear its destination selector --- */
1841 		fs_proc_flow3 &= ~FS_SMFC0_DEST_SEL_MASK;
1844 		fs_proc_flow3 &= ~FS_SMFC1_DEST_SEL_MASK;
1847 		fs_proc_flow3 &= ~FS_SMFC2_DEST_SEL_MASK;
1850 		fs_proc_flow3 &= ~FS_SMFC3_DEST_SEL_MASK;
1852 	case CSI_PRP_ENC_MEM:
1853 		fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
1855 	case CSI_PRP_VF_MEM:
1856 		fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1859 		fs_proc_flow2 &= ~FS_PP_DEST_SEL_MASK;
1861 	case MEM_ROT_PP_MEM:
1862 		fs_proc_flow2 &= ~FS_PP_ROT_DEST_SEL_MASK;
1864 	case MEM_PRP_ENC_MEM:
1865 		fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
1867 	case MEM_ROT_ENC_MEM:
1868 		fs_proc_flow2 &= ~FS_PRPENC_ROT_DEST_SEL_MASK;
1870 	case MEM_PRP_VF_MEM:
1871 		fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1873 	case MEM_VDI_PRP_VF_MEM:
1874 		fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1876 	case MEM_ROT_VF_MEM:
1877 		fs_proc_flow2 &= ~FS_PRPVF_ROT_DEST_SEL_MASK;
/* --- switch (dest_ch): clear its source selector --- */
1886 		fs_proc_flow1 &= ~FS_PP_SRC_SEL_MASK;
1888 	case MEM_ROT_PP_MEM:
1889 		fs_proc_flow1 &= ~FS_PP_ROT_SRC_SEL_MASK;
1891 	case MEM_PRP_ENC_MEM:
1892 		fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1894 	case MEM_ROT_ENC_MEM:
1895 		fs_proc_flow1 &= ~FS_PRPENC_ROT_SRC_SEL_MASK;
1897 	case MEM_PRP_VF_MEM:
1898 		fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1900 	case MEM_VDI_PRP_VF_MEM:
1901 		fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1903 	case MEM_ROT_VF_MEM:
1904 		fs_proc_flow1 &= ~FS_PRPVF_ROT_SRC_SEL_MASK;
/* Display-side selectors (DC/DP sync & async; case labels elided). */
1907 		fs_disp_flow1 &= ~FS_DC1_SRC_SEL_MASK;
1910 		fs_disp_flow1 &= ~FS_DP_SYNC0_SRC_SEL_MASK;
1913 		fs_disp_flow1 &= ~FS_DP_SYNC1_SRC_SEL_MASK;
1916 		fs_disp_flow1 &= ~FS_DC2_SRC_SEL_MASK;
1919 		fs_disp_flow1 &= ~FS_DP_ASYNC0_SRC_SEL_MASK;
1922 		fs_disp_flow1 &= ~FS_DP_ASYNC1_SRC_SEL_MASK;
/* Commit the cleared selectors back to hardware. */
1929 	ipu_cm_write(ipu, fs_proc_flow1, IPU_FS_PROC_FLOW1);
1930 	ipu_cm_write(ipu, fs_proc_flow2, IPU_FS_PROC_FLOW2);
1931 	ipu_cm_write(ipu, fs_proc_flow3, IPU_FS_PROC_FLOW3);
1932 	ipu_cm_write(ipu, fs_disp_flow1, IPU_FS_DISP_FLOW1);
1935 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1938 EXPORT_SYMBOL(ipu_unlink_channels);
1941  * This function checks whether a logical channel is enabled.
1943 * @param ipu ipu handler
1944 * @param channel Input parameter for the logical channel ID.
1946 * @return This function returns 1 while request channel is enabled or
1947 * 0 for not enabled.
/*
 * Report whether either DMA channel (video-in or output) of a logical
 * channel is currently enabled in the IDMAC. The "busy" return statements
 * are elided from this excerpt; presumably returns 1 when either enable
 * bit is set, 0 otherwise.
 */
1949 int32_t ipu_is_channel_busy(struct ipu_soc *ipu, ipu_channel_t channel)
1955 	out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
1956 	in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
1958 	reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
1959 	if (reg & idma_mask(in_dma))
1961 	reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
1962 	if (reg & idma_mask(out_dma))
1966 EXPORT_SYMBOL(ipu_is_channel_busy);
1969 * This function enables a logical channel.
1971 * @param ipu ipu handler
1972 * @param channel Input parameter for the logical channel ID.
1974 * @return This function returns 0 on success or negative error code on
/*
 * Enable a logical channel: re-assert the IPU_CONF sub-module enable bits
 * implied by current use counts, enable the channel's IDMAC DMA channel(s)
 * (input, output, and optional graphics/alpha companions), then kick the
 * display path (DP/DC) or IC/IRT task as appropriate. Fails if the channel
 * is already enabled. Whole body runs under ipu_lock.
 */
1977 int32_t ipu_enable_channel(struct ipu_soc *ipu, ipu_channel_t channel)
1980 	unsigned long lock_flags;
1987 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
1989 	if (ipu->channel_enable_mask & (1L << IPU_CHAN_ID(channel))) {
1990 		dev_err(ipu->dev, "Warning: channel already enabled %d\n",
1991 			IPU_CHAN_ID(channel));
1992 		spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1996 	/* Get input and output dma channels */
1997 	out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
1998 	in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
/* Rebuild IPU_CONF enables from the use counts (mirrors the disable
 * logic in ipu_uninit_channel, which clears bits at count == 0). */
2000 	ipu_conf = ipu_cm_read(ipu, IPU_CONF);
2001 	if (ipu->di_use_count[0] > 0) {
2002 		ipu_conf |= IPU_CONF_DI0_EN;
2004 	if (ipu->di_use_count[1] > 0) {
2005 		ipu_conf |= IPU_CONF_DI1_EN;
2007 	if (ipu->dp_use_count > 0)
2008 		ipu_conf |= IPU_CONF_DP_EN;
2009 	if (ipu->dc_use_count > 0)
2010 		ipu_conf |= IPU_CONF_DC_EN;
2011 	if (ipu->dmfc_use_count > 0)
2012 		ipu_conf |= IPU_CONF_DMFC_EN;
2013 	if (ipu->ic_use_count > 0)
2014 		ipu_conf |= IPU_CONF_IC_EN;
2015 	if (ipu->vdi_use_count > 0) {
2016 		ipu_conf |= IPU_CONF_ISP_EN;
2017 		ipu_conf |= IPU_CONF_VDI_EN;
2018 		ipu_conf |= IPU_CONF_IC_INPUT;
2020 	if (ipu->rot_use_count > 0)
2021 		ipu_conf |= IPU_CONF_ROT_EN;
2022 	if (ipu->smfc_use_count > 0)
2023 		ipu_conf |= IPU_CONF_SMFC_EN;
2024 	ipu_cm_write(ipu, ipu_conf, IPU_CONF);
/* Enable the IDMAC DMA channels that exist for this logical channel. */
2026 	if (idma_is_valid(in_dma)) {
2027 		reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
2028 		ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_CHA_EN(in_dma));
2030 	if (idma_is_valid(out_dma)) {
2031 		reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
2032 		ipu_idmac_write(ipu, reg | idma_mask(out_dma), IDMAC_CHA_EN(out_dma));
/* Optional companion channels: graphics-in (sec) and alpha-in (thrd),
 * only for the IC/VDI processing channels listed below. */
2035 	if ((ipu->sec_chan_en[IPU_CHAN_ID(channel)]) &&
2036 		((channel == MEM_PP_MEM) || (channel == MEM_PRP_VF_MEM) ||
2037 		 (channel == MEM_VDI_PRP_VF_MEM))) {
2038 		sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
2039 		reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(sec_dma));
2040 		ipu_idmac_write(ipu, reg | idma_mask(sec_dma), IDMAC_CHA_EN(sec_dma));
2042 	if ((ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) &&
2043 		((channel == MEM_PP_MEM) || (channel == MEM_PRP_VF_MEM))) {
2044 		thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
2045 		reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
2046 		ipu_idmac_write(ipu, reg | idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
/* Mark the graphics channel as using a separate alpha channel. */
2048 		sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
2049 		reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
2050 		ipu_idmac_write(ipu, reg | idma_mask(sec_dma), IDMAC_SEP_ALPHA);
2051 	} else if ((ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) &&
2052 		   ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC))) {
2053 		thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
2054 		reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
2055 		ipu_idmac_write(ipu, reg | idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
2056 		reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
2057 		ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_SEP_ALPHA);
/* Display channels: enable the watermark and start the DP/DC flow. */
2060 	if ((channel == MEM_DC_SYNC) || (channel == MEM_BG_SYNC) ||
2061 	    (channel == MEM_FG_SYNC)) {
2062 		reg = ipu_idmac_read(ipu, IDMAC_WM_EN(in_dma));
2063 		ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_WM_EN(in_dma));
2065 		_ipu_dp_dc_enable(ipu, channel);
2068 	if (_ipu_is_ic_chan(in_dma) || _ipu_is_ic_chan(out_dma) ||
2069 		_ipu_is_irt_chan(in_dma) || _ipu_is_irt_chan(out_dma))
2070 		_ipu_ic_enable_task(ipu, channel);
2072 	ipu->channel_enable_mask |= 1L << IPU_CHAN_ID(channel);
2074 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2078 EXPORT_SYMBOL(ipu_enable_channel);
2081  * This function checks whether a buffer is ready for a logical channel.
2083 * @param ipu ipu handler
2084 * @param channel Input parameter for the logical channel ID.
2086 * @param type Input parameter which buffer to clear.
2088 * @param bufNum Input parameter for which buffer number clear
/*
 * Test whether a given buffer (bufNum 0/1/2) of a logical channel is
 * currently marked ready. Reads the matching BUFx_RDY register and masks
 * the channel bit; the return statements are elided from this excerpt.
 */
2092 int32_t ipu_check_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
2095 	uint32_t dma_chan = channel_2_dma(channel, type);
2098 	if (dma_chan == IDMA_CHAN_INVALID)
2102 		reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan));
2103 	else if (bufNum == 1)
2104 		reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan));
2106 		reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan));
2108 	if (reg & idma_mask(dma_chan))
2113 EXPORT_SYMBOL(ipu_check_buffer_ready);
2116  * This function clears the buffer-ready flag for a logical channel.
2118 * @param ipu ipu handler
2119 * @param channel Input parameter for the logical channel ID.
2121 * @param type Input parameter which buffer to clear.
2123 * @param bufNum Input parameter for which buffer number clear
/*
 * Clear the ready flag of one buffer (bufNum 0/1/2) of a logical channel.
 * The BUFx_RDY registers set bits on write, so IPU_GPR is toggled around
 * the write to flip them into write-one-to-CLEAR mode (see inline
 * comments), all under ipu_lock.
 */
2127 void ipu_clear_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
2130 	unsigned long lock_flags;
2131 	uint32_t dma_ch = channel_2_dma(channel, type);
2133 	if (!idma_is_valid(dma_ch))
2136 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2137 	ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* write one to clear */
2139 		ipu_cm_write(ipu, idma_mask(dma_ch),
2140 			IPU_CHA_BUF0_RDY(dma_ch));
2141 	else if (bufNum == 1)
2142 		ipu_cm_write(ipu, idma_mask(dma_ch),
2143 			IPU_CHA_BUF1_RDY(dma_ch));
2145 		ipu_cm_write(ipu, idma_mask(dma_ch),
2146 			IPU_CHA_BUF2_RDY(dma_ch));
2147 	ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
2148 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2150 EXPORT_SYMBOL(ipu_clear_buffer_ready);
/*
 * IRQ handler registered temporarily by ipu_disable_channel(): dev_id is
 * the on-stack completion the waiter blocks on. The body (presumably
 * complete(comp) and the return) is elided from this excerpt.
 */
2152 static irqreturn_t disable_chan_irq_handler(int irq, void *dev_id)
2154 	struct completion *comp = dev_id;
2161 * This function disables a logical channel.
2163 * @param ipu ipu handler
2164 * @param channel Input parameter for the logical channel ID.
2166 * @param wait_for_stop Flag to set whether to wait for channel end
2167 * of frame or return immediately.
2169 * @return This function returns 0 on success or negative error code on
/*
 * Disable a logical channel. Optionally waits (wait_for_stop) for busy
 * IDMAC channels to drain - either by polling the BG sync EOF interrupt
 * status (for MEM_FG_SYNC) or by registering a temporary EOF IRQ handler
 * and blocking on a completion - before disabling the watermark, the
 * IC/IRT task, and the DMA channel(s), then clearing all buffer-ready
 * flags. FG window position is saved and restored across the disable.
 * NOTE(review): several conditionals/returns are elided in this excerpt.
 */
2172 int32_t ipu_disable_channel(struct ipu_soc *ipu, ipu_channel_t channel, bool wait_for_stop)
2175 	unsigned long lock_flags;
2178 	uint32_t sec_dma = NO_DMA;
2179 	uint32_t thrd_dma = NO_DMA;
2180 	uint16_t fg_pos_x, fg_pos_y;
2182 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2184 	if ((ipu->channel_enable_mask & (1L << IPU_CHAN_ID(channel))) == 0) {
2185 		dev_err(ipu->dev, "Channel already disabled %d\n",
2186 			IPU_CHAN_ID(channel));
2187 		spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2191 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2193 	/* Get input and output dma channels */
2194 	out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
2195 	in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
/* Nothing to do if neither DMA channel is actually enabled. */
2197 	if ((idma_is_valid(in_dma) &&
2198 		!idma_is_set(ipu, IDMAC_CHA_EN, in_dma))
2199 		&& (idma_is_valid(out_dma) &&
2200 		!idma_is_set(ipu, IDMAC_CHA_EN, out_dma)))
2203 	if (ipu->sec_chan_en[IPU_CHAN_ID(channel)])
2204 		sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
2205 	if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) {
2206 		sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
2207 		thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
/* Display channels: park the FG window at (0,0) and stop the DP/DC flow
 * before touching the IDMAC. */
2210 	if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC) ||
2211 	    (channel == MEM_DC_SYNC)) {
2212 		if (channel == MEM_FG_SYNC) {
2213 			ipu_disp_get_window_pos(ipu, channel, &fg_pos_x, &fg_pos_y);
2214 			ipu_disp_set_window_pos(ipu, channel, 0, 0);
2217 		_ipu_dp_dc_disable(ipu, channel, false);
2220 		 * wait for BG channel EOF then disable FG-IDMAC,
2221 		 * it avoid FG NFB4EOF error.
2223 		if (channel == MEM_FG_SYNC) {
/* Clear the BG EOF status, then busy-poll it (timeout path partially
 * elided - the loop body and counter are not visible here). */
2226 			ipu_cm_write(ipu, IPUIRQ_2_MASK(IPU_IRQ_BG_SYNC_EOF),
2227 					IPUIRQ_2_STATREG(IPU_IRQ_BG_SYNC_EOF));
2228 			while ((ipu_cm_read(ipu, IPUIRQ_2_STATREG(IPU_IRQ_BG_SYNC_EOF)) &
2229 				IPUIRQ_2_MASK(IPU_IRQ_BG_SYNC_EOF)) == 0) {
2233 					dev_err(ipu->dev, "warning: wait for bg sync eof timeout\n");
2238 	} else if (wait_for_stop) {
/* Non-display channels: wait for each busy DMA channel's EOF via a
 * temporary IRQ handler + completion, with a 200 ms timeout per pass. */
2239 		while (idma_is_set(ipu, IDMAC_CHA_BUSY, in_dma) ||
2240 		       idma_is_set(ipu, IDMAC_CHA_BUSY, out_dma) ||
2241 			(ipu->sec_chan_en[IPU_CHAN_ID(channel)] &&
2242 			idma_is_set(ipu, IDMAC_CHA_BUSY, sec_dma)) ||
2243 			(ipu->thrd_chan_en[IPU_CHAN_ID(channel)] &&
2244 			idma_is_set(ipu, IDMAC_CHA_BUSY, thrd_dma))) {
2245 			uint32_t ret, irq = 0xffffffff;
2246 			DECLARE_COMPLETION_ONSTACK(disable_comp);
/* Pick the EOF irq of whichever channel is still busy (the irq
 * assignments between these checks are elided in this excerpt). */
2248 			if (idma_is_set(ipu, IDMAC_CHA_BUSY, out_dma))
2250 			if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] &&
2251 				idma_is_set(ipu, IDMAC_CHA_BUSY, sec_dma))
2253 			if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] &&
2254 				idma_is_set(ipu, IDMAC_CHA_BUSY, thrd_dma))
2256 			if (idma_is_set(ipu, IDMAC_CHA_BUSY, in_dma))
2259 			if (irq == 0xffffffff) {
2260 				dev_err(ipu->dev, "warning: no channel busy, break\n");
2264 			dev_err(ipu->dev, "warning: channel %d busy, need wait\n", irq);
2266 			ret = ipu_request_irq(ipu, irq, disable_chan_irq_handler, 0, NULL, &disable_comp);
2268 				dev_err(ipu->dev, "irq %d in use\n", irq);
2271 			ret = wait_for_completion_timeout(&disable_comp, msecs_to_jiffies(200));
2272 			ipu_free_irq(ipu, irq, &disable_comp);
2274 				ipu_dump_registers(ipu);
2275 				dev_err(ipu->dev, "warning: disable ipu dma channel %d during its busy state\n", irq);
2282 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
/* Disable the watermark for display channels. */
2284 	if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC) ||
2285 	    (channel == MEM_DC_SYNC)) {
2286 		reg = ipu_idmac_read(ipu, IDMAC_WM_EN(in_dma));
2287 		ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_WM_EN(in_dma));
2290 	/* Disable IC task */
2291 	if (_ipu_is_ic_chan(in_dma) || _ipu_is_ic_chan(out_dma) ||
2292 		_ipu_is_irt_chan(in_dma) || _ipu_is_irt_chan(out_dma))
2293 		_ipu_ic_disable_task(ipu, channel);
2295 	/* Disable DMA channel(s) */
2296 	if (idma_is_valid(in_dma)) {
2297 		reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
2298 		ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_CHA_EN(in_dma));
2299 		ipu_cm_write(ipu, idma_mask(in_dma), IPU_CHA_CUR_BUF(in_dma));
2300 		ipu_cm_write(ipu, tri_cur_buf_mask(in_dma),
2301 					IPU_CHA_TRIPLE_CUR_BUF(in_dma));
2303 	if (idma_is_valid(out_dma)) {
2304 		reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
2305 		ipu_idmac_write(ipu, reg & ~idma_mask(out_dma), IDMAC_CHA_EN(out_dma));
2306 		ipu_cm_write(ipu, idma_mask(out_dma), IPU_CHA_CUR_BUF(out_dma));
2307 		ipu_cm_write(ipu, tri_cur_buf_mask(out_dma),
2308 					IPU_CHA_TRIPLE_CUR_BUF(out_dma));
2310 	if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(sec_dma)) {
2311 		reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(sec_dma));
2312 		ipu_idmac_write(ipu, reg & ~idma_mask(sec_dma), IDMAC_CHA_EN(sec_dma));
2313 		ipu_cm_write(ipu, idma_mask(sec_dma), IPU_CHA_CUR_BUF(sec_dma));
2315 	if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(thrd_dma)) {
2316 		reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
2317 		ipu_idmac_write(ipu, reg & ~idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
/* Clear the separate-alpha bit: for BG/FG sync it is keyed to the
 * video-in channel, otherwise to the graphics (sec) channel. */
2318 		if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC) {
2319 			reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
2320 			ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_SEP_ALPHA);
2322 			reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
2323 			ipu_idmac_write(ipu, reg & ~idma_mask(sec_dma), IDMAC_SEP_ALPHA);
2325 		ipu_cm_write(ipu, idma_mask(thrd_dma), IPU_CHA_CUR_BUF(thrd_dma));
2328 	ipu->channel_enable_mask &= ~(1L << IPU_CHAN_ID(channel));
2330 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2332 	/* Set channel buffers NOT to be ready */
2333 	if (idma_is_valid(in_dma)) {
2334 		ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 0);
2335 		ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 1);
2336 		ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 2);
2338 	if (idma_is_valid(out_dma)) {
2339 		ipu_clear_buffer_ready(ipu, channel, IPU_OUTPUT_BUFFER, 0);
2340 		ipu_clear_buffer_ready(ipu, channel, IPU_OUTPUT_BUFFER, 1);
2342 	if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(sec_dma)) {
2343 		ipu_clear_buffer_ready(ipu, channel, IPU_GRAPH_IN_BUFFER, 0);
2344 		ipu_clear_buffer_ready(ipu, channel, IPU_GRAPH_IN_BUFFER, 1);
2346 	if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(thrd_dma)) {
2347 		ipu_clear_buffer_ready(ipu, channel, IPU_ALPHA_IN_BUFFER, 0);
2348 		ipu_clear_buffer_ready(ipu, channel, IPU_ALPHA_IN_BUFFER, 1);
/* Restore the FG window position saved before the disable. */
2351 	if (channel == MEM_FG_SYNC)
2352 		ipu_disp_set_window_pos(ipu, channel, fg_pos_x, fg_pos_y);
2356 EXPORT_SYMBOL(ipu_disable_channel);
2359 * This function enables CSI.
2361 * @param ipu ipu handler
2362 * @param csi csi num 0 or 1
2364 * @return This function returns 0 on success or negative error code on
/*
 * Reference-counted enable of a CSI (camera sensor interface) unit.
 * On the 0 -> 1 transition of the per-CSI use count, the matching CSI
 * enable bit is set in IPU_CONF under the IPU spinlock.
 */
2367 int32_t ipu_enable_csi(struct ipu_soc *ipu, uint32_t csi)
2370 	unsigned long lock_flags;
	/* Rejects csi numbers other than 0/1 (guard condition not visible here). */
2373 		dev_err(ipu->dev, "Wrong csi num_%d\n", csi);
2377 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2378 	ipu->csi_use_count[csi]++;
	/* First user: turn the CSI module on in the common config register. */
2380 	if (ipu->csi_use_count[csi] == 1) {
2381 		reg = ipu_cm_read(ipu, IPU_CONF);
2383 		ipu_cm_write(ipu, reg | IPU_CONF_CSI0_EN, IPU_CONF);
2385 		ipu_cm_write(ipu, reg | IPU_CONF_CSI1_EN, IPU_CONF);
2387 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2390 EXPORT_SYMBOL(ipu_enable_csi);
2393 * This function disables CSI.
2395 * @param ipu ipu handler
2396 * @param csi csi num 0 or 1
2398 * @return This function returns 0 on success or negative error code on
/*
 * Reference-counted disable of a CSI unit.  The matching CSI enable
 * bit in IPU_CONF is cleared when the per-CSI use count drops to zero.
 */
2401 int32_t ipu_disable_csi(struct ipu_soc *ipu, uint32_t csi)
2404 	unsigned long lock_flags;
	/* Rejects csi numbers other than 0/1 (guard condition not visible here). */
2407 		dev_err(ipu->dev, "Wrong csi num_%d\n", csi);
2411 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2412 	ipu->csi_use_count[csi]--;
	/* Last user gone: turn the CSI module off. */
2414 	if (ipu->csi_use_count[csi] == 0) {
2415 		reg = ipu_cm_read(ipu, IPU_CONF);
2417 		ipu_cm_write(ipu, reg & ~IPU_CONF_CSI0_EN, IPU_CONF);
2419 		ipu_cm_write(ipu, reg & ~IPU_CONF_CSI1_EN, IPU_CONF);
2421 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2424 EXPORT_SYMBOL(ipu_disable_csi);
/*
 * Top-level IPU interrupt handler.  Makes two passes over the IPU
 * status registers: first the error registers (indices 5, 6, 9, 10),
 * then the functional interrupt registers (1-4, 11-15).  Each index
 * list is terminated by a 0 entry.  Pending functional interrupts are
 * demultiplexed to the handlers registered in ipu->irq_list.
 */
2426 static irqreturn_t ipu_irq_handler(int irq, void *desc)
2428 	struct ipu_soc *ipu = desc;
2431 	irqreturn_t result = IRQ_NONE;
	/* Register index tables; the trailing 0 is the loop terminator. */
2433 	const int err_reg[] = { 5, 6, 9, 10, 0 };
2434 	const int int_reg[] = { 1, 2, 3, 4, 11, 12, 13, 14, 15, 0 };
2437 		if (err_reg[i] == 0)
	/* Only consider error bits that are both asserted and enabled. */
2439 		int_stat = ipu_cm_read(ipu, IPU_INT_STAT(err_reg[i]));
2440 		int_stat &= ipu_cm_read(ipu, IPU_INT_CTRL(err_reg[i]));
2442 			ipu_cm_write(ipu, int_stat, IPU_INT_STAT(err_reg[i]));
2444 				"IPU Error - IPU_INT_STAT_%d = 0x%08X\n",
2445 				err_reg[i], int_stat);
2446 			/* Disable interrupts so we only get error once */
2448 				ipu_cm_read(ipu, IPU_INT_CTRL(err_reg[i])) & ~int_stat;
2449 			ipu_cm_write(ipu, int_stat, IPU_INT_CTRL(err_reg[i]));
2454 		if (int_reg[i] == 0)
	/* Latch enabled pending bits and acknowledge them in one shot. */
2456 		int_stat = ipu_cm_read(ipu, IPU_INT_STAT(int_reg[i]));
2457 		int_stat &= ipu_cm_read(ipu, IPU_INT_CTRL(int_reg[i]));
2458 		ipu_cm_write(ipu, int_stat, IPU_INT_STAT(int_reg[i]));
	/* Dispatch each pending bit to its registered handler. */
2459 		while ((line = ffs(int_stat)) != 0) {
2461 			int_stat &= ~(1UL << line);
	/* Translate the per-register bit into a global IPU irq line number. */
2462 			line += (int_reg[i] - 1) * 32;
2464 			ipu->irq_list[line].handler(line,
2465 						    ipu->irq_list[line].
2474 * This function enables the interrupt for the specified interrupt line.
2475 * The interrupt lines are defined in \b ipu_irq_line enum.
2477 * @param ipu ipu handler
2478 * @param irq Interrupt line to enable interrupt for.
/*
 * Unmask one IPU interrupt line by setting its bit in the matching
 * IPU_INT_CTRL register.  If the IPU clock is not already running it
 * is enabled around the register access and disabled again afterwards.
 */
2481 void ipu_enable_irq(struct ipu_soc *ipu, uint32_t irq)
2484 	unsigned long lock_flags;
2486 	if (!ipu->clk_enabled)
2487 		clk_enable(ipu->ipu_clk);
2489 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
	/* Read-modify-write of the control register under the IPU lock. */
2491 	reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
2492 	reg |= IPUIRQ_2_MASK(irq);
2493 	ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
2495 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2496 	if (!ipu->clk_enabled)
2497 		clk_disable(ipu->ipu_clk);
2499 EXPORT_SYMBOL(ipu_enable_irq);
2502 * This function disables the interrupt for the specified interrupt line.
2503 * The interrupt lines are defined in \b ipu_irq_line enum.
2505 * @param ipu ipu handler
2506 * @param irq Interrupt line to disable interrupt for.
/*
 * Mask one IPU interrupt line by clearing its bit in the matching
 * IPU_INT_CTRL register.  Mirrors ipu_enable_irq(), including the
 * temporary IPU clock enable around the register access.
 */
2509 void ipu_disable_irq(struct ipu_soc *ipu, uint32_t irq)
2512 	unsigned long lock_flags;
2514 	if (!ipu->clk_enabled)
2515 		clk_enable(ipu->ipu_clk);
2517 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
	/* Read-modify-write of the control register under the IPU lock. */
2519 	reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
2520 	reg &= ~IPUIRQ_2_MASK(irq);
2521 	ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
2523 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2524 	if (!ipu->clk_enabled)
2525 		clk_disable(ipu->ipu_clk);
2527 EXPORT_SYMBOL(ipu_disable_irq);
2530 * This function clears the interrupt for the specified interrupt line.
2531 * The interrupt lines are defined in \b ipu_irq_line enum.
2533 * @param ipu ipu handler
2534 * @param irq Interrupt line to clear interrupt for.
/*
 * Clear (acknowledge) one IPU interrupt line by writing its mask bit
 * to the matching IPU_INT_STAT register, the same idiom the interrupt
 * handler uses to acknowledge pending bits.
 */
2537 void ipu_clear_irq(struct ipu_soc *ipu, uint32_t irq)
2539 	if (!ipu->clk_enabled)
2540 		clk_enable(ipu->ipu_clk);
2542 	ipu_cm_write(ipu, IPUIRQ_2_MASK(irq), IPUIRQ_2_STATREG(irq));
2544 	if (!ipu->clk_enabled)
2545 		clk_disable(ipu->ipu_clk);
2547 EXPORT_SYMBOL(ipu_clear_irq);
2550 * This function returns the current interrupt status for the specified
2551 * interrupt line. The interrupt lines are defined in \b ipu_irq_line enum.
2553 * @param ipu ipu handler
2554 * @param irq Interrupt line to get status for.
2556 * @return Returns true if the interrupt is pending/asserted or false if
2557 * the interrupt is not pending.
/*
 * Report whether one IPU interrupt line is currently pending: reads
 * the line's status register and tests its mask bit, enabling the IPU
 * clock around the access if it is not already running.
 */
2559 bool ipu_get_irq_status(struct ipu_soc *ipu, uint32_t irq)
2563 	if (!ipu->clk_enabled)
2564 		clk_enable(ipu->ipu_clk);
2566 	reg = ipu_cm_read(ipu, IPUIRQ_2_STATREG(irq));
2568 	if (!ipu->clk_enabled)
2569 		clk_disable(ipu->ipu_clk);
2571 	if (reg & IPUIRQ_2_MASK(irq))
2576 EXPORT_SYMBOL(ipu_get_irq_status);
2579 * This function registers an interrupt handler function for the specified
2580 * interrupt line. The interrupt lines are defined in \b ipu_irq_line enum.
2582 * @param ipu ipu handler
2583 * @param irq Interrupt line to get status for.
2585 * @param handler Input parameter for address of the handler
2588 * @param irq_flags Flags for interrupt mode. Currently not used.
2590 * @param devname Input parameter for string name of driver
2591 * registering the handler.
2593 * @param dev_id Input parameter for pointer of data to be
2594 * passed to the handler.
2596 * @return This function returns 0 on success or negative error code on
/*
 * Register a handler for one IPU interrupt line in the driver's own
 * demux table (ipu->irq_list) — this is the IPU-internal equivalent of
 * request_irq(), dispatched from ipu_irq_handler().  Fails if a
 * handler is already installed; otherwise records the handler, flags,
 * dev_id and name under the IPU lock and then unmasks the line.
 */
2599 int ipu_request_irq(struct ipu_soc *ipu, uint32_t irq,
2600 		irqreturn_t(*handler) (int, void *),
2601 		uint32_t irq_flags, const char *devname, void *dev_id)
2603 	unsigned long lock_flags;
	/* irq must be a valid table index; out-of-range is a driver bug. */
2605 	BUG_ON(irq >= IPU_IRQ_COUNT);
2607 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
	/* Only one handler per line; refuse to overwrite an existing one. */
2609 	if (ipu->irq_list[irq].handler != NULL) {
2611 			"handler already installed on irq %d\n", irq);
2612 		spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2616 	ipu->irq_list[irq].handler = handler;
2617 	ipu->irq_list[irq].flags = irq_flags;
2618 	ipu->irq_list[irq].dev_id = dev_id;
2619 	ipu->irq_list[irq].name = devname;
2621 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2623 	ipu_enable_irq(ipu, irq);	/* enable the interrupt */
2627 EXPORT_SYMBOL(ipu_request_irq);
2630 * This function unregisters an interrupt handler for the specified interrupt
2631 * line. The interrupt lines are defined in \b ipu_irq_line enum.
2633 * @param ipu ipu handler
2634 * @param irq Interrupt line to get status for.
2636 * @param dev_id Input parameter for pointer of data to be passed
2637 * to the handler. This must match value passed to
2638 * ipu_request_irq().
/*
 * Unregister the handler for one IPU interrupt line: masks the line,
 * then removes the handler only when dev_id matches the value given to
 * ipu_request_irq(), so another owner's registration is left intact.
 */
2641 void ipu_free_irq(struct ipu_soc *ipu, uint32_t irq, void *dev_id)
2643 	ipu_disable_irq(ipu, irq);	/* disable the interrupt */
2645 	if (ipu->irq_list[irq].dev_id == dev_id)
2646 		ipu->irq_list[irq].handler = NULL;
2648 EXPORT_SYMBOL(ipu_free_irq);
/*
 * Return the index of the buffer the IDMAC is currently working on for
 * the given channel/buffer type.  Triple-buffer-capable channels in
 * triple-buffer mode read a field from IPU_CHA_TRIPLE_CUR_BUF; all
 * others read the channel's bit from IPU_CHA_CUR_BUF.
 */
2650 uint32_t ipu_get_cur_buffer_idx(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type)
2652 	uint32_t reg, dma_chan;
2654 	dma_chan = channel_2_dma(channel, type);
2655 	if (!idma_is_valid(dma_chan))
	/* Triple-buffer mode: extract the current-buffer field. */
2658 	reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
2659 	if ((reg & idma_mask(dma_chan)) && _ipu_is_trb_chan(dma_chan)) {
2660 		reg = ipu_cm_read(ipu, IPU_CHA_TRIPLE_CUR_BUF(dma_chan));
2661 		return (reg & tri_cur_buf_mask(dma_chan)) >>
2662 			tri_cur_buf_shift(dma_chan);
	/* Double-buffer mode: one bit selects buffer 0 or 1. */
2664 	reg = ipu_cm_read(ipu, IPU_CHA_CUR_BUF(dma_chan));
2665 	if (reg & idma_mask(dma_chan))
2671 EXPORT_SYMBOL(ipu_get_cur_buffer_idx);
/*
 * Return the task status field from IPU_PROC_TASK_STAT that
 * corresponds to the given logical channel (VF, VF-rotate, encoder,
 * encoder-rotate, post-processor, PP-rotate).  Channels without a
 * mapped field report TASK_STAT_IDLE.
 */
2673 uint32_t _ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel)
2676 	uint32_t task_stat_reg = ipu_cm_read(ipu, IPU_PROC_TASK_STAT);
2679 	case MEM_PRP_VF_MEM:
2680 		stat = (task_stat_reg & TSTAT_VF_MASK) >> TSTAT_VF_OFFSET;
	/* VDI deinterlace output shares the view-finder task status. */
2682 	case MEM_VDI_PRP_VF_MEM:
2683 		stat = (task_stat_reg & TSTAT_VF_MASK) >> TSTAT_VF_OFFSET;
2685 	case MEM_ROT_VF_MEM:
2687 			(task_stat_reg & TSTAT_VF_ROT_MASK) >> TSTAT_VF_ROT_OFFSET;
2689 	case MEM_PRP_ENC_MEM:
2690 		stat = (task_stat_reg & TSTAT_ENC_MASK) >> TSTAT_ENC_OFFSET;
2692 	case MEM_ROT_ENC_MEM:
2694 			(task_stat_reg & TSTAT_ENC_ROT_MASK) >>
2695 			TSTAT_ENC_ROT_OFFSET;
2698 		stat = (task_stat_reg & TSTAT_PP_MASK) >> TSTAT_PP_OFFSET;
2700 	case MEM_ROT_PP_MEM:
2702 			(task_stat_reg & TSTAT_PP_ROT_MASK) >> TSTAT_PP_ROT_OFFSET;
2706 		stat = TASK_STAT_IDLE;
/*
 * Hand the display path over from one channel to another: first enable
 * the target channel's IDMAC, then tear down DP/DC output for the
 * source channel, disable the source channel's IDMAC (resetting its
 * current-buffer state), and finally clear the source's video input
 * buffer-ready flags for buffers 0-2.
 */
2712 int32_t ipu_swap_channel(struct ipu_soc *ipu, ipu_channel_t from_ch, ipu_channel_t to_ch)
2715 	unsigned long lock_flags;
2717 	int from_dma = channel_2_dma(from_ch, IPU_INPUT_BUFFER);
2718 	int to_dma = channel_2_dma(to_ch, IPU_INPUT_BUFFER);
2720 	/* enable target channel */
2721 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2723 	reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(to_dma));
2724 	ipu_idmac_write(ipu, reg | idma_mask(to_dma), IDMAC_CHA_EN(to_dma));
2726 	ipu->channel_enable_mask |= 1L << IPU_CHAN_ID(to_ch);
2728 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
	/* Stop DP/DC output for the outgoing channel before disabling it. */
2731 	_ipu_dp_dc_disable(ipu, from_ch, true);
2733 	/* disable source channel */
2734 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2736 	reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(from_dma));
2737 	ipu_idmac_write(ipu, reg & ~idma_mask(from_dma), IDMAC_CHA_EN(from_dma));
	/* Reset the outgoing channel's current-buffer pointers. */
2738 	ipu_cm_write(ipu, idma_mask(from_dma), IPU_CHA_CUR_BUF(from_dma));
2739 	ipu_cm_write(ipu, tri_cur_buf_mask(from_dma),
2740 		IPU_CHA_TRIPLE_CUR_BUF(from_dma));
2742 	ipu->channel_enable_mask &= ~(1L << IPU_CHAN_ID(from_ch));
2744 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2746 	ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 0);
2747 	ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 1);
2748 	ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 2);
2752 EXPORT_SYMBOL(ipu_swap_channel);
/*
 * Return the number of bytes per pixel for an IPU pixel format code.
 * Formats are grouped by storage size; the return statements for each
 * group fall on lines not visible in this view.
 */
2754 uint32_t bytes_per_pixel(uint32_t fmt)
2757 	case IPU_PIX_FMT_GENERIC:	/*generic data */
2758 	case IPU_PIX_FMT_RGB332:
2759 	case IPU_PIX_FMT_YUV420P:
2760 	case IPU_PIX_FMT_YVU420P:
2761 	case IPU_PIX_FMT_YUV422P:
	/* 16-bit packed formats. */
2764 	case IPU_PIX_FMT_RGB565:
2765 	case IPU_PIX_FMT_YUYV:
2766 	case IPU_PIX_FMT_UYVY:
	/* 24-bit packed RGB formats. */
2769 	case IPU_PIX_FMT_BGR24:
2770 	case IPU_PIX_FMT_RGB24:
	/* 32-bit formats. */
2773 	case IPU_PIX_FMT_GENERIC_32:	/*generic data */
2774 	case IPU_PIX_FMT_BGR32:
2775 	case IPU_PIX_FMT_BGRA32:
2776 	case IPU_PIX_FMT_RGB32:
2777 	case IPU_PIX_FMT_RGBA32:
2778 	case IPU_PIX_FMT_ABGR32:
2787 EXPORT_SYMBOL(bytes_per_pixel);
/*
 * Classify an IPU pixel format by color space.  The case labels listed
 * here are the RGB-family formats; the return statements and the
 * default (non-RGB) path fall on lines not visible in this view.
 */
2789 ipu_color_space_t format_to_colorspace(uint32_t fmt)
2792 	case IPU_PIX_FMT_RGB666:
2793 	case IPU_PIX_FMT_RGB565:
2794 	case IPU_PIX_FMT_BGR24:
2795 	case IPU_PIX_FMT_RGB24:
2796 	case IPU_PIX_FMT_GBR24:
2797 	case IPU_PIX_FMT_BGR32:
2798 	case IPU_PIX_FMT_BGRA32:
2799 	case IPU_PIX_FMT_RGB32:
2800 	case IPU_PIX_FMT_RGBA32:
2801 	case IPU_PIX_FMT_ABGR32:
2802 	case IPU_PIX_FMT_LVDS666:
2803 	case IPU_PIX_FMT_LVDS888:
/*
 * Report whether a pixel format carries an alpha channel
 * (RGBA/BGRA/ABGR 32-bit formats).
 */
2814 bool ipu_pixel_format_has_alpha(uint32_t fmt)
2817 	case IPU_PIX_FMT_RGBA32:
2818 	case IPU_PIX_FMT_BGRA32:
2819 	case IPU_PIX_FMT_ABGR32:
/*
 * Thin exported wrapper: program the DP color-space-conversion
 * coefficient matrix for the given channel.
 */
2829 void ipu_set_csc_coefficients(struct ipu_soc *ipu, ipu_channel_t channel, int32_t param[][3])
2831 	_ipu_dp_set_csc_coefficients(ipu, channel, param);
2833 EXPORT_SYMBOL(ipu_set_csc_coefficients);
/*
 * Platform suspend hook.  If the IPU is in use: wait for busy IDMAC
 * channels to drain (disabling the idle ones as they finish), then
 * save channel-enable, double/triple buffer select, IDMAC sub-address,
 * sub-module config (IC_CONF / IPU_CONF) and buffer-ready registers so
 * ipu_resume() can restore them.
 */
2835 static int ipu_suspend(struct platform_device *pdev, pm_message_t state)
2837 	struct imx_ipuv3_platform_data *plat_data = pdev->dev.platform_data;
2838 	struct ipu_soc *ipu = platform_get_drvdata(pdev);
2840 	if (ipu->ipu_use_count) {
2841 		/* save and disable enabled channels*/
2842 		ipu->idma_enable_reg[0] = ipu_idmac_read(ipu, IDMAC_CHA_EN(0));
2843 		ipu->idma_enable_reg[1] = ipu_idmac_read(ipu, IDMAC_CHA_EN(32));
	/* Spin until no enabled channel in either bank is still busy. */
2844 		while ((ipu_idmac_read(ipu, IDMAC_CHA_BUSY(0))
2845 			& ipu->idma_enable_reg[0])
2846 			|| (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(32))
2847 			& ipu->idma_enable_reg[1])) {
2848 			/* disable channel not busy already */
2849 			uint32_t chan_should_disable, timeout = 1000, time = 0;
2851 			chan_should_disable =
2852 				ipu_idmac_read(ipu, IDMAC_CHA_BUSY(0))
2853 					^ ipu->idma_enable_reg[0];
2854 			ipu_idmac_write(ipu, (~chan_should_disable) &
2855 					ipu->idma_enable_reg[0], IDMAC_CHA_EN(0));
2856 			chan_should_disable =
	/*
	 * NOTE(review): IDMAC_CHA_BUSY(1) resolves to the same register
	 * as IDMAC_CHA_BUSY(0) if the macro indexes by channel number,
	 * yet this pairs with idma_enable_reg[1]/IDMAC_CHA_EN(32)
	 * (the high bank).  IDMAC_CHA_BUSY(32) looks intended — confirm
	 * against the IPUv3 register map before changing.
	 */
2857 				ipu_idmac_read(ipu, IDMAC_CHA_BUSY(1))
2858 					^ ipu->idma_enable_reg[1];
2859 			ipu_idmac_write(ipu, (~chan_should_disable) &
2860 					ipu->idma_enable_reg[1], IDMAC_CHA_EN(32));
2863 			if (time >= timeout)
	/* All channels drained (or timed out): force both banks off. */
2866 		ipu_idmac_write(ipu, 0, IDMAC_CHA_EN(0));
2867 		ipu_idmac_write(ipu, 0, IDMAC_CHA_EN(32));
2869 		/* save double buffer select regs */
2870 		ipu->cha_db_mode_reg[0] = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0));
2871 		ipu->cha_db_mode_reg[1] = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32));
2872 		ipu->cha_db_mode_reg[2] =
2873 			ipu_cm_read(ipu, IPU_ALT_CHA_DB_MODE_SEL(0));
2874 		ipu->cha_db_mode_reg[3] =
2875 			ipu_cm_read(ipu, IPU_ALT_CHA_DB_MODE_SEL(32));
2877 		/* save triple buffer select regs */
2878 		ipu->cha_trb_mode_reg[0] = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(0));
2879 		ipu->cha_trb_mode_reg[1] = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(32));
2881 		/* save idmac sub addr regs */
2882 		ipu->idma_sub_addr_reg[0] = ipu_idmac_read(ipu, IDMAC_SUB_ADDR_0);
2883 		ipu->idma_sub_addr_reg[1] = ipu_idmac_read(ipu, IDMAC_SUB_ADDR_1);
2884 		ipu->idma_sub_addr_reg[2] = ipu_idmac_read(ipu, IDMAC_SUB_ADDR_2);
2885 		ipu->idma_sub_addr_reg[3] = ipu_idmac_read(ipu, IDMAC_SUB_ADDR_3);
2886 		ipu->idma_sub_addr_reg[4] = ipu_idmac_read(ipu, IDMAC_SUB_ADDR_4);
2888 		/* save sub-modules status and disable all */
2889 		ipu->ic_conf_reg = ipu_ic_read(ipu, IC_CONF);
2890 		ipu_ic_write(ipu, 0, IC_CONF);
2891 		ipu->ipu_conf_reg = ipu_cm_read(ipu, IPU_CONF);
2892 		ipu_cm_write(ipu, 0, IPU_CONF);
2894 		/* save buf ready regs */
2895 		ipu->buf_ready_reg[0] = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(0));
2896 		ipu->buf_ready_reg[1] = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(32));
2897 		ipu->buf_ready_reg[2] = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(0));
2898 		ipu->buf_ready_reg[3] = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(32));
2899 		ipu->buf_ready_reg[4] = ipu_cm_read(ipu, IPU_ALT_CHA_BUF0_RDY(0));
2900 		ipu->buf_ready_reg[5] = ipu_cm_read(ipu, IPU_ALT_CHA_BUF0_RDY(32));
2901 		ipu->buf_ready_reg[6] = ipu_cm_read(ipu, IPU_ALT_CHA_BUF1_RDY(0));
2902 		ipu->buf_ready_reg[7] = ipu_cm_read(ipu, IPU_ALT_CHA_BUF1_RDY(32));
2903 		ipu->buf_ready_reg[8] = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(0));
2904 		ipu->buf_ready_reg[9] = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(32));
/*
 * Platform resume hook.  If the IPU was in use at suspend, restore the
 * register state captured by ipu_suspend() — buffer-ready, sub-module
 * config (IPU_CONF / IC_CONF), double/triple buffer select, IDMAC
 * sub-address and channel-enable registers — then re-initialize the
 * DMFC, DC mappings and channel priorities.
 */
2915 static int ipu_resume(struct platform_device *pdev)
2916 	struct imx_ipuv3_platform_data *plat_data = pdev->dev.platform_data;
2917 	struct ipu_soc *ipu = platform_get_drvdata(pdev);
2921 	if (ipu->ipu_use_count) {
2923 		/* restore buf ready regs */
2924 		ipu_cm_write(ipu, ipu->buf_ready_reg[0], IPU_CHA_BUF0_RDY(0));
2925 		ipu_cm_write(ipu, ipu->buf_ready_reg[1], IPU_CHA_BUF0_RDY(32));
2926 		ipu_cm_write(ipu, ipu->buf_ready_reg[2], IPU_CHA_BUF1_RDY(0));
2927 		ipu_cm_write(ipu, ipu->buf_ready_reg[3], IPU_CHA_BUF1_RDY(32));
2928 		ipu_cm_write(ipu, ipu->buf_ready_reg[4], IPU_ALT_CHA_BUF0_RDY(0));
2929 		ipu_cm_write(ipu, ipu->buf_ready_reg[5], IPU_ALT_CHA_BUF0_RDY(32));
2930 		ipu_cm_write(ipu, ipu->buf_ready_reg[6], IPU_ALT_CHA_BUF1_RDY(0));
2931 		ipu_cm_write(ipu, ipu->buf_ready_reg[7], IPU_ALT_CHA_BUF1_RDY(32));
2932 		ipu_cm_write(ipu, ipu->buf_ready_reg[8], IPU_CHA_BUF2_RDY(0));
2933 		ipu_cm_write(ipu, ipu->buf_ready_reg[9], IPU_CHA_BUF2_RDY(32));
2935 		/* re-enable sub-modules*/
2936 		ipu_cm_write(ipu, ipu->ipu_conf_reg, IPU_CONF);
2937 		ipu_ic_write(ipu, ipu->ic_conf_reg, IC_CONF);
2939 		/* restore double buffer select regs */
2940 		ipu_cm_write(ipu, ipu->cha_db_mode_reg[0], IPU_CHA_DB_MODE_SEL(0));
2941 		ipu_cm_write(ipu, ipu->cha_db_mode_reg[1], IPU_CHA_DB_MODE_SEL(32));
2942 		ipu_cm_write(ipu, ipu->cha_db_mode_reg[2],
2943 				IPU_ALT_CHA_DB_MODE_SEL(0));
2944 		ipu_cm_write(ipu, ipu->cha_db_mode_reg[3],
2945 				IPU_ALT_CHA_DB_MODE_SEL(32));
2947 		/* restore triple buffer select regs */
2948 		ipu_cm_write(ipu, ipu->cha_trb_mode_reg[0], IPU_CHA_TRB_MODE_SEL(0));
2949 		ipu_cm_write(ipu, ipu->cha_trb_mode_reg[1], IPU_CHA_TRB_MODE_SEL(32));
2951 		/* restore idmac sub addr regs */
2952 		ipu_idmac_write(ipu, ipu->idma_sub_addr_reg[0], IDMAC_SUB_ADDR_0);
2953 		ipu_idmac_write(ipu, ipu->idma_sub_addr_reg[1], IDMAC_SUB_ADDR_1);
2954 		ipu_idmac_write(ipu, ipu->idma_sub_addr_reg[2], IDMAC_SUB_ADDR_2);
2955 		ipu_idmac_write(ipu, ipu->idma_sub_addr_reg[3], IDMAC_SUB_ADDR_3);
2956 		ipu_idmac_write(ipu, ipu->idma_sub_addr_reg[4], IDMAC_SUB_ADDR_4);
2958 		/* restart idma channel*/
2959 		ipu_idmac_write(ipu, ipu->idma_enable_reg[0], IDMAC_CHA_EN(0));
2960 		ipu_idmac_write(ipu, ipu->idma_enable_reg[1], IDMAC_CHA_EN(32));
	/* IPU was idle at suspend: re-run the one-time hardware setup. */
2963 		_ipu_dmfc_init(ipu, dmfc_type_setup, 1);
2964 		_ipu_init_dc_mappings(ipu);
2965 		/* Set sync refresh channels as high priority */
2966 		ipu_idmac_write(ipu, 0x18800001L, IDMAC_CHA_PRI(0));
2974 * This structure contains pointers to the power management callback functions.
/*
 * Platform driver glue for the "imx-ipuv3" device, wiring up the
 * remove and the suspend/resume power-management callbacks above.
 */
2976 static struct platform_driver mxcipu_driver = {
2978 	.name = "imx-ipuv3",
2981 	.remove = ipu_remove,
2982 	.suspend = ipu_suspend,
2983 	.resume = ipu_resume,
/* Module init: register the IPU platform driver at subsys_initcall time. */
2986 int32_t __init ipu_gen_init(void)
2990 	ret = platform_driver_register(&mxcipu_driver);
2994 subsys_initcall(ipu_gen_init);
/* Module exit: unregister the IPU platform driver. */
2996 static void __exit ipu_gen_uninit(void)
2998 	platform_driver_unregister(&mxcipu_driver);
3001 module_exit(ipu_gen_uninit);