2 * Copyright 2005-2011 Freescale Semiconductor, Inc. All Rights Reserved.
6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations:
10 * http://www.opensource.org/licenses/gpl-license.html
11 * http://www.gnu.org/copyleft/gpl.html
17 * @brief This file contains the IPU driver common API functions.
21 #include <linux/types.h>
22 #include <linux/init.h>
23 #include <linux/platform_device.h>
24 #include <linux/err.h>
25 #include <linux/spinlock.h>
26 #include <linux/delay.h>
27 #include <linux/interrupt.h>
29 #include <linux/irq.h>
30 #include <linux/irqdesc.h>
31 #include <linux/ipu.h>
32 #include <linux/clk.h>
33 #include <linux/clkdev.h>
34 #include <mach/clock.h>
35 #include <mach/hardware.h>
36 #include <mach/ipu-v3.h>
37 #include <mach/devices-common.h>
41 #include "ipu_param_mem.h"
43 static struct ipu_soc ipu_array[MXC_IPU_MAX_NUM];
46 /* for power gating */
/*
 * Shadow copies of IPU registers, saved while the block is power gated
 * and restored afterwards.  NOTE(review): the save/restore code itself is
 * not visible in this chunk -- confirm against the rest of the file.
 */
47 static uint32_t ipu_conf_reg;
48 static uint32_t ic_conf_reg;
49 static uint32_t ipu_cha_db_mode_reg[4];
50 static uint32_t ipu_cha_trb_mode_reg[2];
51 static uint32_t ipu_cha_cur_buf_reg[4];
52 static uint32_t ipu_cha_triple_cur_buf_reg[4];
53 static uint32_t idma_sub_addr_reg[5];
54 static uint32_t idma_enable_reg[2];
55 static uint32_t buf_ready_reg[10];
57 /* Static functions */
/*
 * NOTE(review): this forward declaration types ipu_irq_handler as an
 * irqreturn_t handler suitable for request_irq(), but the definition later
 * in this file is a chained-handler "void (unsigned int, struct irq_desc *)".
 * The two prototypes conflict; confirm which form is intended.
 */
58 static irqreturn_t ipu_irq_handler(int irq, void *desc);
60 static inline uint32_t channel_2_dma(ipu_channel_t ch, ipu_buffer_t type)
62 return ((uint32_t) ch >> (6 * type)) & 0x3F;
/* True when the IDMAC channel belongs to the IC: 11..22 except 17 and 18. */
static inline int _ipu_is_ic_chan(uint32_t dma_chan)
{
	if (dma_chan < 11 || dma_chan > 22)
		return 0;
	return dma_chan != 17 && dma_chan != 18;
}
/* True for the IC graphics-combine IDMAC channels (14 and 15). */
static inline int _ipu_is_ic_graphic_chan(uint32_t dma_chan)
{
	switch (dma_chan) {
	case 14:
	case 15:
		return 1;
	default:
		return 0;
	}
}
/* Either DP BG or DP FG can be graphic window */
static inline int _ipu_is_dp_graphic_chan(uint32_t dma_chan)
{
	switch (dma_chan) {
	case 23:	/* DP BG */
	case 27:	/* DP FG */
		return 1;
	default:
		return 0;
	}
}
/* True when the IDMAC channel belongs to the IRT (rotator): 45..50. */
static inline int _ipu_is_irt_chan(uint32_t dma_chan)
{
	if (dma_chan < 45)
		return 0;
	return dma_chan <= 50;
}
/* True when the IDMAC channel is routed through the DMFC: 23..29. */
static inline int _ipu_is_dmfc_chan(uint32_t dma_chan)
{
	if (dma_chan < 23)
		return 0;
	return dma_chan <= 29;
}
/*
 * True when the IDMAC channel belongs to the SMFC (CSI capture): 0..3.
 *
 * Fix: dma_chan is unsigned, so the original "dma_chan >= 0" comparison
 * was always true (flagged by -Wtype-limits); only the upper bound is
 * meaningful.  Behavior is unchanged.
 */
static inline int _ipu_is_smfc_chan(uint32_t dma_chan)
{
	return dma_chan <= 3;
}
96 static inline int _ipu_is_trb_chan(uint32_t dma_chan)
98 return (((dma_chan == 8) || (dma_chan == 9) ||
99 (dma_chan == 10) || (dma_chan == 13) ||
100 (dma_chan == 21) || (dma_chan == 23) ||
101 (dma_chan == 27) || (dma_chan == 28)) &&
102 (g_ipu_hw_rev >= 2));
/*
 * IDMAC channel helpers.
 *
 * Hygiene fix: macro arguments are now fully parenthesized so that
 * expressions such as tri_cur_buf_mask(ch + 1) expand correctly
 * (previously "ch & 0x1F" and "ch*2" used the raw argument).  The
 * expansions are otherwise unchanged.
 */
#define idma_is_valid(ch)	((ch) != NO_DMA)
/* 32-bit per-bank register bit mask for a DMA channel (0 for NO_DMA). */
#define idma_mask(ch)		(idma_is_valid(ch) ? (1UL << ((ch) & 0x1F)) : 0)
/* Test whether channel "dma"'s bit is set in the per-bank register "reg". */
#define idma_is_set(ipu, reg, dma)	(ipu_idmac_read(ipu, reg(dma)) & idma_mask(dma))
/* Triple-buffered channels own a 2-bit "current buffer" field each. */
#define tri_cur_buf_mask(ch)	(idma_mask((ch) * 2) * 3)
#define tri_cur_buf_shift(ch)	(ffs(idma_mask((ch) * 2)) - 1)
/*
 * Software-reset the IPU submodules via the IPU_MEM_RST register and
 * busy-wait until the hardware clears the self-clearing "start" bit
 * (bit 31).  NOTE(review): the loop body and the return statement are
 * not visible in this chunk -- confirm a timeout/cpu_relax() exists.
 */
111 static int ipu_reset(struct ipu_soc *ipu)
115 ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);
117 while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
/*
 * Map a pixel clock back to its owning ipu_soc.  The two pixel clocks are
 * an array embedded in struct ipu_soc, and clk->id is the array index, so
 * stepping back by clk->id yields &pixel_clk[0], from which container_of()
 * recovers the ipu_soc.
 */
126 static inline struct ipu_soc *pixelclk2ipu(struct clk *clk)
129 struct clk *base = clk - clk->id;
131 ipu = container_of(base, struct ipu_soc, pixel_clk[0]);
/*
 * Return the pixel clock rate derived from the DI_BS_CLKGEN0 divider.
 * The divider has 4 fractional bits, hence the "* 16" scaling of the
 * parent rate.  NOTE(review): a div == 0 guard is not visible in this
 * chunk -- confirm one exists before the division.
 */
136 static unsigned long _ipu_pixel_clk_get_rate(struct clk *clk)
138 struct ipu_soc *ipu = pixelclk2ipu(clk);
139 u32 div = ipu_di_read(ipu, clk->id, DI_BS_CLKGEN0);
142 return (clk_get_rate(clk->parent) * 16) / div;
/*
 * Round a requested pixel clock rate to what the DI divider can produce.
 * The divider is a 12.4 fixed-point value, so the parent rate is scaled
 * by 16 before dividing.
 */
145 static unsigned long _ipu_pixel_clk_round_rate(struct clk *clk, unsigned long rate)
148 u32 parent_rate = clk_get_rate(clk->parent) * 16;
151 * Fractional part is 4 bits,
152 * so simply multiply by 2^4 to get fractional part.
154 div = parent_rate / rate;
/* Clamp: the minimum display clock divider is 1 (0x10 in 12.4 format). */
156 if (div < 0x10) /* Min DI disp clock divider is 1 */
161 /* Round up divider if it gets us closer to desired pix clk */
/* NOTE(review): the rounding adjustment body is not visible here. */
162 if ((div & 0xC) == 0xC) {
167 return parent_rate / div;
/*
 * Program the DI clock generator for the requested pixel rate.  Writes the
 * 12.4 fixed-point divider to DI_BS_CLKGEN0 and derives the clock-high
 * ("down") time in DI_BS_CLKGEN1 as half the period, under ipu_lock.
 * NOTE(review): no div == 0 / rate == 0 guard is visible in this chunk.
 */
170 static int _ipu_pixel_clk_set_rate(struct clk *clk, unsigned long rate)
172 struct ipu_soc *ipu = pixelclk2ipu(clk);
173 u32 div = (clk_get_rate(clk->parent) * 16) / rate;
174 unsigned long lock_flags;
176 /* Round up divider if it gets us closer to desired pix clk */
177 if ((div & 0xC) == 0xC) {
182 spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
183 ipu_di_write(ipu, clk->id, div, DI_BS_CLKGEN0);
185 /* Setup pixel clock timing */
186 /* FIXME: needs to be more flexible */
187 /* Down time is half of period */
188 ipu_di_write(ipu, clk->id, (div / 16) << 16, DI_BS_CLKGEN1);
189 spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
/*
 * Enable a DI pixel clock by releasing the corresponding DI counter in
 * IPU_DISP_GEN (DI0 or DI1 selected by clk->id).
 */
194 static int _ipu_pixel_clk_enable(struct clk *clk)
196 struct ipu_soc *ipu = pixelclk2ipu(clk);
197 u32 disp_gen = ipu_cm_read(ipu, IPU_DISP_GEN);
198 disp_gen |= clk->id ? DI1_COUNTER_RELEASE : DI0_COUNTER_RELEASE;
199 ipu_cm_write(ipu, disp_gen, IPU_DISP_GEN);
/*
 * Disable a DI pixel clock by clearing the corresponding DI counter
 * release bit in IPU_DISP_GEN (mirror of _ipu_pixel_clk_enable).
 */
204 static void _ipu_pixel_clk_disable(struct clk *clk)
206 struct ipu_soc *ipu = pixelclk2ipu(clk);
208 u32 disp_gen = ipu_cm_read(ipu, IPU_DISP_GEN);
209 disp_gen &= clk->id ? ~DI1_COUNTER_RELEASE : ~DI0_COUNTER_RELEASE;
210 ipu_cm_write(ipu, disp_gen, IPU_DISP_GEN);
/*
 * Select the pixel clock source: the internal IPU clock (clears
 * DI_GEN_DI_CLK_EXT) or the external DI clock (sets it), written to
 * DI_GENERAL under ipu_lock.
 * NOTE(review): two spin_unlock_irqrestore() calls are visible below;
 * the first presumably sits in an elided "unknown parent" error branch
 * that returns early -- confirm against the full source.
 */
213 static int _ipu_pixel_clk_set_parent(struct clk *clk, struct clk *parent)
215 struct ipu_soc *ipu = pixelclk2ipu(clk);
216 unsigned long lock_flags;
219 spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
220 di_gen = ipu_di_read(ipu, clk->id, DI_GENERAL);
221 if (parent == ipu->ipu_clk)
222 di_gen &= ~DI_GEN_DI_CLK_EXT;
223 else if (!IS_ERR(ipu->di_clk[clk->id]) && parent == ipu->di_clk[clk->id])
224 di_gen |= DI_GEN_DI_CLK_EXT;
226 spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
230 ipu_di_write(ipu, clk->id, di_gen, DI_GENERAL);
231 spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
/*
 * Attach a human-readable name to a statically initialized struct clk when
 * clock debugging is enabled; expands to nothing otherwise.
 * NOTE(review): the #else/#endif lines are not visible in this chunk.
 */
235 #ifdef CONFIG_CLK_DEBUG
236 #define __INIT_CLK_DEBUG(n) .name = #n,
238 #define __INIT_CLK_DEBUG(n)
/*
 * Acquire and wire up the clocks used by one IPU instance: the main
 * ipu_clk, the two locally implemented pixel clocks (registered via
 * clkdev so consumers can clk_get "pixel_clk_0"/"pixel_clk_1"), the
 * external DI clocks and the CSI clocks named by platform data.
 * The "ipuN_..." clock-name strings are patched in place: index 3 is
 * the instance digit, bumped by pdev->id.
 * NOTE(review): only ipu_clk's clk_get() result is error-checked; the
 * di_clk/csi_clk lookups below are not -- confirm failures are tolerated
 * via IS_ERR() at the points of use.
 */
240 static int __devinit ipu_clk_setup_enable(struct ipu_soc *ipu,
241 struct platform_device *pdev)
243 struct imx_ipuv3_platform_data *plat_data = pdev->dev.platform_data;
/* Template clk objects; copied into the per-instance ipu_soc below. */
244 static struct clk ipu_pixel_clk[] = {
246 __INIT_CLK_DEBUG(pixel_clk_0)
248 .get_rate = _ipu_pixel_clk_get_rate,
249 .set_rate = _ipu_pixel_clk_set_rate,
250 .round_rate = _ipu_pixel_clk_round_rate,
251 .set_parent = _ipu_pixel_clk_set_parent,
252 .enable = _ipu_pixel_clk_enable,
253 .disable = _ipu_pixel_clk_disable,
256 __INIT_CLK_DEBUG(pixel_clk_1)
258 .get_rate = _ipu_pixel_clk_get_rate,
259 .set_rate = _ipu_pixel_clk_set_rate,
260 .round_rate = _ipu_pixel_clk_round_rate,
261 .set_parent = _ipu_pixel_clk_set_parent,
262 .enable = _ipu_pixel_clk_enable,
263 .disable = _ipu_pixel_clk_disable,
266 static struct clk_lookup ipu_lookups[] = {
269 .con_id = "pixel_clk_0",
273 .con_id = "pixel_clk_1",
/* Per-instance clock names: 'N' in "ipuN..." is adjusted by pdev->id. */
276 char ipu_clk[] = "ipu1_clk";
277 char di0_clk[] = "ipu1_di0_clk";
278 char di1_clk[] = "ipu1_di1_clk";
280 ipu_clk[3] += pdev->id;
281 di0_clk[3] += pdev->id;
282 di1_clk[3] += pdev->id;
284 ipu->ipu_clk = clk_get(ipu->dev, ipu_clk);
285 if (IS_ERR(ipu->ipu_clk)) {
286 dev_err(ipu->dev, "clk_get failed");
287 return PTR_ERR(ipu->ipu_clk);
289 dev_dbg(ipu->dev, "ipu_clk = %lu\n", clk_get_rate(ipu->ipu_clk));
291 ipu->pixel_clk[0] = ipu_pixel_clk[0];
292 ipu->pixel_clk[1] = ipu_pixel_clk[1];
294 ipu_lookups[0].clk = &ipu->pixel_clk[0];
295 ipu_lookups[1].clk = &ipu->pixel_clk[1];
296 clkdev_add(&ipu_lookups[0]);
297 clkdev_add(&ipu_lookups[1]);
299 clk_debug_register(&ipu->pixel_clk[0]);
300 clk_debug_register(&ipu->pixel_clk[1]);
302 clk_enable(ipu->ipu_clk);
/* Default both pixel clocks to the internal IPU clock as parent. */
304 clk_set_parent(&ipu->pixel_clk[0], ipu->ipu_clk);
305 clk_set_parent(&ipu->pixel_clk[1], ipu->ipu_clk);
307 ipu->di_clk[0] = clk_get(ipu->dev, di0_clk);
308 ipu->di_clk[1] = clk_get(ipu->dev, di1_clk);
310 ipu->csi_clk[0] = clk_get(ipu->dev, plat_data->csi_clk[0]);
311 ipu->csi_clk[1] = clk_get(ipu->dev, plat_data->csi_clk[1]);
/*
 * Chained handler for the IPU "sync" interrupt line.  Walks the list of
 * sync interrupt status registers (int_reg, 0-terminated), masks status
 * against the enabled bits in IPU_INT_CTRL, and dispatches each pending
 * bit to its virtual irq via generic_handle_irq().
 * NOTE(review): ffs() is 1-based, while the mask below clears bit "line"
 * directly; the compensating decrement (if any) is on an elided line --
 * confirm against the full source.  The enclosing loop header is also
 * not visible in this chunk.
 */
317 static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
319 struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
320 const int int_reg[] = { 1, 2, 3, 4, 11, 12, 13, 14, 15, 0 };
328 status = ipu_cm_read(ipu, IPU_INT_STAT(int_reg[i]));
329 status &= ipu_cm_read(ipu, IPU_INT_CTRL(int_reg[i]));
331 while ((line = ffs(status))) {
333 status &= ~(1UL << line);
/* Map the register-local bit to the IPU's virtual irq number space. */
334 line += ipu->irq_start + (int_reg[i] - 1) * 32;
335 generic_handle_irq(line);
/*
 * Chained handler for the IPU "error" interrupt line.  Identical
 * structure to ipu_irq_handler() but walks the error status registers
 * (5, 6, 9, 10; list 0-terminated).  The same NOTE about the 1-based
 * ffs() result vs. the bit-clear below applies here.
 */
341 static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
343 struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
344 const int int_reg[] = { 5, 6, 9, 10, 0 };
352 status = ipu_cm_read(ipu, IPU_INT_STAT(int_reg[i]));
353 status &= ipu_cm_read(ipu, IPU_INT_CTRL(int_reg[i]));
355 while ((line = ffs(status))) {
357 status &= ~(1UL << line);
358 line += ipu->irq_start + (int_reg[i] - 1) * 32;
359 generic_handle_irq(line);
/*
 * irq_chip .irq_ack: acknowledge one IPU interrupt by writing its bit to
 * the matching IPU_INT_STAT register (write-1-to-clear), translating the
 * virtual irq back to the IPU-local index first.
 */
365 static void ipu_ack_irq(struct irq_data *d)
367 struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
368 unsigned int irq = d->irq - ipu->irq_start;
371 spin_lock_irqsave(&ipu->ipu_lock, flags);
372 ipu_cm_write(ipu, 1 << (irq % 32), IPU_INT_STAT(irq / 32 + 1));
373 spin_unlock_irqrestore(&ipu->ipu_lock, flags);
/*
 * irq_chip .irq_unmask: enable one IPU interrupt with a read-modify-write
 * of the matching IPU_INT_CTRL register, under ipu_lock.
 */
376 static void ipu_unmask_irq(struct irq_data *d)
378 struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
379 unsigned int irq = d->irq - ipu->irq_start;
383 spin_lock_irqsave(&ipu->ipu_lock, flags);
384 reg = ipu_cm_read(ipu, IPU_INT_CTRL(irq / 32 + 1));
385 reg |= 1 << (irq % 32);
386 ipu_cm_write(ipu, reg, IPU_INT_CTRL(irq / 32 + 1));
387 spin_unlock_irqrestore(&ipu->ipu_lock, flags);
/*
 * irq_chip .irq_mask: disable one IPU interrupt -- exact mirror of
 * ipu_unmask_irq(), clearing the bit instead of setting it.
 */
390 static void ipu_mask_irq(struct irq_data *d)
392 struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
393 unsigned int irq = d->irq - ipu->irq_start;
397 spin_lock_irqsave(&ipu->ipu_lock, flags);
398 reg = ipu_cm_read(ipu, IPU_INT_CTRL(irq / 32 + 1));
399 reg &= ~(1 << (irq % 32));
400 ipu_cm_write(ipu, reg, IPU_INT_CTRL(irq / 32 + 1));
401 spin_unlock_irqrestore(&ipu->ipu_lock, flags);
/* irq_chip serving the IPU's demultiplexed (virtual) interrupt lines. */
404 static struct irq_chip ipu_irq_chip = {
406 .irq_ack = ipu_ack_irq,
407 .irq_mask = ipu_mask_irq,
408 .irq_unmask = ipu_unmask_irq,
/*
 * Register the IPU's virtual interrupt range [irq_start, irq_start +
 * MX5_IPU_IRQS) with ipu_irq_chip as level irqs, then install the two
 * chained demultiplexers on the parent sync and error lines.
 * NOTE(review): ipu_probe() also request_irq()s the same parent lines
 * directly -- confirm which of the two registration paths is active.
 */
411 static void __devinit ipu_irq_setup(struct ipu_soc *ipu)
415 for (i = ipu->irq_start; i < ipu->irq_start + MX5_IPU_IRQS; i++) {
416 irq_set_chip_and_handler(i, &ipu_irq_chip, handle_level_irq);
417 set_irq_flags(i, IRQF_VALID);
418 irq_set_chip_data(i, ipu);
421 irq_set_chained_handler(ipu->irq_sync, ipu_irq_handler);
422 irq_set_handler_data(ipu->irq_sync, ipu);
423 irq_set_chained_handler(ipu->irq_err, ipu_err_irq_handler);
424 irq_set_handler_data(ipu->irq_err, ipu);
/*
 * Public wrapper around request_irq() that translates an IPU-local
 * interrupt index into the driver's virtual irq number space.
 */
427 int ipu_request_irq(struct ipu_soc *ipu, unsigned int irq,
428 irq_handler_t handler, unsigned long flags,
429 const char *name, void *dev)
431 return request_irq(ipu->irq_start + irq, handler, flags, name, dev);
433 EXPORT_SYMBOL_GPL(ipu_request_irq);
435 void ipu_enable_irq(struct ipu_soc *ipu, unsigned int irq)
437 return enable_irq(ipu->irq_start + irq);
439 EXPORT_SYMBOL_GPL(ipu_disable_irq);
441 void ipu_disable_irq(struct ipu_soc *ipu, unsigned int irq)
443 return disable_irq(ipu->irq_start + irq);
445 EXPORT_SYMBOL_GPL(ipu_disable_irq);
/*
 * Public wrapper around free_irq() for an IPU-local interrupt index;
 * dev_id must match the cookie passed to ipu_request_irq().
 */
447 void ipu_free_irq(struct ipu_soc *ipu, unsigned int irq, void *dev_id)
449 free_irq(ipu->irq_start + irq, dev_id);
451 EXPORT_SYMBOL_GPL(ipu_free_irq);
/*
 * Interrupt handler used by ipu_wait_for_interrupt(): signals the
 * on-stack completion passed as the dev cookie.  NOTE(review): the
 * "return IRQ_HANDLED;" line is not visible in this chunk.
 */
453 static irqreturn_t ipu_completion_handler(int irq, void *dev)
455 struct completion *completion = dev;
457 complete(completion);
/*
 * Block until the given IPU interrupt fires or timeout_ms elapses.
 * Temporarily claims the interrupt with a completion-signalling handler,
 * waits, then releases it.  Returns 0 on success, -ETIMEDOUT on timeout,
 * or the ipu_request_irq() error.
 * NOTE(review): the irq is requested with a NULL name, which shows up
 * blank in /proc/interrupts -- consider a descriptive string.
 */
461 int ipu_wait_for_interrupt(struct ipu_soc *ipu, int interrupt, int timeout_ms)
463 DECLARE_COMPLETION_ONSTACK(completion);
466 ret = ipu_request_irq(ipu, interrupt, ipu_completion_handler,
467 0, NULL, &completion);
470 "ipu request irq %d fail\n", interrupt);
474 ret = wait_for_completion_timeout(&completion,
475 msecs_to_jiffies(timeout_ms));
477 ipu_free_irq(ipu, interrupt, &completion);
479 return ret > 0 ? 0 : -ETIMEDOUT;
481 EXPORT_SYMBOL_GPL(ipu_wait_for_interrupt);
484 struct ipu_soc *ipu_get_soc(int id)
486 if (id >= MXC_IPU_MAX_NUM)
487 return ERR_PTR(-ENODEV);
489 return &(ipu_array[id]);
493 * This function is called by the driver framework to initialize the IPU
496 * @param dev The device structure for the IPU passed in by the
499 * @return Returns 0 on success or negative error code on error
/*
 * Platform-device probe: claims interrupts and the register window, maps
 * every IPU submodule's register block, sets up clocks and default IDMAC
 * priorities, then registers the device.  Unwinds via the failed_* labels
 * on error.
 */
501 static int __devinit ipu_probe(struct platform_device *pdev)
503 struct imx_ipuv3_platform_data *plat_data = pdev->dev.platform_data;
505 struct resource *res;
506 unsigned long ipu_base;
509 if (pdev->id >= MXC_IPU_MAX_NUM)
512 ipu = &ipu_array[pdev->id];
513 memset(ipu, 0, sizeof(struct ipu_soc));
515 spin_lock_init(&ipu->ipu_lock);
517 g_ipu_hw_rev = plat_data->rev;
519 ipu->dev = &pdev->dev;
522 plat_data->init(pdev->id);
524 ipu->irq_sync = platform_get_irq(pdev, 0);
525 ipu->irq_err = platform_get_irq(pdev, 1);
526 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
/*
 * NOTE(review): irq_err < 0 is treated as fatal here, yet the
 * "irq_err >= 0" test below implies it is optional -- the two checks
 * are inconsistent; confirm intended behavior.
 */
528 if (!res || ipu->irq_sync < 0 || ipu->irq_err < 0) {
533 if (request_irq(ipu->irq_sync, ipu_irq_handler, 0, pdev->name, ipu) != 0) {
534 dev_err(ipu->dev, "request SYNC interrupt failed\n");
536 goto failed_req_irq_sync;
538 /* Some platforms have 2 IPU interrupts */
539 if (ipu->irq_err >= 0) {
541 (ipu->irq_err, ipu_irq_handler, 0, pdev->name, ipu) != 0) {
542 dev_err(ipu->dev, "request ERR interrupt failed\n");
544 goto failed_req_irq_err;
/* Register base offset differs per IPUv3 hardware revision. */
548 ipu_base = res->start;
550 if (g_ipu_hw_rev == 4) /* IPUv3H */
551 ipu_base += IPUV3H_REG_BASE;
552 else if (g_ipu_hw_rev == 3) /* IPUv3M */
553 ipu_base += IPUV3M_REG_BASE;
554 else /* IPUv3D, v3E, v3EX */
555 ipu_base += IPUV3DEX_REG_BASE;
/* Map each submodule's register window individually. */
557 ipu->cm_reg = ioremap(ipu_base + IPU_CM_REG_BASE, PAGE_SIZE);
558 ipu->ic_reg = ioremap(ipu_base + IPU_IC_REG_BASE, PAGE_SIZE);
559 ipu->idmac_reg = ioremap(ipu_base + IPU_IDMAC_REG_BASE, PAGE_SIZE);
560 /* DP Registers are accessed thru the SRM */
561 ipu->dp_reg = ioremap(ipu_base + IPU_SRM_REG_BASE, PAGE_SIZE);
562 ipu->dc_reg = ioremap(ipu_base + IPU_DC_REG_BASE, PAGE_SIZE);
563 ipu->dmfc_reg = ioremap(ipu_base + IPU_DMFC_REG_BASE, PAGE_SIZE);
564 ipu->di_reg[0] = ioremap(ipu_base + IPU_DI0_REG_BASE, PAGE_SIZE);
565 ipu->di_reg[1] = ioremap(ipu_base + IPU_DI1_REG_BASE, PAGE_SIZE);
566 ipu->smfc_reg = ioremap(ipu_base + IPU_SMFC_REG_BASE, PAGE_SIZE);
567 ipu->csi_reg[0] = ioremap(ipu_base + IPU_CSI0_REG_BASE, PAGE_SIZE);
568 ipu->csi_reg[1] = ioremap(ipu_base + IPU_CSI1_REG_BASE, PAGE_SIZE);
569 ipu->cpmem_base = ioremap(ipu_base + IPU_CPMEM_REG_BASE, SZ_128K);
570 ipu->tpmem_base = ioremap(ipu_base + IPU_TPM_REG_BASE, SZ_64K);
571 ipu->dc_tmpl_reg = ioremap(ipu_base + IPU_DC_TMPL_REG_BASE, SZ_128K);
572 ipu->vdi_reg = ioremap(ipu_base + IPU_VDI_REG_BASE, PAGE_SIZE);
573 ipu->disp_base[1] = ioremap(ipu_base + IPU_DISP1_BASE, SZ_4K);
/* Note: ipu->vdi_reg is NOT included in this NULL check. */
575 if (!ipu->cm_reg || !ipu->ic_reg || !ipu->idmac_reg ||
576 !ipu->dp_reg || !ipu->dc_reg || !ipu->dmfc_reg ||
577 !ipu->di_reg[0] || !ipu->di_reg[1] || !ipu->smfc_reg ||
578 !ipu->csi_reg[0] || !ipu->csi_reg[1] || !ipu->cpmem_base ||
579 !ipu->tpmem_base || !ipu->dc_tmpl_reg || !ipu->disp_base[1]
585 dev_dbg(ipu->dev, "IPU CM Regs = %p\n", ipu->cm_reg);
586 dev_dbg(ipu->dev, "IPU IC Regs = %p\n", ipu->ic_reg);
587 dev_dbg(ipu->dev, "IPU IDMAC Regs = %p\n", ipu->idmac_reg);
588 dev_dbg(ipu->dev, "IPU DP Regs = %p\n", ipu->dp_reg);
589 dev_dbg(ipu->dev, "IPU DC Regs = %p\n", ipu->dc_reg);
590 dev_dbg(ipu->dev, "IPU DMFC Regs = %p\n", ipu->dmfc_reg);
591 dev_dbg(ipu->dev, "IPU DI0 Regs = %p\n", ipu->di_reg[0]);
592 dev_dbg(ipu->dev, "IPU DI1 Regs = %p\n", ipu->di_reg[1]);
593 dev_dbg(ipu->dev, "IPU SMFC Regs = %p\n", ipu->smfc_reg);
594 dev_dbg(ipu->dev, "IPU CSI0 Regs = %p\n", ipu->csi_reg[0]);
595 dev_dbg(ipu->dev, "IPU CSI1 Regs = %p\n", ipu->csi_reg[1]);
596 dev_dbg(ipu->dev, "IPU CPMem = %p\n", ipu->cpmem_base);
597 dev_dbg(ipu->dev, "IPU TPMem = %p\n", ipu->tpmem_base);
598 dev_dbg(ipu->dev, "IPU DC Template Mem = %p\n", ipu->dc_tmpl_reg);
599 dev_dbg(ipu->dev, "IPU Display Region 1 Mem = %p\n", ipu->disp_base[1]);
600 dev_dbg(ipu->dev, "IPU VDI Regs = %p\n", ipu->vdi_reg);
602 ret = ipu_clk_setup_enable(ipu, pdev);
604 dev_err(ipu->dev, "ipu clk setup failed\n");
605 goto failed_clk_setup;
608 platform_set_drvdata(pdev, ipu);
614 /* Set sync refresh channels and CSI->mem channel as high priority */
615 ipu_idmac_write(ipu, 0x18800001L, IDMAC_CHA_PRI(0));
617 /* Set MCU_T to divide MCU access window into 2 */
618 ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18), IPU_DISP_GEN);
/* Clock stays off until a channel is initialized (see ipu_init_channel). */
620 clk_disable(ipu->ipu_clk);
622 register_ipu_device(ipu, pdev->id);
/* Error unwind: unmap everything, then release the irqs. */
627 iounmap(ipu->cm_reg);
628 iounmap(ipu->ic_reg);
629 iounmap(ipu->idmac_reg);
630 iounmap(ipu->dc_reg);
631 iounmap(ipu->dp_reg);
632 iounmap(ipu->dmfc_reg);
633 iounmap(ipu->di_reg[0]);
634 iounmap(ipu->di_reg[1]);
635 iounmap(ipu->smfc_reg);
636 iounmap(ipu->csi_reg[0]);
637 iounmap(ipu->csi_reg[1]);
638 iounmap(ipu->cpmem_base);
639 iounmap(ipu->tpmem_base);
640 iounmap(ipu->dc_tmpl_reg);
641 iounmap(ipu->disp_base[1]);
642 iounmap(ipu->vdi_reg);
645 free_irq(ipu->irq_err, ipu);
647 free_irq(ipu->irq_sync, ipu);
/*
 * Platform-device remove: mirror of ipu_probe() -- unregister, release
 * both interrupts, drop the main clock reference and unmap all register
 * windows.
 * NOTE(review): only ipu_clk is clk_put here; the di_clk/csi_clk
 * references taken in ipu_clk_setup_enable() are not released in this
 * function -- confirm they are released elsewhere.
 */
653 int __devexit ipu_remove(struct platform_device *pdev)
655 struct ipu_soc *ipu = platform_get_drvdata(pdev);
657 unregister_ipu_device(ipu, pdev->id);
660 free_irq(ipu->irq_sync, ipu);
662 free_irq(ipu->irq_err, ipu);
664 clk_put(ipu->ipu_clk);
666 iounmap(ipu->cm_reg);
667 iounmap(ipu->ic_reg);
668 iounmap(ipu->idmac_reg);
669 iounmap(ipu->dc_reg);
670 iounmap(ipu->dp_reg);
671 iounmap(ipu->dmfc_reg);
672 iounmap(ipu->di_reg[0]);
673 iounmap(ipu->di_reg[1]);
674 iounmap(ipu->smfc_reg);
675 iounmap(ipu->csi_reg[0]);
676 iounmap(ipu->csi_reg[1]);
677 iounmap(ipu->cpmem_base);
678 iounmap(ipu->tpmem_base);
679 iounmap(ipu->dc_tmpl_reg);
680 iounmap(ipu->disp_base[1]);
681 iounmap(ipu->vdi_reg);
/*
 * Debug helper: dump the main IPU control/status registers via dev_dbg.
 * Purely read-only; the TRB (triple buffer) registers exist only on
 * hardware revision >= 2, hence the guard.
 */
686 void ipu_dump_registers(struct ipu_soc *ipu)
688 dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n", ipu_cm_read(ipu, IPU_CONF));
689 dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n", ipu_idmac_read(ipu, IDMAC_CONF));
690 dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
691 ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
692 dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
693 ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
694 dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
695 ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
696 dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
697 ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
698 dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
699 ipu_idmac_read(ipu, IDMAC_BAND_EN(0)));
700 dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
701 ipu_idmac_read(ipu, IDMAC_BAND_EN(32)));
702 dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
703 ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
704 dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
705 ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
706 if (g_ipu_hw_rev >= 2) {
707 dev_dbg(ipu->dev, "IPU_CHA_TRB_MODE_SEL0 = \t0x%08X\n",
708 ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(0)));
709 dev_dbg(ipu->dev, "IPU_CHA_TRB_MODE_SEL1 = \t0x%08X\n",
710 ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(32)));
712 dev_dbg(ipu->dev, "DMFC_WR_CHAN = \t0x%08X\n",
713 ipu_dmfc_read(ipu, DMFC_WR_CHAN));
714 dev_dbg(ipu->dev, "DMFC_WR_CHAN_DEF = \t0x%08X\n",
715 ipu_dmfc_read(ipu, DMFC_WR_CHAN_DEF));
716 dev_dbg(ipu->dev, "DMFC_DP_CHAN = \t0x%08X\n",
717 ipu_dmfc_read(ipu, DMFC_DP_CHAN));
718 dev_dbg(ipu->dev, "DMFC_DP_CHAN_DEF = \t0x%08X\n",
719 ipu_dmfc_read(ipu, DMFC_DP_CHAN_DEF));
720 dev_dbg(ipu->dev, "DMFC_IC_CTRL = \t0x%08X\n",
721 ipu_dmfc_read(ipu, DMFC_IC_CTRL));
722 dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
723 ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
724 dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
725 ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
726 dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
727 ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
728 dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
729 ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
733 * This function is called to initialize a logical IPU channel.
735 * @param ipu ipu handler
736 * @param channel Input parameter for the logical channel ID to init.
738 * @param params Input parameter containing union of channel
739 * initialization parameters.
741 * @return Returns 0 on success or negative error code on fail
/*
 * Initialize one logical IPU channel: switch the IPU clock on if needed,
 * then (under ipu_lock) configure the submodules that channel uses,
 * bump the relevant use counters, mark the channel in channel_init_mask
 * and write the accumulated enable bits back to IPU_CONF.
 * Returns 0 on success or a negative error code.
 */
743 int32_t ipu_init_channel(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params)
748 unsigned long lock_flags;
750 dev_dbg(ipu->dev, "init channel = %d\n", IPU_CHAN_ID(channel));
/* First channel powers the IPU clock on; ipu_uninit_channel turns it off. */
752 if (ipu->clk_enabled == false) {
753 ipu->clk_enabled = true;
754 clk_enable(ipu->ipu_clk);
757 spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
758 if (ipu->channel_init_mask & (1L << IPU_CHAN_ID(channel))) {
760 dev_err(ipu->dev, "Warning: channel already initialized %d\n",
761 IPU_CHAN_ID(channel));
764 ipu_conf = ipu_cm_read(ipu, IPU_CONF);
/* CSI_MEM: direct CSI capture to memory through the SMFC. */
771 if (params->csi_mem.csi > 1) {
776 if (params->csi_mem.interlaced)
777 ipu->chan_is_interlaced[channel_2_dma(channel,
778 IPU_OUTPUT_BUFFER)] = true;
780 ipu->chan_is_interlaced[channel_2_dma(channel,
781 IPU_OUTPUT_BUFFER)] = false;
783 ipu->smfc_use_count++;
784 ipu->csi_channel[params->csi_mem.csi] = channel;
/* Select MIPI vs. parallel data source for this CSI. */
787 if (params->csi_mem.mipi_en) {
788 ipu_conf |= (1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
789 params->csi_mem.csi));
790 _ipu_smfc_init(ipu, channel, params->csi_mem.mipi_id,
791 params->csi_mem.csi);
793 ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
794 params->csi_mem.csi));
795 _ipu_smfc_init(ipu, channel, 0, params->csi_mem.csi);
798 /*CSI data (include compander) dest*/
799 _ipu_csi_init(ipu, channel, params->csi_mem.csi);
/* CSI -> IC pre-processing encoder path (mutually exclusive with VDI). */
801 case CSI_PRP_ENC_MEM:
802 if (params->csi_prp_enc_mem.csi > 1) {
806 if (ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) {
810 ipu->using_ic_dirct_ch = CSI_PRP_ENC_MEM;
813 ipu->csi_channel[params->csi_prp_enc_mem.csi] = channel;
815 /*Without SMFC, CSI only support parallel data source*/
816 ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
817 params->csi_prp_enc_mem.csi));
819 /*CSI0/1 feed into IC*/
820 ipu_conf &= ~IPU_CONF_IC_INPUT;
821 if (params->csi_prp_enc_mem.csi)
822 ipu_conf |= IPU_CONF_CSI_SEL;
824 ipu_conf &= ~IPU_CONF_CSI_SEL;
826 /*PRP skip buffer in memory, only valid when RWS_EN is true*/
827 reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
828 ipu_cm_write(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
830 /*CSI data (include compander) dest*/
831 _ipu_csi_init(ipu, channel, params->csi_prp_enc_mem.csi);
832 _ipu_ic_init_prpenc(ipu, params, true);
/* CSI -> IC pre-processing viewfinder path. */
835 if (params->csi_prp_vf_mem.csi > 1) {
839 if (ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) {
843 ipu->using_ic_dirct_ch = CSI_PRP_VF_MEM;
846 ipu->csi_channel[params->csi_prp_vf_mem.csi] = channel;
848 /*Without SMFC, CSI only support parallel data source*/
849 ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
850 params->csi_prp_vf_mem.csi));
852 /*CSI0/1 feed into IC*/
853 ipu_conf &= ~IPU_CONF_IC_INPUT;
854 if (params->csi_prp_vf_mem.csi)
855 ipu_conf |= IPU_CONF_CSI_SEL;
857 ipu_conf &= ~IPU_CONF_CSI_SEL;
859 /*PRP skip buffer in memory, only valid when RWS_EN is true*/
860 reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
861 ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
863 /*CSI data (include compander) dest*/
864 _ipu_csi_init(ipu, channel, params->csi_prp_vf_mem.csi);
865 _ipu_ic_init_prpvf(ipu, params, true);
/* Memory -> IC viewfinder path. */
869 reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
870 ipu_cm_write(ipu, reg | FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
872 if (params->mem_prp_vf_mem.graphics_combine_en)
873 ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
874 if (params->mem_prp_vf_mem.alpha_chan_en)
875 ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
877 _ipu_ic_init_prpvf(ipu, params, false);
/* Memory -> VDI (de-interlacer) -> IC viewfinder path. */
879 case MEM_VDI_PRP_VF_MEM:
880 if ((ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM) ||
881 (ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)) {
885 ipu->using_ic_dirct_ch = MEM_VDI_PRP_VF_MEM;
887 ipu->vdi_use_count++;
888 reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
889 reg &= ~FS_VDI_SRC_SEL_MASK;
890 ipu_cm_write(ipu, reg , IPU_FS_PROC_FLOW1);
892 if (params->mem_prp_vf_mem.graphics_combine_en)
893 ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
894 _ipu_ic_init_prpvf(ipu, params, false);
895 _ipu_vdi_init(ipu, channel, params);
/* Previous/next field feeder channels for the VDI. */
897 case MEM_VDI_PRP_VF_MEM_P:
898 _ipu_vdi_init(ipu, channel, params);
900 case MEM_VDI_PRP_VF_MEM_N:
901 _ipu_vdi_init(ipu, channel, params);
/* Rotation paths through the IRT. */
905 ipu->rot_use_count++;
906 _ipu_ic_init_rotate_vf(ipu, params);
908 case MEM_PRP_ENC_MEM:
910 reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
911 ipu_cm_write(ipu, reg | FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
912 _ipu_ic_init_prpenc(ipu, params, false);
914 case MEM_ROT_ENC_MEM:
916 ipu->rot_use_count++;
917 _ipu_ic_init_rotate_enc(ipu, params);
/* Memory -> IC post-processing path. */
920 if (params->mem_pp_mem.graphics_combine_en)
921 ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
922 if (params->mem_pp_mem.alpha_chan_en)
923 ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
924 _ipu_ic_init_pp(ipu, params);
928 _ipu_ic_init_rotate_pp(ipu, params);
930 ipu->rot_use_count++;
/* Display paths: DC sync, DP BG/FG sync, DC direct async. */
933 if (params->mem_dc_sync.di > 1) {
938 ipu->dc_di_assignment[1] = params->mem_dc_sync.di;
939 _ipu_dc_init(ipu, 1, params->mem_dc_sync.di,
940 params->mem_dc_sync.interlaced,
941 params->mem_dc_sync.out_pixel_fmt);
942 ipu->di_use_count[params->mem_dc_sync.di]++;
944 ipu->dmfc_use_count++;
947 if (params->mem_dp_bg_sync.di > 1) {
952 if (params->mem_dp_bg_sync.alpha_chan_en)
953 ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
955 ipu->dc_di_assignment[5] = params->mem_dp_bg_sync.di;
956 _ipu_dp_init(ipu, channel, params->mem_dp_bg_sync.in_pixel_fmt,
957 params->mem_dp_bg_sync.out_pixel_fmt);
958 _ipu_dc_init(ipu, 5, params->mem_dp_bg_sync.di,
959 params->mem_dp_bg_sync.interlaced,
960 params->mem_dp_bg_sync.out_pixel_fmt);
961 ipu->di_use_count[params->mem_dp_bg_sync.di]++;
964 ipu->dmfc_use_count++;
967 _ipu_dp_init(ipu, channel, params->mem_dp_fg_sync.in_pixel_fmt,
968 params->mem_dp_fg_sync.out_pixel_fmt);
970 if (params->mem_dp_fg_sync.alpha_chan_en)
971 ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
975 ipu->dmfc_use_count++;
978 if (params->direct_async.di > 1) {
983 ipu->dc_di_assignment[8] = params->direct_async.di;
984 _ipu_dc_init(ipu, 8, params->direct_async.di, false, IPU_PIX_FMT_GENERIC);
985 ipu->di_use_count[params->direct_async.di]++;
989 if (params->direct_async.di > 1) {
994 ipu->dc_di_assignment[9] = params->direct_async.di;
995 _ipu_dc_init(ipu, 9, params->direct_async.di, false, IPU_PIX_FMT_GENERIC);
996 ipu->di_use_count[params->direct_async.di]++;
1000 dev_err(ipu->dev, "Missing channel initialization\n");
1004 /* Enable IPU sub module */
1005 ipu->channel_init_mask |= 1L << IPU_CHAN_ID(channel);
1007 ipu_cm_write(ipu, ipu_conf, IPU_CONF);
1010 spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1013 EXPORT_SYMBOL(ipu_init_channel);
1016 * This function is called to uninitialize a logical IPU channel.
1018 * @param ipu ipu handler
1019 * @param channel Input parameter for the logical channel ID to uninit.
/*
 * Uninitialize a logical IPU channel: verify it is initialized and its
 * IDMAC channels are disabled, tear down the submodule configuration set
 * up by ipu_init_channel(), drop the use counters, and gate off any
 * submodule whose count reached zero via IPU_CONF.  If IPU_CONF ends up
 * fully cleared the IPU clock is disabled as well.
 */
1021 void ipu_uninit_channel(struct ipu_soc *ipu, ipu_channel_t channel)
1023 unsigned long lock_flags;
1025 uint32_t in_dma, out_dma = 0;
1028 spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
1030 if ((ipu->channel_init_mask & (1L << IPU_CHAN_ID(channel))) == 0) {
1031 dev_err(ipu->dev, "Channel already uninitialized %d\n",
1032 IPU_CHAN_ID(channel));
1033 spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1037 /* Make sure channel is disabled */
1038 /* Get input and output dma channels */
1039 in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
1040 out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
1042 if (idma_is_set(ipu, IDMAC_CHA_EN, in_dma) ||
1043 idma_is_set(ipu, IDMAC_CHA_EN, out_dma)) {
1045 "Channel %d is not disabled, disable first\n",
1046 IPU_CHAN_ID(channel));
1047 spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1051 ipu_conf = ipu_cm_read(ipu, IPU_CONF);
1053 /* Reset the double buffer */
1054 reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(in_dma));
1055 ipu_cm_write(ipu, reg & ~idma_mask(in_dma), IPU_CHA_DB_MODE_SEL(in_dma));
1056 reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(out_dma));
1057 ipu_cm_write(ipu, reg & ~idma_mask(out_dma), IPU_CHA_DB_MODE_SEL(out_dma));
1059 /* Reset the triple buffer */
1060 reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(in_dma));
1061 ipu_cm_write(ipu, reg & ~idma_mask(in_dma), IPU_CHA_TRB_MODE_SEL(in_dma));
1062 reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(out_dma));
1063 ipu_cm_write(ipu, reg & ~idma_mask(out_dma), IPU_CHA_TRB_MODE_SEL(out_dma));
1065 if (_ipu_is_ic_chan(in_dma) || _ipu_is_dp_graphic_chan(in_dma)) {
1066 ipu->sec_chan_en[IPU_CHAN_ID(channel)] = false;
1067 ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = false;
/* Per-channel-type teardown; mirrors the init switch. */
1075 ipu->smfc_use_count--;
1076 if (ipu->csi_channel[0] == channel) {
1077 ipu->csi_channel[0] = CHAN_NONE;
1078 } else if (ipu->csi_channel[1] == channel) {
1079 ipu->csi_channel[1] = CHAN_NONE;
1082 case CSI_PRP_ENC_MEM:
1083 ipu->ic_use_count--;
1084 if (ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)
1085 ipu->using_ic_dirct_ch = 0;
1086 _ipu_ic_uninit_prpenc(ipu);
1087 if (ipu->csi_channel[0] == channel) {
1088 ipu->csi_channel[0] = CHAN_NONE;
1089 } else if (ipu->csi_channel[1] == channel) {
1090 ipu->csi_channel[1] = CHAN_NONE;
1093 case CSI_PRP_VF_MEM:
1094 ipu->ic_use_count--;
1095 if (ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM)
1096 ipu->using_ic_dirct_ch = 0;
1097 _ipu_ic_uninit_prpvf(ipu);
1098 if (ipu->csi_channel[0] == channel) {
1099 ipu->csi_channel[0] = CHAN_NONE;
1100 } else if (ipu->csi_channel[1] == channel) {
1101 ipu->csi_channel[1] = CHAN_NONE;
1104 case MEM_PRP_VF_MEM:
1105 ipu->ic_use_count--;
1106 _ipu_ic_uninit_prpvf(ipu);
1107 reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
1108 ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
1110 case MEM_VDI_PRP_VF_MEM:
1111 ipu->ic_use_count--;
1112 ipu->vdi_use_count--;
1113 if (ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM)
1114 ipu->using_ic_dirct_ch = 0;
1115 _ipu_ic_uninit_prpvf(ipu);
1116 _ipu_vdi_uninit(ipu);
1117 reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
1118 ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
1120 case MEM_VDI_PRP_VF_MEM_P:
1121 case MEM_VDI_PRP_VF_MEM_N:
1123 case MEM_ROT_VF_MEM:
1124 ipu->rot_use_count--;
1125 ipu->ic_use_count--;
1126 _ipu_ic_uninit_rotate_vf(ipu);
1128 case MEM_PRP_ENC_MEM:
1129 ipu->ic_use_count--;
1130 _ipu_ic_uninit_prpenc(ipu);
1131 reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
1132 ipu_cm_write(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
1134 case MEM_ROT_ENC_MEM:
1135 ipu->rot_use_count--;
1136 ipu->ic_use_count--;
1137 _ipu_ic_uninit_rotate_enc(ipu);
1140 ipu->ic_use_count--;
1141 _ipu_ic_uninit_pp(ipu);
1143 case MEM_ROT_PP_MEM:
1144 ipu->rot_use_count--;
1145 ipu->ic_use_count--;
1146 _ipu_ic_uninit_rotate_pp(ipu);
1149 _ipu_dc_uninit(ipu, 1);
1150 ipu->di_use_count[ipu->dc_di_assignment[1]]--;
1151 ipu->dc_use_count--;
1152 ipu->dmfc_use_count--;
1155 _ipu_dp_uninit(ipu, channel);
1156 _ipu_dc_uninit(ipu, 5);
1157 ipu->di_use_count[ipu->dc_di_assignment[5]]--;
1158 ipu->dc_use_count--;
1159 ipu->dp_use_count--;
1160 ipu->dmfc_use_count--;
1163 _ipu_dp_uninit(ipu, channel);
1164 ipu->dc_use_count--;
1165 ipu->dp_use_count--;
1166 ipu->dmfc_use_count--;
1169 _ipu_dc_uninit(ipu, 8);
1170 ipu->di_use_count[ipu->dc_di_assignment[8]]--;
1171 ipu->dc_use_count--;
1174 _ipu_dc_uninit(ipu, 9);
1175 ipu->di_use_count[ipu->dc_di_assignment[9]]--;
1176 ipu->dc_use_count--;
1182 ipu->channel_init_mask &= ~(1L << IPU_CHAN_ID(channel));
/* Gate off every submodule whose use count dropped to zero. */
1184 if (ipu->ic_use_count == 0)
1185 ipu_conf &= ~IPU_CONF_IC_EN;
1186 if (ipu->vdi_use_count == 0) {
1187 ipu_conf &= ~IPU_CONF_ISP_EN;
1188 ipu_conf &= ~IPU_CONF_VDI_EN;
1189 ipu_conf &= ~IPU_CONF_IC_INPUT;
1191 if (ipu->rot_use_count == 0)
1192 ipu_conf &= ~IPU_CONF_ROT_EN;
1193 if (ipu->dc_use_count == 0)
1194 ipu_conf &= ~IPU_CONF_DC_EN;
1195 if (ipu->dp_use_count == 0)
1196 ipu_conf &= ~IPU_CONF_DP_EN;
1197 if (ipu->dmfc_use_count == 0)
1198 ipu_conf &= ~IPU_CONF_DMFC_EN;
1199 if (ipu->di_use_count[0] == 0) {
1200 ipu_conf &= ~IPU_CONF_DI0_EN;
1202 if (ipu->di_use_count[1] == 0) {
1203 ipu_conf &= ~IPU_CONF_DI1_EN;
1205 if (ipu->smfc_use_count == 0)
1206 ipu_conf &= ~IPU_CONF_SMFC_EN;
1208 ipu_cm_write(ipu, ipu_conf, IPU_CONF);
1210 spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
/* Last user gone: power the IPU clock down again. */
1212 if (ipu_conf == 0) {
1213 clk_disable(ipu->ipu_clk);
1214 ipu->clk_enabled = false;
/* Catch use-count imbalances from mismatched init/uninit calls. */
1217 WARN_ON(ipu->ic_use_count < 0);
1218 WARN_ON(ipu->vdi_use_count < 0);
1219 WARN_ON(ipu->rot_use_count < 0);
1220 WARN_ON(ipu->dc_use_count < 0);
1221 WARN_ON(ipu->dp_use_count < 0);
1222 WARN_ON(ipu->dmfc_use_count < 0);
1223 WARN_ON(ipu->smfc_use_count < 0);
1225 EXPORT_SYMBOL(ipu_uninit_channel);
/*
 * NOTE(review): this extract is damaged -- each line carries a stray leading
 * line number, and interior lines (opening braces, local declarations,
 * `switch`/`if` headers, `return` and `break` statements) are elided.
 * Comments only are added below; the remaining code text is untouched so it
 * can be diffed against the pristine file.
 */
1228 * This function is called to initialize buffer(s) for logical IPU channel.
1230 * @param       ipu		ipu handler
1232 * @param       channel         Input parameter for the logical channel ID.
1234 * @param       type            Input parameter which buffer to initialize.
1236 * @param       pixel_fmt       Input parameter for pixel format of buffer.
1237 *                              Pixel format is a FOURCC ASCII code.
1239 * @param       width           Input parameter for width of buffer in pixels.
1241 * @param       height          Input parameter for height of buffer in pixels.
1243 * @param       stride          Input parameter for stride length of buffer
1246 * @param       rot_mode        Input parameter for rotation setting of buffer.
1247 *                              A rotation setting other than
1248 *                              IPU_ROTATE_VERT_FLIP
1249 *                              should only be used for input buffers of
1250 *                              rotation channels.
1252 * @param       phyaddr_0       Input parameter buffer 0 physical address.
1254 * @param       phyaddr_1       Input parameter buffer 1 physical address.
1255 *                              Setting this to a value other than NULL enables
1256 *                              double buffering mode.
1258 * @param       phyaddr_2       Input parameter buffer 2 physical address.
1259 *                              Setting this to a value other than NULL enables
1260 *                              triple buffering mode, phyaddr_1 should not be
1263 * @param       u		private u offset for additional cropping,
1266 * @param       v		private v offset for additional cropping,
1269 * @return      Returns 0 on success or negative error code on fail
/* Programs the IDMAC channel parameter memory (CPMEM) for one logical
 * channel and selects single/double/triple buffering mode. */
1271 int32_t ipu_init_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
1274 				uint16_t width, uint16_t height,
1276 				ipu_rotate_mode_t rot_mode,
1277 				dma_addr_t phyaddr_0, dma_addr_t phyaddr_1,
1278 				dma_addr_t phyaddr_2,
1279 				uint32_t u, uint32_t v)
1281 	unsigned long lock_flags;
1284 	uint32_t burst_size;
/* Map the logical channel + buffer type onto a hardware IDMAC channel. */
1286 	dma_chan = channel_2_dma(channel, type);
1287 	if (!idma_is_valid(dma_chan))
/* Clamp the stride up to at least one full row of pixels. */
1290 	if (stride < width * bytes_per_pixel(pixel_fmt))
1291 		stride = width * bytes_per_pixel(pixel_fmt);
/* NOTE(review): the `if` guarding this error message (a stride alignment
 * check) has been elided from this extract. */
1295 			"Stride not 32-bit aligned, stride = %d\n", stride);
1298 	/* IC & IRT channels' width must be multiple of 8 pixels */
1299 	if ((_ipu_is_ic_chan(dma_chan) || _ipu_is_irt_chan(dma_chan))
1301 		dev_err(ipu->dev, "Width must be 8 pixel multiple\n");
1305 	/* IPUv3EX and IPUv3M support triple buffer */
1306 	if ((!_ipu_is_trb_chan(dma_chan)) && phyaddr_2) {
1307 		dev_err(ipu->dev, "Chan%d doesn't support triple buffer "
1308 				   "mode\n", dma_chan);
/* Triple buffering requires buffer 1 to be populated as well. */
1311 	if (!phyaddr_1 && phyaddr_2) {
1312 		dev_err(ipu->dev, "Chan%d's buf1 physical addr is NULL for "
1313 				   "triple buffer mode\n", dma_chan);
1317 	/* Build parameter memory data for DMA channel */
1318 	_ipu_ch_param_init(ipu, dma_chan, pixel_fmt, width, height, stride, u, v, 0,
1319 			   phyaddr_0, phyaddr_1, phyaddr_2);
1321 	/* Set correlative channel parameter of local alpha channel */
1322 	if ((_ipu_is_ic_graphic_chan(dma_chan) ||
1323 	     _ipu_is_dp_graphic_chan(dma_chan)) &&
1324 	    (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] == true)) {
1325 		_ipu_ch_param_set_alpha_use_separate_channel(ipu, dma_chan, true);
1326 		_ipu_ch_param_set_alpha_buffer_memory(ipu, dma_chan);
1327 		_ipu_ch_param_set_alpha_condition_read(ipu, dma_chan);
1328 		/* fix alpha width as 8 and burst size as 16*/
1329 		_ipu_ch_params_set_alpha_width(ipu, dma_chan, 8);
1330 		_ipu_ch_param_set_burst_size(ipu, dma_chan, 16);
1331 	} else if (_ipu_is_ic_graphic_chan(dma_chan) &&
1332 		   ipu_pixel_format_has_alpha(pixel_fmt))
1333 		_ipu_ch_param_set_alpha_use_separate_channel(ipu, dma_chan, false);
1336 		_ipu_ch_param_set_rotation(ipu, dma_chan, rot_mode);
1338 	/* IC and ROT channels have restriction of 8 or 16 pix burst length */
1339 	if (_ipu_is_ic_chan(dma_chan)) {
1340 		if ((width % 16) == 0)
1341 			_ipu_ch_param_set_burst_size(ipu, dma_chan, 16);
1343 			_ipu_ch_param_set_burst_size(ipu, dma_chan, 8);
1344 	} else if (_ipu_is_irt_chan(dma_chan)) {
1345 		_ipu_ch_param_set_burst_size(ipu, dma_chan, 8);
1346 		_ipu_ch_param_set_block_mode(ipu, dma_chan);
1347 	} else if (_ipu_is_dmfc_chan(dma_chan)) {
/* DMFC setup touches shared registers, hence the lock around it. */
1348 		burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
1349 		spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
1350 		_ipu_dmfc_set_wait4eot(ipu, dma_chan, width);
1351 		_ipu_dmfc_set_burst_size(ipu, dma_chan, burst_size);
1352 		spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1355 	if (_ipu_disp_chan_is_interlaced(ipu, channel) ||
1356 	    ipu->chan_is_interlaced[dma_chan])
1357 		_ipu_ch_param_set_interlaced_scan(ipu, dma_chan);
1359 	if (_ipu_is_ic_chan(dma_chan) || _ipu_is_irt_chan(dma_chan)) {
1360 		burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
1361 		_ipu_ic_idma_init(ipu, dma_chan, width, height, burst_size,
1363 	} else if (_ipu_is_smfc_chan(dma_chan)) {
/* SMFC burst size is expressed in different units depending on bpp;
 * generic 8/16-bit data (bpp codes 5 and 3) is scaled differently. */
1364 		burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
1365 		if ((pixel_fmt == IPU_PIX_FMT_GENERIC) &&
1366 			((_ipu_ch_param_get_bpp(ipu, dma_chan) == 5) ||
1367 			(_ipu_ch_param_get_bpp(ipu, dma_chan) == 3)))
1368 			burst_size = burst_size >> 4;
1370 			burst_size = burst_size >> 2;
1371 		_ipu_smfc_set_burst_size(ipu, channel, burst_size-1);
1374 	if (idma_is_set(ipu, IDMAC_CHA_PRI, dma_chan) && !cpu_is_mx53()
1376 		_ipu_ch_param_set_high_priority(ipu, dma_chan);
1378 	_ipu_ch_param_dump(ipu, dma_chan);
1380 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
/* Triple buffering: clear double-buffer mode, set triple-buffer mode,
 * point the IDMAC at the sub-CPMEM entries, reset to buffer 0. */
1381 	if (phyaddr_2 && g_ipu_hw_rev >= 2) {
1382 		reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(dma_chan));
1383 		reg &= ~idma_mask(dma_chan);
1384 		ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(dma_chan));
1386 		reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
1387 		reg |= idma_mask(dma_chan);
1388 		ipu_cm_write(ipu, reg, IPU_CHA_TRB_MODE_SEL(dma_chan));
1390 		/* Set IDMAC third buffer's cpmem number */
1391 		/* See __ipu_ch_get_third_buf_cpmem_num() for mapping */
1392 		ipu_idmac_write(ipu, 0x00444047L, IDMAC_SUB_ADDR_4);
1393 		ipu_idmac_write(ipu, 0x46004241L, IDMAC_SUB_ADDR_3);
1394 		ipu_idmac_write(ipu, 0x00000045L, IDMAC_SUB_ADDR_1);
1396 		/* Reset to buffer 0 */
1397 		ipu_cm_write(ipu, tri_cur_buf_mask(dma_chan),
1398 				IPU_CHA_TRIPLE_CUR_BUF(dma_chan));
/* Single/double buffering: clear triple-buffer mode, then set or clear
 * double-buffer mode depending on whether phyaddr_1 was supplied
 * (NOTE(review): the `else`/`if` headers are elided in this extract). */
1400 		reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
1401 		reg &= ~idma_mask(dma_chan);
1402 		ipu_cm_write(ipu, reg, IPU_CHA_TRB_MODE_SEL(dma_chan));
1404 		reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(dma_chan));
1406 			reg |= idma_mask(dma_chan);
1408 			reg &= ~idma_mask(dma_chan);
1409 		ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(dma_chan));
1411 		/* Reset to buffer 0 */
1412 		ipu_cm_write(ipu, idma_mask(dma_chan),
1413 				IPU_CHA_CUR_BUF(dma_chan));
1416 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1420 EXPORT_SYMBOL(ipu_init_channel_buffer);
1423 * This function is called to update the physical address of a buffer for
1424 * a logical IPU channel.
1426 * @param       ipu		ipu handler
1427 * @param       channel         Input parameter for the logical channel ID.
1429 * @param       type            Input parameter which buffer to initialize.
1431 * @param       bufNum          Input parameter for buffer number to update.
1432 *                              0 or 1 are the only valid values.
1434 * @param       phyaddr         Input parameter buffer physical address.
1436 * @return      This function returns 0 on success or negative error code on
1437 *              fail. This function will fail if the buffer is set to ready.
/* NOTE(review): lines are elided from this extract (locals, the bufNum==0
 * branch header, the error path, and the return). Comments only added. */
1439 int32_t ipu_update_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
1440 				ipu_buffer_t type, uint32_t bufNum, dma_addr_t phyaddr)
1444 	unsigned long lock_flags;
1445 	uint32_t dma_chan = channel_2_dma(channel, type);
1446 	if (dma_chan == IDMA_CHAN_INVALID)
1449 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
/* Read the ready bit for the requested buffer number (0/1/2). */
1452 		reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan));
1453 	else if (bufNum == 1)
1454 		reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan));
1456 		reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan));
/* Only update the CPMEM buffer address while the buffer is NOT ready;
 * updating a ready buffer would race with the IDMAC. */
1458 	if ((reg & idma_mask(dma_chan)) == 0)
1459 		_ipu_ch_param_set_buffer(ipu, dma_chan, bufNum, phyaddr);
1463 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1466 EXPORT_SYMBOL(ipu_update_channel_buffer);
1470 * This function is called to initialize a buffer for logical IPU channel.
1472 * @param       ipu		ipu handler
1473 * @param       channel         Input parameter for the logical channel ID.
1475 * @param       type            Input parameter which buffer to initialize.
1477 * @param       pixel_fmt       Input parameter for pixel format of buffer.
1478 *                              Pixel format is a FOURCC ASCII code.
1480 * @param       width           Input parameter for width of buffer in pixels.
1482 * @param       height          Input parameter for height of buffer in pixels.
1484 * @param       stride          Input parameter for stride length of buffer
1487 * @param       u		predefined private u offset for additional cropping,
1490 * @param       v		predefined private v offset for additional cropping,
1493 * @param       vertical_offset vertical offset for Y coordinate
1494 *                              in the existed frame
1497 * @param       horizontal_offset horizontal offset for X coordinate
1498 *                              in the existed frame
1501 * @return      Returns 0 on success or negative error code on fail
1502 *              This function will fail if any buffer is set to ready.
/* NOTE(review): locals, error paths and the return are elided in this
 * extract; comments only added. */
1505 int32_t ipu_update_channel_offset(struct ipu_soc *ipu,
1506 				ipu_channel_t channel, ipu_buffer_t type,
1508 				uint16_t width, uint16_t height,
1510 				uint32_t u, uint32_t v,
1511 				uint32_t vertical_offset, uint32_t horizontal_offset)
1514 	unsigned long lock_flags;
1515 	uint32_t dma_chan = channel_2_dma(channel, type);
1517 	if (dma_chan == IDMA_CHAN_INVALID)
1520 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
/* Refuse to touch offsets while any buffer is marked ready: buf0, buf1,
 * or (for triple-buffer-capable channels in TRB mode) buf2. */
1522 	if ((ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan)) & idma_mask(dma_chan)) ||
1523 		(ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan)) & idma_mask(dma_chan)) ||
1524 		((ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan)) & idma_mask(dma_chan)) &&
1525 		(ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan)) & idma_mask(dma_chan)) &&
1526 		_ipu_is_trb_chan(dma_chan)))
1529 		_ipu_ch_offset_update(ipu, dma_chan, pixel_fmt, width, height, stride,
1530 				      u, v, 0, vertical_offset, horizontal_offset);
1532 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1535 EXPORT_SYMBOL(ipu_update_channel_offset);
1539 * This function is called to set a channel's buffer as ready.
1541 * @param       ipu		ipu handler
1542 * @param       channel         Input parameter for the logical channel ID.
1544 * @param       type            Input parameter which buffer to initialize.
1546 * @param       bufNum          Input parameter for which buffer number set to
1549 * @return      Returns 0 on success or negative error code on fail
/* Sets the BUFn_RDY bit for the given buffer so the IDMAC will consume it.
 * NOTE(review): the bufNum==0 branch header and return are elided here. */
1551 int32_t ipu_select_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
1552 			ipu_buffer_t type, uint32_t bufNum)
1554 	uint32_t dma_chan = channel_2_dma(channel, type);
1555 	unsigned long lock_flags;
1557 	if (dma_chan == IDMA_CHAN_INVALID)
1560 	/* Mark buffer to be ready. */
1561 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
1563 		ipu_cm_write(ipu, idma_mask(dma_chan),
1564 			     IPU_CHA_BUF0_RDY(dma_chan));
1565 	else if (bufNum == 1)
1566 		ipu_cm_write(ipu, idma_mask(dma_chan),
1567 			     IPU_CHA_BUF1_RDY(dma_chan));
1569 		ipu_cm_write(ipu, idma_mask(dma_chan),
1570 			     IPU_CHA_BUF2_RDY(dma_chan));
1571 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1574 EXPORT_SYMBOL(ipu_select_buffer);
1577 * This function is called to set a channel's buffer as ready.
1579 * @param       ipu		ipu handler
1580 * @param       bufNum          Input parameter for which buffer number set to
1583 * @return      Returns 0 on success or negative error code on fail
/* Marks the three VDI (de-interlacer) input buffers -- previous, current
 * and next field -- ready in one register write, so all three fields are
 * released to the hardware together.
 * NOTE(review): the bufNum branch header and return are elided here. */
1585 int32_t ipu_select_multi_vdi_buffer(struct ipu_soc *ipu, uint32_t bufNum)
1588 	uint32_t dma_chan = channel_2_dma(MEM_VDI_PRP_VF_MEM, IPU_INPUT_BUFFER);
1590 		idma_mask(channel_2_dma(MEM_VDI_PRP_VF_MEM_P, IPU_INPUT_BUFFER))|
1591 		idma_mask(dma_chan)|
1592 		idma_mask(channel_2_dma(MEM_VDI_PRP_VF_MEM_N, IPU_INPUT_BUFFER));
1593 	unsigned long lock_flags;
1595 	/* Mark buffers to be ready. */
1596 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
1598 		ipu_cm_write(ipu, mask_bit, IPU_CHA_BUF0_RDY(dma_chan));
1600 		ipu_cm_write(ipu, mask_bit, IPU_CHA_BUF1_RDY(dma_chan));
1601 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1604 EXPORT_SYMBOL(ipu_select_multi_vdi_buffer);
/* Frame-synchronization mux lookup tables, indexed by IPU_CHAN_ID():
 * map a logical channel onto the selector value programmed into the
 * IPU_FS_PROC_FLOW*/IPU_FS_DISP_FLOW1 registers by ipu_link_channels().
 * NA marks channels that cannot act as that kind of source/destination. */
1607 static int proc_dest_sel[] = {
1608 	0, 1, 1, 3, 5, 5, 4, 7, 8, 9, 10, 11, 12, 14, 15, 16,
1609 	0, 1, 1, 5, 5, 5, 5, 5, 7, 8, 9, 10, 11, 12, 14, 31 };
1610 static int proc_src_sel[] = { 0, 6, 7, 6, 7, 8, 5, NA, NA, NA,
1611 	NA, NA, NA, NA, NA, 1, 2, 3, 4, 7, 8, NA, 8, NA };
1612 static int disp_src_sel[] = { 0, 6, 7, 8, 3, 4, 5, NA, NA, NA,
1613 	NA, NA, NA, NA, NA, 1, NA, 2, NA, 3, 4, 4, 4, 4 };
1617 * This function links 2 channels together for automatic frame
1618 * synchronization. The output of the source channel is linked to the input of
1619 * the destination channel.
1621 * @param       ipu		ipu handler
1622 * @param       src_ch          Input parameter for the logical channel ID of
1623 * 				the source channel.
1625 * @param       dest_ch         Input parameter for the logical channel ID of
1626 * 				the destination channel.
1628 * @return      This function returns 0 on success or negative error code on
/* Programs the FS_PROC_FLOW1..3 / FS_DISP_FLOW1 muxes so dest_ch consumes
 * src_ch's output directly in hardware.
 * NOTE(review): this extract has dropped the two `switch (src_ch)` /
 * `switch (dest_ch)` headers, most `case` labels, every `break`, the
 * default error paths and the return -- only the mask/shift bodies and a
 * subset of case labels remain.  Code text left untouched. */
1631 int32_t ipu_link_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch)
1634 	unsigned long lock_flags;
1635 	uint32_t fs_proc_flow1;
1636 	uint32_t fs_proc_flow2;
1637 	uint32_t fs_proc_flow3;
1638 	uint32_t fs_disp_flow1;
1640 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
1642 	fs_proc_flow1 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
1643 	fs_proc_flow2 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW2);
1644 	fs_proc_flow3 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW3);
1645 	fs_disp_flow1 = ipu_cm_read(ipu, IPU_FS_DISP_FLOW1);
/* First dispatch (elided switch on src_ch): select dest mux per source. */
1649 		fs_proc_flow3 &= ~FS_SMFC0_DEST_SEL_MASK;
1651 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1652 			FS_SMFC0_DEST_SEL_OFFSET;
1655 		fs_proc_flow3 &= ~FS_SMFC1_DEST_SEL_MASK;
1657 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1658 			FS_SMFC1_DEST_SEL_OFFSET;
1661 		fs_proc_flow3 &= ~FS_SMFC2_DEST_SEL_MASK;
1663 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1664 			FS_SMFC2_DEST_SEL_OFFSET;
1667 		fs_proc_flow3 &= ~FS_SMFC3_DEST_SEL_MASK;
1669 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1670 			FS_SMFC3_DEST_SEL_OFFSET;
1672 	case CSI_PRP_ENC_MEM:
1673 		fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
1675 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1676 			FS_PRPENC_DEST_SEL_OFFSET;
1678 	case CSI_PRP_VF_MEM:
1679 		fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1681 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1682 			FS_PRPVF_DEST_SEL_OFFSET;
1685 		fs_proc_flow2 &= ~FS_PP_DEST_SEL_MASK;
1687 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1688 		    FS_PP_DEST_SEL_OFFSET;
1690 	case MEM_ROT_PP_MEM:
1691 		fs_proc_flow2 &= ~FS_PP_ROT_DEST_SEL_MASK;
1693 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1694 			FS_PP_ROT_DEST_SEL_OFFSET;
1696 	case MEM_PRP_ENC_MEM:
1697 		fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
1699 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1700 			FS_PRPENC_DEST_SEL_OFFSET;
1702 	case MEM_ROT_ENC_MEM:
1703 		fs_proc_flow2 &= ~FS_PRPENC_ROT_DEST_SEL_MASK;
1705 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1706 			FS_PRPENC_ROT_DEST_SEL_OFFSET;
1708 	case MEM_PRP_VF_MEM:
1709 		fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1711 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1712 			FS_PRPVF_DEST_SEL_OFFSET;
1714 	case MEM_VDI_PRP_VF_MEM:
1715 		fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1717 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1718 			FS_PRPVF_DEST_SEL_OFFSET;
1720 	case MEM_ROT_VF_MEM:
1721 		fs_proc_flow2 &= ~FS_PRPVF_ROT_DEST_SEL_MASK;
1723 		    proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1724 			FS_PRPVF_ROT_DEST_SEL_OFFSET;
/* Second dispatch (elided switch on dest_ch): select src mux per dest. */
1733 		fs_proc_flow1 &= ~FS_PP_SRC_SEL_MASK;
1735 		    proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PP_SRC_SEL_OFFSET;
1737 	case MEM_ROT_PP_MEM:
1738 		fs_proc_flow1 &= ~FS_PP_ROT_SRC_SEL_MASK;
1740 		    proc_src_sel[IPU_CHAN_ID(src_ch)] <<
1741 			FS_PP_ROT_SRC_SEL_OFFSET;
1743 	case MEM_PRP_ENC_MEM:
1744 		fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1746 		    proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
1748 	case MEM_ROT_ENC_MEM:
1749 		fs_proc_flow1 &= ~FS_PRPENC_ROT_SRC_SEL_MASK;
1751 		    proc_src_sel[IPU_CHAN_ID(src_ch)] <<
1752 			FS_PRPENC_ROT_SRC_SEL_OFFSET;
1754 	case MEM_PRP_VF_MEM:
1755 		fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1757 		    proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
1759 	case MEM_VDI_PRP_VF_MEM:
1760 		fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1762 		    proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
1764 	case MEM_ROT_VF_MEM:
1765 		fs_proc_flow1 &= ~FS_PRPVF_ROT_SRC_SEL_MASK;
1767 		    proc_src_sel[IPU_CHAN_ID(src_ch)] <<
1768 			FS_PRPVF_ROT_SRC_SEL_OFFSET;
1771 		fs_disp_flow1 &= ~FS_DC1_SRC_SEL_MASK;
1773 		    disp_src_sel[IPU_CHAN_ID(src_ch)] << FS_DC1_SRC_SEL_OFFSET;
1776 		fs_disp_flow1 &= ~FS_DP_SYNC0_SRC_SEL_MASK;
1778 		    disp_src_sel[IPU_CHAN_ID(src_ch)] <<
1779 			FS_DP_SYNC0_SRC_SEL_OFFSET;
1782 		fs_disp_flow1 &= ~FS_DP_SYNC1_SRC_SEL_MASK;
1784 		    disp_src_sel[IPU_CHAN_ID(src_ch)] <<
1785 			FS_DP_SYNC1_SRC_SEL_OFFSET;
1788 		fs_disp_flow1 &= ~FS_DC2_SRC_SEL_MASK;
1790 		    disp_src_sel[IPU_CHAN_ID(src_ch)] << FS_DC2_SRC_SEL_OFFSET;
1793 		fs_disp_flow1 &= ~FS_DP_ASYNC0_SRC_SEL_MASK;
1795 		    disp_src_sel[IPU_CHAN_ID(src_ch)] <<
1796 			FS_DP_ASYNC0_SRC_SEL_OFFSET;
1799 		fs_disp_flow1 &= ~FS_DP_ASYNC1_SRC_SEL_MASK;
1801 		    disp_src_sel[IPU_CHAN_ID(src_ch)] <<
1802 			FS_DP_ASYNC1_SRC_SEL_OFFSET;
/* Commit all four flow registers in one locked section. */
1809 	ipu_cm_write(ipu, fs_proc_flow1, IPU_FS_PROC_FLOW1);
1810 	ipu_cm_write(ipu, fs_proc_flow2, IPU_FS_PROC_FLOW2);
1811 	ipu_cm_write(ipu, fs_proc_flow3, IPU_FS_PROC_FLOW3);
1812 	ipu_cm_write(ipu, fs_disp_flow1, IPU_FS_DISP_FLOW1);
1815 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1818 EXPORT_SYMBOL(ipu_link_channels);
1821 * This function unlinks 2 channels and disables automatic frame
1824 * @param       ipu		ipu handler
1825 * @param       src_ch          Input parameter for the logical channel ID of
1826 * 				the source channel.
1828 * @param       dest_ch         Input parameter for the logical channel ID of
1829 * 				the destination channel.
1831 * @return      This function returns 0 on success or negative error code on
/* Mirror of ipu_link_channels(): clears the corresponding FS flow mux
 * fields so the two channels run independently again.
 * NOTE(review): both `switch` headers, most `case` labels, every `break`,
 * error paths and the return are elided in this extract. */
1834 int32_t ipu_unlink_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch)
1837 	unsigned long lock_flags;
1838 	uint32_t fs_proc_flow1;
1839 	uint32_t fs_proc_flow2;
1840 	uint32_t fs_proc_flow3;
1841 	uint32_t fs_disp_flow1;
1843 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
1845 	fs_proc_flow1 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
1846 	fs_proc_flow2 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW2);
1847 	fs_proc_flow3 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW3);
1848 	fs_disp_flow1 = ipu_cm_read(ipu, IPU_FS_DISP_FLOW1);
/* Clear the destination-select field for the source channel (elided
 * switch on src_ch). */
1852 		fs_proc_flow3 &= ~FS_SMFC0_DEST_SEL_MASK;
1855 		fs_proc_flow3 &= ~FS_SMFC1_DEST_SEL_MASK;
1858 		fs_proc_flow3 &= ~FS_SMFC2_DEST_SEL_MASK;
1861 		fs_proc_flow3 &= ~FS_SMFC3_DEST_SEL_MASK;
1863 	case CSI_PRP_ENC_MEM:
1864 		fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
1866 	case CSI_PRP_VF_MEM:
1867 		fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1870 		fs_proc_flow2 &= ~FS_PP_DEST_SEL_MASK;
1872 	case MEM_ROT_PP_MEM:
1873 		fs_proc_flow2 &= ~FS_PP_ROT_DEST_SEL_MASK;
1875 	case MEM_PRP_ENC_MEM:
1876 		fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
1878 	case MEM_ROT_ENC_MEM:
1879 		fs_proc_flow2 &= ~FS_PRPENC_ROT_DEST_SEL_MASK;
1881 	case MEM_PRP_VF_MEM:
1882 		fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1884 	case MEM_VDI_PRP_VF_MEM:
1885 		fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1887 	case MEM_ROT_VF_MEM:
1888 		fs_proc_flow2 &= ~FS_PRPVF_ROT_DEST_SEL_MASK;
/* Clear the source-select field for the destination channel (elided
 * switch on dest_ch). */
1897 		fs_proc_flow1 &= ~FS_PP_SRC_SEL_MASK;
1899 	case MEM_ROT_PP_MEM:
1900 		fs_proc_flow1 &= ~FS_PP_ROT_SRC_SEL_MASK;
1902 	case MEM_PRP_ENC_MEM:
1903 		fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1905 	case MEM_ROT_ENC_MEM:
1906 		fs_proc_flow1 &= ~FS_PRPENC_ROT_SRC_SEL_MASK;
1908 	case MEM_PRP_VF_MEM:
1909 		fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1911 	case MEM_VDI_PRP_VF_MEM:
1912 		fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1914 	case MEM_ROT_VF_MEM:
1915 		fs_proc_flow1 &= ~FS_PRPVF_ROT_SRC_SEL_MASK;
1918 		fs_disp_flow1 &= ~FS_DC1_SRC_SEL_MASK;
1921 		fs_disp_flow1 &= ~FS_DP_SYNC0_SRC_SEL_MASK;
1924 		fs_disp_flow1 &= ~FS_DP_SYNC1_SRC_SEL_MASK;
1927 		fs_disp_flow1 &= ~FS_DC2_SRC_SEL_MASK;
1930 		fs_disp_flow1 &= ~FS_DP_ASYNC0_SRC_SEL_MASK;
1933 		fs_disp_flow1 &= ~FS_DP_ASYNC1_SRC_SEL_MASK;
/* Commit all four flow registers in one locked section. */
1940 	ipu_cm_write(ipu, fs_proc_flow1, IPU_FS_PROC_FLOW1);
1941 	ipu_cm_write(ipu, fs_proc_flow2, IPU_FS_PROC_FLOW2);
1942 	ipu_cm_write(ipu, fs_proc_flow3, IPU_FS_PROC_FLOW3);
1943 	ipu_cm_write(ipu, fs_disp_flow1, IPU_FS_DISP_FLOW1);
1946 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1949 EXPORT_SYMBOL(ipu_unlink_channels);
1952 * This function check whether a logical channel was enabled.
1954 * @param       ipu		ipu handler
1955 * @param       channel         Input parameter for the logical channel ID.
1957 * @return      This function returns 1 while request channel is enabled or
1958 *              0 for not enabled.
/* NOTE(review): locals and the `return 1;` / `return 0;` lines are elided
 * in this extract; the name says "busy" but per the comment above it
 * actually reports whether the channel's IDMAC is enabled. */
1960 int32_t ipu_is_channel_busy(struct ipu_soc *ipu, ipu_channel_t channel)
1966 	out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
1967 	in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
/* Enabled if either the input or output IDMAC enable bit is set. */
1969 	reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
1970 	if (reg & idma_mask(in_dma))
1972 	reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
1973 	if (reg & idma_mask(out_dma))
1977 EXPORT_SYMBOL(ipu_is_channel_busy);
1980 * This function enables a logical channel.
1982 * @param       ipu		ipu handler
1983 * @param       channel         Input parameter for the logical channel ID.
1985 * @return      This function returns 0 on success or negative error code on
/* Turns on the IPU submodules a channel needs (per use counts), enables
 * its IDMAC channel(s), graphic/alpha companions, display watermarking
 * and IC tasks, then records the channel in channel_enable_mask.
 * NOTE(review): locals and some return statements are elided here. */
1988 int32_t ipu_enable_channel(struct ipu_soc *ipu, ipu_channel_t channel)
1991 	unsigned long lock_flags;
1998 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2000 	if (ipu->channel_enable_mask & (1L << IPU_CHAN_ID(channel))) {
2001 		dev_err(ipu->dev, "Warning: channel already enabled %d\n",
2002 			IPU_CHAN_ID(channel));
2003 		spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2007 	/* Get input and output dma channels */
2008 	out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
2009 	in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
/* Re-enable every submodule that currently has users; this is the
 * counterpart of the use-count decrements in ipu_uninit_channel(). */
2011 	ipu_conf = ipu_cm_read(ipu, IPU_CONF);
2012 	if (ipu->di_use_count[0] > 0) {
2013 		ipu_conf |= IPU_CONF_DI0_EN;
2015 	if (ipu->di_use_count[1] > 0) {
2016 		ipu_conf |= IPU_CONF_DI1_EN;
2018 	if (ipu->dp_use_count > 0)
2019 		ipu_conf |= IPU_CONF_DP_EN;
2020 	if (ipu->dc_use_count > 0)
2021 		ipu_conf |= IPU_CONF_DC_EN;
2022 	if (ipu->dmfc_use_count > 0)
2023 		ipu_conf |= IPU_CONF_DMFC_EN;
2024 	if (ipu->ic_use_count > 0)
2025 		ipu_conf |= IPU_CONF_IC_EN;
2026 	if (ipu->vdi_use_count > 0) {
2027 		ipu_conf |= IPU_CONF_ISP_EN;
2028 		ipu_conf |= IPU_CONF_VDI_EN;
2029 		ipu_conf |= IPU_CONF_IC_INPUT;
2031 	if (ipu->rot_use_count > 0)
2032 		ipu_conf |= IPU_CONF_ROT_EN;
2033 	if (ipu->smfc_use_count > 0)
2034 		ipu_conf |= IPU_CONF_SMFC_EN;
2035 	ipu_cm_write(ipu, ipu_conf, IPU_CONF);
/* Enable the main input/output IDMAC channels. */
2037 	if (idma_is_valid(in_dma)) {
2038 		reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
2039 		ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_CHA_EN(in_dma));
2041 	if (idma_is_valid(out_dma)) {
2042 		reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
2043 		ipu_idmac_write(ipu, reg | idma_mask(out_dma), IDMAC_CHA_EN(out_dma));
/* Secondary (graphics-in) channel for IC combining paths. */
2046 	if ((ipu->sec_chan_en[IPU_CHAN_ID(channel)]) &&
2047 		((channel == MEM_PP_MEM) || (channel == MEM_PRP_VF_MEM) ||
2048 		 (channel == MEM_VDI_PRP_VF_MEM))) {
2049 		sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
2050 		reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(sec_dma));
2051 		ipu_idmac_write(ipu, reg | idma_mask(sec_dma), IDMAC_CHA_EN(sec_dma));
/* Third (separate alpha) channel; for IC paths the SEP_ALPHA bit is set
 * on the graphics channel, for DP sync paths on the video-in channel. */
2053 	if ((ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) &&
2054 		((channel == MEM_PP_MEM) || (channel == MEM_PRP_VF_MEM))) {
2055 		thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
2056 		reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
2057 		ipu_idmac_write(ipu, reg | idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
2059 		sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
2060 		reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
2061 		ipu_idmac_write(ipu, reg | idma_mask(sec_dma), IDMAC_SEP_ALPHA);
2062 	} else if ((ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) &&
2063 		   ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC))) {
2064 		thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
2065 		reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
2066 		ipu_idmac_write(ipu, reg | idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
2067 		reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
2068 		ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_SEP_ALPHA);
/* Display channels: enable watermarking and the DP/DC path. */
2071 	if ((channel == MEM_DC_SYNC) || (channel == MEM_BG_SYNC) ||
2072 	    (channel == MEM_FG_SYNC)) {
2073 		reg = ipu_idmac_read(ipu, IDMAC_WM_EN(in_dma));
2074 		ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_WM_EN(in_dma));
2076 		_ipu_dp_dc_enable(ipu, channel);
2079 	if (_ipu_is_ic_chan(in_dma) || _ipu_is_ic_chan(out_dma) ||
2080 		_ipu_is_irt_chan(in_dma) || _ipu_is_irt_chan(out_dma))
2081 		_ipu_ic_enable_task(ipu, channel);
2083 	ipu->channel_enable_mask |= 1L << IPU_CHAN_ID(channel);
2085 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2089 EXPORT_SYMBOL(ipu_enable_channel);
2092 * This function check buffer ready for a logical channel.
2094 * @param       ipu		ipu handler
2095 * @param       channel         Input parameter for the logical channel ID.
2097 * @param       type            Input parameter which buffer to clear.
2099 * @param       bufNum          Input parameter for which buffer number clear
/* Returns whether the given buffer's RDY bit is currently set.
 * NOTE(review): locals, branch headers and return values are elided. */
2103 int32_t ipu_check_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
2106 	uint32_t dma_chan = channel_2_dma(channel, type);
2109 	if (dma_chan == IDMA_CHAN_INVALID)
2113 		reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan));
2114 	else if (bufNum == 1)
2115 		reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan));
2117 		reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan));
2119 	if (reg & idma_mask(dma_chan))
2124 EXPORT_SYMBOL(ipu_check_buffer_ready);
2127 * This function clear buffer ready for a logical channel.
2129 * @param       ipu		ipu handler
2130 * @param       channel         Input parameter for the logical channel ID.
2132 * @param       type            Input parameter which buffer to clear.
2134 * @param       bufNum          Input parameter for which buffer number clear
/* Clears a buffer's RDY bit. The BUFn_RDY registers are normally
 * write-one-to-set, so IPU_GPR is flipped to "write one to clear" mode
 * around the write, then restored.
 * NOTE(review): the bufNum==0 branch header is elided in this extract. */
2138 void ipu_clear_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
2141 	unsigned long lock_flags;
2142 	uint32_t dma_ch = channel_2_dma(channel, type);
2144 	if (!idma_is_valid(dma_ch))
2147 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2148 	ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* write one to clear */
2150 		ipu_cm_write(ipu, idma_mask(dma_ch),
2151 			IPU_CHA_BUF0_RDY(dma_ch));
2152 	else if (bufNum == 1)
2153 		ipu_cm_write(ipu, idma_mask(dma_ch),
2154 			IPU_CHA_BUF1_RDY(dma_ch));
2156 		ipu_cm_write(ipu, idma_mask(dma_ch),
2157 			IPU_CHA_BUF2_RDY(dma_ch));
2158 	ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
2159 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2161 EXPORT_SYMBOL(ipu_clear_buffer_ready);
/* IRQ handler used by ipu_disable_channel() to signal that a busy DMA
 * channel reached EOF: completes the on-stack completion passed via
 * dev_id.  NOTE(review): the body after the declaration (presumably
 * complete(comp) and the return) is truncated in this extract. */
2163 static irqreturn_t disable_chan_irq_handler(int irq, void *dev_id)
2165 	struct completion *comp = dev_id;
2172 * This function disables a logical channel.
2174 * @param       ipu		ipu handler
2175 * @param       channel         Input parameter for the logical channel ID.
2177 * @param       wait_for_stop   Flag to set whether to wait for channel end
2178 *                              of frame or return immediately.
2180 * @return      This function returns 0 on success or negative error code on
/* Disables a logical channel: optionally waits for the channel(s) to go
 * idle (display path waits for BG EOF; other paths wait per-DMA on EOF
 * interrupts), then clears IDMAC enables, watermarks, IC tasks, current
 * buffer pointers and all BUFn_RDY bits.
 * NOTE(review): locals, several return statements, loop/timeout scaffolding
 * and some braces are elided in this extract; comments only added. */
2183 int32_t ipu_disable_channel(struct ipu_soc *ipu, ipu_channel_t channel, bool wait_for_stop)
2186 	unsigned long lock_flags;
2189 	uint32_t sec_dma = NO_DMA;
2190 	uint32_t thrd_dma = NO_DMA;
2191 	uint16_t fg_pos_x, fg_pos_y;
2193 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2195 	if ((ipu->channel_enable_mask & (1L << IPU_CHAN_ID(channel))) == 0) {
2196 		dev_err(ipu->dev, "Channel already disabled %d\n",
2197 			IPU_CHAN_ID(channel));
2198 		spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2202 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2204 	/* Get input and output dma channels */
2205 	out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
2206 	in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
/* Nothing to do when neither IDMAC channel is actually enabled. */
2208 	if ((idma_is_valid(in_dma) &&
2209 		!idma_is_set(ipu, IDMAC_CHA_EN, in_dma))
2210 		&& (idma_is_valid(out_dma) &&
2211 		!idma_is_set(ipu, IDMAC_CHA_EN, out_dma)))
2214 	if (ipu->sec_chan_en[IPU_CHAN_ID(channel)])
2215 		sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
2216 	if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) {
2217 		sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
2218 		thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
2221 	if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC) ||
2222 	    (channel == MEM_DC_SYNC)) {
/* FG window is parked at (0,0) during disable and restored afterwards. */
2223 		if (channel == MEM_FG_SYNC) {
2224 			ipu_disp_get_window_pos(ipu, channel, &fg_pos_x, &fg_pos_y);
2225 			ipu_disp_set_window_pos(ipu, channel, 0, 0);
2228 		_ipu_dp_dc_disable(ipu, channel, false);
2231 		 * wait for BG channel EOF then disable FG-IDMAC,
2232 		 * it avoid FG NFB4EOF error.
2234 		if (channel == MEM_FG_SYNC) {
/* Poll the BG sync EOF status bit (with an elided timeout bound). */
2237 			ipu_cm_write(ipu, IPUIRQ_2_MASK(IPU_IRQ_BG_SYNC_EOF),
2238 					IPUIRQ_2_STATREG(IPU_IRQ_BG_SYNC_EOF));
2239 			while ((ipu_cm_read(ipu, IPUIRQ_2_STATREG(IPU_IRQ_BG_SYNC_EOF)) &
2240 				IPUIRQ_2_MASK(IPU_IRQ_BG_SYNC_EOF)) == 0) {
2244 					dev_err(ipu->dev, "warning: wait for bg sync eof timeout\n");
2249 	} else if (wait_for_stop) {
/* Wait for every busy DMA channel (main, secondary, third) to finish,
 * using that channel's EOF interrupt and an on-stack completion. */
2250 		while (idma_is_set(ipu, IDMAC_CHA_BUSY, in_dma) ||
2251 		       idma_is_set(ipu, IDMAC_CHA_BUSY, out_dma) ||
2252 			(ipu->sec_chan_en[IPU_CHAN_ID(channel)] &&
2253 			idma_is_set(ipu, IDMAC_CHA_BUSY, sec_dma)) ||
2254 			(ipu->thrd_chan_en[IPU_CHAN_ID(channel)] &&
2255 			idma_is_set(ipu, IDMAC_CHA_BUSY, thrd_dma))) {
2256 			uint32_t ret, irq = 0xffffffff;
2257 			DECLARE_COMPLETION_ONSTACK(disable_comp);
2259 			if (idma_is_set(ipu, IDMAC_CHA_BUSY, out_dma))
2261 			if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] &&
2262 				idma_is_set(ipu, IDMAC_CHA_BUSY, sec_dma))
2264 			if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] &&
2265 				idma_is_set(ipu, IDMAC_CHA_BUSY, thrd_dma))
2267 			if (idma_is_set(ipu, IDMAC_CHA_BUSY, in_dma))
2270 			if (irq == 0xffffffff) {
2271 				dev_err(ipu->dev, "warning: no channel busy, break\n");
2275 			dev_err(ipu->dev, "warning: channel %d busy, need wait\n", irq);
2277 			ret = ipu_request_irq(ipu, irq, disable_chan_irq_handler, 0, NULL, &disable_comp);
2279 				dev_err(ipu->dev, "irq %d in use\n", irq);
2282 				ret = wait_for_completion_timeout(&disable_comp, msecs_to_jiffies(200));
2283 				ipu_free_irq(ipu, irq, &disable_comp);
2285 					ipu_dump_registers(ipu);
2286 					dev_err(ipu->dev, "warning: disable ipu dma channel %d during its busy state\n", irq);
2293 	spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2295 	if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC) ||
2296 	    (channel == MEM_DC_SYNC)) {
2297 		reg = ipu_idmac_read(ipu, IDMAC_WM_EN(in_dma));
2298 		ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_WM_EN(in_dma));
2301 	/* Disable IC task */
2302 	if (_ipu_is_ic_chan(in_dma) || _ipu_is_ic_chan(out_dma) ||
2303 		_ipu_is_irt_chan(in_dma) || _ipu_is_irt_chan(out_dma))
2304 		_ipu_ic_disable_task(ipu, channel);
2306 	/* Disable DMA channel(s) */
2307 	if (idma_is_valid(in_dma)) {
2308 		reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
2309 		ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_CHA_EN(in_dma));
2310 		ipu_cm_write(ipu, idma_mask(in_dma), IPU_CHA_CUR_BUF(in_dma));
2311 		ipu_cm_write(ipu, tri_cur_buf_mask(in_dma),
2312 					IPU_CHA_TRIPLE_CUR_BUF(in_dma));
2314 	if (idma_is_valid(out_dma)) {
2315 		reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
2316 		ipu_idmac_write(ipu, reg & ~idma_mask(out_dma), IDMAC_CHA_EN(out_dma));
2317 		ipu_cm_write(ipu, idma_mask(out_dma), IPU_CHA_CUR_BUF(out_dma));
2318 		ipu_cm_write(ipu, tri_cur_buf_mask(out_dma),
2319 					IPU_CHA_TRIPLE_CUR_BUF(out_dma));
2321 	if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(sec_dma)) {
2322 		reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(sec_dma));
2323 		ipu_idmac_write(ipu, reg & ~idma_mask(sec_dma), IDMAC_CHA_EN(sec_dma));
2324 		ipu_cm_write(ipu, idma_mask(sec_dma), IPU_CHA_CUR_BUF(sec_dma));
2326 	if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(thrd_dma)) {
2327 		reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
2328 		ipu_idmac_write(ipu, reg & ~idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
2329 		if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC) {
2330 			reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
2331 			ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_SEP_ALPHA);
2333 			reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
2334 			ipu_idmac_write(ipu, reg & ~idma_mask(sec_dma), IDMAC_SEP_ALPHA);
2336 		ipu_cm_write(ipu, idma_mask(thrd_dma), IPU_CHA_CUR_BUF(thrd_dma));
2339 	ipu->channel_enable_mask &= ~(1L << IPU_CHAN_ID(channel));
2341 	spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2343 	/* Set channel buffers NOT to be ready */
2344 	if (idma_is_valid(in_dma)) {
2345 		ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 0);
2346 		ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 1);
2347 		ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 2);
2349 	if (idma_is_valid(out_dma)) {
2350 		ipu_clear_buffer_ready(ipu, channel, IPU_OUTPUT_BUFFER, 0);
2351 		ipu_clear_buffer_ready(ipu, channel, IPU_OUTPUT_BUFFER, 1);
2353 	if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(sec_dma)) {
2354 		ipu_clear_buffer_ready(ipu, channel, IPU_GRAPH_IN_BUFFER, 0);
2355 		ipu_clear_buffer_ready(ipu, channel, IPU_GRAPH_IN_BUFFER, 1);
2357 	if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(thrd_dma)) {
2358 		ipu_clear_buffer_ready(ipu, channel, IPU_ALPHA_IN_BUFFER, 0);
2359 		ipu_clear_buffer_ready(ipu, channel, IPU_ALPHA_IN_BUFFER, 1);
/* Restore the FG window position saved at the top of the function. */
2362 	if (channel == MEM_FG_SYNC)
2363 		ipu_disp_set_window_pos(ipu, channel, fg_pos_x, fg_pos_y);
2367 EXPORT_SYMBOL(ipu_disable_channel);
2370 * This function enables CSI.
2372 * @param ipu ipu handler
2373 * @param csi csi num 0 or 1
2375 * @return This function returns 0 on success or negative error code on
2378 int32_t ipu_enable_csi(struct ipu_soc *ipu, uint32_t csi)
2381 unsigned long lock_flags;
2384 dev_err(ipu->dev, "Wrong csi num_%d\n", csi);
2388 spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2389 ipu->csi_use_count[csi]++;
2391 if (ipu->csi_use_count[csi] == 1) {
2392 reg = ipu_cm_read(ipu, IPU_CONF);
2394 ipu_cm_write(ipu, reg | IPU_CONF_CSI0_EN, IPU_CONF);
2396 ipu_cm_write(ipu, reg | IPU_CONF_CSI1_EN, IPU_CONF);
2398 spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2401 EXPORT_SYMBOL(ipu_enable_csi);
2404 * This function disables CSI.
2406 * @param ipu ipu handler
2407 * @param csi csi num 0 or 1
2409 * @return This function returns 0 on success or negative error code on
2412 int32_t ipu_disable_csi(struct ipu_soc *ipu, uint32_t csi)
2415 unsigned long lock_flags;
2418 dev_err(ipu->dev, "Wrong csi num_%d\n", csi);
2422 spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2423 ipu->csi_use_count[csi]--;
2425 if (ipu->csi_use_count[csi] == 0) {
2426 reg = ipu_cm_read(ipu, IPU_CONF);
2428 ipu_cm_write(ipu, reg & ~IPU_CONF_CSI0_EN, IPU_CONF);
2430 ipu_cm_write(ipu, reg & ~IPU_CONF_CSI1_EN, IPU_CONF);
2432 spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2435 EXPORT_SYMBOL(ipu_disable_csi);
2437 static irqreturn_t ipu_irq_handler(int irq, void *desc)
2439 struct ipu_soc *ipu = desc;
2442 irqreturn_t result = IRQ_NONE;
2444 const int err_reg[] = { 5, 6, 9, 10, 0 };
2445 const int int_reg[] = { 1, 2, 3, 4, 11, 12, 13, 14, 15, 0 };
2448 if (err_reg[i] == 0)
2450 int_stat = ipu_cm_read(ipu, IPU_INT_STAT(err_reg[i]));
2451 int_stat &= ipu_cm_read(ipu, IPU_INT_CTRL(err_reg[i]));
2453 ipu_cm_write(ipu, int_stat, IPU_INT_STAT(err_reg[i]));
2455 "IPU Error - IPU_INT_STAT_%d = 0x%08X\n",
2456 err_reg[i], int_stat);
2457 /* Disable interrupts so we only get error once */
2459 ipu_cm_read(ipu, IPU_INT_CTRL(err_reg[i])) & ~int_stat;
2460 ipu_cm_write(ipu, int_stat, IPU_INT_CTRL(err_reg[i]));
2465 if (int_reg[i] == 0)
2467 int_stat = ipu_cm_read(ipu, IPU_INT_STAT(int_reg[i]));
2468 int_stat &= ipu_cm_read(ipu, IPU_INT_CTRL(int_reg[i]));
2469 ipu_cm_write(ipu, int_stat, IPU_INT_STAT(int_reg[i]));
2470 while ((line = ffs(int_stat)) != 0) {
2472 int_stat &= ~(1UL << line);
2473 line += (int_reg[i] - 1) * 32;
2475 ipu->irq_list[line].handler(line,
2476 ipu->irq_list[line].
2485 * This function enables the interrupt for the specified interrupt line.
2486 * The interrupt lines are defined in \b ipu_irq_line enum.
2488 * @param ipu ipu handler
2489 * @param irq Interrupt line to enable interrupt for.
2492 void ipu_enable_irq(struct ipu_soc *ipu, uint32_t irq)
2495 unsigned long lock_flags;
2497 if (!ipu->clk_enabled)
2498 clk_enable(ipu->ipu_clk);
2500 spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2502 reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
2503 reg |= IPUIRQ_2_MASK(irq);
2504 ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
2506 spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2507 if (!ipu->clk_enabled)
2508 clk_disable(ipu->ipu_clk);
2510 EXPORT_SYMBOL(ipu_enable_irq);
2513 * This function disables the interrupt for the specified interrupt line.
2514 * The interrupt lines are defined in \b ipu_irq_line enum.
2516 * @param ipu ipu handler
2517 * @param irq Interrupt line to disable interrupt for.
2520 void ipu_disable_irq(struct ipu_soc *ipu, uint32_t irq)
2523 unsigned long lock_flags;
2525 if (!ipu->clk_enabled)
2526 clk_enable(ipu->ipu_clk);
2528 spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2530 reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
2531 reg &= ~IPUIRQ_2_MASK(irq);
2532 ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
2534 spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2535 if (!ipu->clk_enabled)
2536 clk_disable(ipu->ipu_clk);
2538 EXPORT_SYMBOL(ipu_disable_irq);
2541 * This function clears the interrupt for the specified interrupt line.
2542 * The interrupt lines are defined in \b ipu_irq_line enum.
2544 * @param ipu ipu handler
2545 * @param irq Interrupt line to clear interrupt for.
2548 void ipu_clear_irq(struct ipu_soc *ipu, uint32_t irq)
2550 if (!ipu->clk_enabled)
2551 clk_enable(ipu->ipu_clk);
2553 ipu_cm_write(ipu, IPUIRQ_2_MASK(irq), IPUIRQ_2_STATREG(irq));
2555 if (!ipu->clk_enabled)
2556 clk_disable(ipu->ipu_clk);
2558 EXPORT_SYMBOL(ipu_clear_irq);
2561 * This function returns the current interrupt status for the specified
2562 * interrupt line. The interrupt lines are defined in \b ipu_irq_line enum.
2564 * @param ipu ipu handler
2565 * @param irq Interrupt line to get status for.
2567 * @return Returns true if the interrupt is pending/asserted or false if
2568 * the interrupt is not pending.
2570 bool ipu_get_irq_status(struct ipu_soc *ipu, uint32_t irq)
2574 if (!ipu->clk_enabled)
2575 clk_enable(ipu->ipu_clk);
2577 reg = ipu_cm_read(ipu, IPUIRQ_2_STATREG(irq));
2579 if (!ipu->clk_enabled)
2580 clk_disable(ipu->ipu_clk);
2582 if (reg & IPUIRQ_2_MASK(irq))
2587 EXPORT_SYMBOL(ipu_get_irq_status);
2590 * This function registers an interrupt handler function for the specified
2591 * interrupt line. The interrupt lines are defined in \b ipu_irq_line enum.
2593 * @param ipu ipu handler
2594 * @param irq Interrupt line to get status for.
2596 * @param handler Input parameter for address of the handler
2599 * @param irq_flags Flags for interrupt mode. Currently not used.
2601 * @param devname Input parameter for string name of driver
2602 * registering the handler.
2604 * @param dev_id Input parameter for pointer of data to be
2605 * passed to the handler.
2607 * @return This function returns 0 on success or negative error code on
2610 int ipu_request_irq(struct ipu_soc *ipu, uint32_t irq,
2611 irqreturn_t(*handler) (int, void *),
2612 uint32_t irq_flags, const char *devname, void *dev_id)
2614 unsigned long lock_flags;
2616 BUG_ON(irq >= IPU_IRQ_COUNT);
2618 spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2620 if (ipu->irq_list[irq].handler != NULL) {
2622 "handler already installed on irq %d\n", irq);
2623 spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2627 ipu->irq_list[irq].handler = handler;
2628 ipu->irq_list[irq].flags = irq_flags;
2629 ipu->irq_list[irq].dev_id = dev_id;
2630 ipu->irq_list[irq].name = devname;
2632 spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2634 ipu_enable_irq(ipu, irq); /* enable the interrupt */
2638 EXPORT_SYMBOL(ipu_request_irq);
2641 * This function unregisters an interrupt handler for the specified interrupt
2642 * line. The interrupt lines are defined in \b ipu_irq_line enum.
2644 * @param ipu ipu handler
2645 * @param irq Interrupt line to get status for.
2647 * @param dev_id Input parameter for pointer of data to be passed
2648 * to the handler. This must match value passed to
2649 * ipu_request_irq().
2652 void ipu_free_irq(struct ipu_soc *ipu, uint32_t irq, void *dev_id)
2654 ipu_disable_irq(ipu, irq); /* disable the interrupt */
2656 if (ipu->irq_list[irq].dev_id == dev_id)
2657 ipu->irq_list[irq].handler = NULL;
2659 EXPORT_SYMBOL(ipu_free_irq);
2661 uint32_t ipu_get_cur_buffer_idx(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type)
2663 uint32_t reg, dma_chan;
2665 dma_chan = channel_2_dma(channel, type);
2666 if (!idma_is_valid(dma_chan))
2669 reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
2670 if ((reg & idma_mask(dma_chan)) && _ipu_is_trb_chan(dma_chan)) {
2671 reg = ipu_cm_read(ipu, IPU_CHA_TRIPLE_CUR_BUF(dma_chan));
2672 return (reg & tri_cur_buf_mask(dma_chan)) >>
2673 tri_cur_buf_shift(dma_chan);
2675 reg = ipu_cm_read(ipu, IPU_CHA_CUR_BUF(dma_chan));
2676 if (reg & idma_mask(dma_chan))
2682 EXPORT_SYMBOL(ipu_get_cur_buffer_idx);
2684 uint32_t _ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel)
2687 uint32_t task_stat_reg = ipu_cm_read(ipu, IPU_PROC_TASK_STAT);
2690 case MEM_PRP_VF_MEM:
2691 stat = (task_stat_reg & TSTAT_VF_MASK) >> TSTAT_VF_OFFSET;
2693 case MEM_VDI_PRP_VF_MEM:
2694 stat = (task_stat_reg & TSTAT_VF_MASK) >> TSTAT_VF_OFFSET;
2696 case MEM_ROT_VF_MEM:
2698 (task_stat_reg & TSTAT_VF_ROT_MASK) >> TSTAT_VF_ROT_OFFSET;
2700 case MEM_PRP_ENC_MEM:
2701 stat = (task_stat_reg & TSTAT_ENC_MASK) >> TSTAT_ENC_OFFSET;
2703 case MEM_ROT_ENC_MEM:
2705 (task_stat_reg & TSTAT_ENC_ROT_MASK) >>
2706 TSTAT_ENC_ROT_OFFSET;
2709 stat = (task_stat_reg & TSTAT_PP_MASK) >> TSTAT_PP_OFFSET;
2711 case MEM_ROT_PP_MEM:
2713 (task_stat_reg & TSTAT_PP_ROT_MASK) >> TSTAT_PP_ROT_OFFSET;
2717 stat = TASK_STAT_IDLE;
2723 int32_t ipu_swap_channel(struct ipu_soc *ipu, ipu_channel_t from_ch, ipu_channel_t to_ch)
2726 unsigned long lock_flags;
2728 int from_dma = channel_2_dma(from_ch, IPU_INPUT_BUFFER);
2729 int to_dma = channel_2_dma(to_ch, IPU_INPUT_BUFFER);
2731 /* enable target channel */
2732 spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2734 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(to_dma));
2735 ipu_idmac_write(ipu, reg | idma_mask(to_dma), IDMAC_CHA_EN(to_dma));
2737 ipu->channel_enable_mask |= 1L << IPU_CHAN_ID(to_ch);
2739 spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2742 _ipu_dp_dc_disable(ipu, from_ch, true);
2744 /* disable source channel */
2745 spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2747 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(from_dma));
2748 ipu_idmac_write(ipu, reg & ~idma_mask(from_dma), IDMAC_CHA_EN(from_dma));
2749 ipu_cm_write(ipu, idma_mask(from_dma), IPU_CHA_CUR_BUF(from_dma));
2750 ipu_cm_write(ipu, tri_cur_buf_mask(from_dma),
2751 IPU_CHA_TRIPLE_CUR_BUF(from_dma));
2753 ipu->channel_enable_mask &= ~(1L << IPU_CHAN_ID(from_ch));
2755 spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2757 ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 0);
2758 ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 1);
2759 ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 2);
2763 EXPORT_SYMBOL(ipu_swap_channel);
2765 uint32_t bytes_per_pixel(uint32_t fmt)
2768 case IPU_PIX_FMT_GENERIC: /*generic data */
2769 case IPU_PIX_FMT_RGB332:
2770 case IPU_PIX_FMT_YUV420P:
2771 case IPU_PIX_FMT_YVU420P:
2772 case IPU_PIX_FMT_YUV422P:
2775 case IPU_PIX_FMT_RGB565:
2776 case IPU_PIX_FMT_YUYV:
2777 case IPU_PIX_FMT_UYVY:
2780 case IPU_PIX_FMT_BGR24:
2781 case IPU_PIX_FMT_RGB24:
2784 case IPU_PIX_FMT_GENERIC_32: /*generic data */
2785 case IPU_PIX_FMT_BGR32:
2786 case IPU_PIX_FMT_BGRA32:
2787 case IPU_PIX_FMT_RGB32:
2788 case IPU_PIX_FMT_RGBA32:
2789 case IPU_PIX_FMT_ABGR32:
2798 EXPORT_SYMBOL(bytes_per_pixel);
2800 ipu_color_space_t format_to_colorspace(uint32_t fmt)
2803 case IPU_PIX_FMT_RGB666:
2804 case IPU_PIX_FMT_RGB565:
2805 case IPU_PIX_FMT_BGR24:
2806 case IPU_PIX_FMT_RGB24:
2807 case IPU_PIX_FMT_GBR24:
2808 case IPU_PIX_FMT_BGR32:
2809 case IPU_PIX_FMT_BGRA32:
2810 case IPU_PIX_FMT_RGB32:
2811 case IPU_PIX_FMT_RGBA32:
2812 case IPU_PIX_FMT_ABGR32:
2813 case IPU_PIX_FMT_LVDS666:
2814 case IPU_PIX_FMT_LVDS888:
2825 bool ipu_pixel_format_has_alpha(uint32_t fmt)
2828 case IPU_PIX_FMT_RGBA32:
2829 case IPU_PIX_FMT_BGRA32:
2830 case IPU_PIX_FMT_ABGR32:
2840 void ipu_set_csc_coefficients(struct ipu_soc *ipu, ipu_channel_t channel, int32_t param[][3])
2842 _ipu_dp_set_csc_coefficients(ipu, channel, param);
2844 EXPORT_SYMBOL(ipu_set_csc_coefficients);
2846 static int ipu_suspend(struct platform_device *pdev, pm_message_t state)
2848 struct imx_ipuv3_platform_data *plat_data = pdev->dev.platform_data;
2851 if (g_ipu_clk_enabled) {
2852 /* save and disable enabled channels*/
2853 idma_enable_reg[0] = __raw_readl(IDMAC_CHA_EN(0));
2854 idma_enable_reg[1] = __raw_readl(IDMAC_CHA_EN(32));
2855 while ((__raw_readl(IDMAC_CHA_BUSY(0)) & idma_enable_reg[0])
2856 || (__raw_readl(IDMAC_CHA_BUSY(32)) &
2857 idma_enable_reg[1])) {
2858 /* disable channel not busy already */
2859 uint32_t chan_should_disable, timeout = 1000, time = 0;
2861 chan_should_disable =
2862 __raw_readl(IDMAC_CHA_BUSY(0))
2863 ^ idma_enable_reg[0];
2864 __raw_writel((~chan_should_disable) &
2865 idma_enable_reg[0], IDMAC_CHA_EN(0));
2866 chan_should_disable =
2867 __raw_readl(IDMAC_CHA_BUSY(1))
2868 ^ idma_enable_reg[1];
2869 __raw_writel((~chan_should_disable) &
2870 idma_enable_reg[1], IDMAC_CHA_EN(32));
2873 if (time >= timeout)
2876 __raw_writel(0, IDMAC_CHA_EN(0));
2877 __raw_writel(0, IDMAC_CHA_EN(32));
2879 /* save double buffer select regs */
2880 ipu_cha_db_mode_reg[0] = __raw_readl(IPU_CHA_DB_MODE_SEL(0));
2881 ipu_cha_db_mode_reg[1] = __raw_readl(IPU_CHA_DB_MODE_SEL(32));
2882 ipu_cha_db_mode_reg[2] =
2883 __raw_readl(IPU_ALT_CHA_DB_MODE_SEL(0));
2884 ipu_cha_db_mode_reg[3] =
2885 __raw_readl(IPU_ALT_CHA_DB_MODE_SEL(32));
2887 /* save triple buffer select regs */
2888 ipu_cha_trb_mode_reg[0] = __raw_readl(IPU_CHA_TRB_MODE_SEL(0));
2889 ipu_cha_trb_mode_reg[1] = __raw_readl(IPU_CHA_TRB_MODE_SEL(32));
2891 /* save current buffer regs */
2892 ipu_cha_cur_buf_reg[0] = __raw_readl(IPU_CHA_CUR_BUF(0));
2893 ipu_cha_cur_buf_reg[1] = __raw_readl(IPU_CHA_CUR_BUF(32));
2894 ipu_cha_cur_buf_reg[2] = __raw_readl(IPU_ALT_CUR_BUF0);
2895 ipu_cha_cur_buf_reg[3] = __raw_readl(IPU_ALT_CUR_BUF1);
2897 /* save current triple buffer regs */
2898 ipu_cha_triple_cur_buf_reg[0] =
2899 __raw_readl(IPU_CHA_TRIPLE_CUR_BUF(0));
2900 ipu_cha_triple_cur_buf_reg[1] =
2901 __raw_readl(IPU_CHA_TRIPLE_CUR_BUF(32));
2902 ipu_cha_triple_cur_buf_reg[2] =
2903 __raw_readl(IPU_CHA_TRIPLE_CUR_BUF(64));
2904 ipu_cha_triple_cur_buf_reg[3] =
2905 __raw_readl(IPU_CHA_TRIPLE_CUR_BUF(96));
2907 /* save idamc sub addr regs */
2908 idma_sub_addr_reg[0] = __raw_readl(IDMAC_SUB_ADDR_0);
2909 idma_sub_addr_reg[1] = __raw_readl(IDMAC_SUB_ADDR_1);
2910 idma_sub_addr_reg[2] = __raw_readl(IDMAC_SUB_ADDR_2);
2911 idma_sub_addr_reg[3] = __raw_readl(IDMAC_SUB_ADDR_3);
2912 idma_sub_addr_reg[4] = __raw_readl(IDMAC_SUB_ADDR_4);
2914 /* save sub-modules status and disable all */
2915 ic_conf_reg = __raw_readl(IC_CONF);
2916 __raw_writel(0, IC_CONF);
2917 ipu_conf_reg = __raw_readl(IPU_CONF);
2918 __raw_writel(0, IPU_CONF);
2920 /* save buf ready regs */
2921 buf_ready_reg[0] = __raw_readl(IPU_CHA_BUF0_RDY(0));
2922 buf_ready_reg[1] = __raw_readl(IPU_CHA_BUF0_RDY(32));
2923 buf_ready_reg[2] = __raw_readl(IPU_CHA_BUF1_RDY(0));
2924 buf_ready_reg[3] = __raw_readl(IPU_CHA_BUF1_RDY(32));
2925 buf_ready_reg[4] = __raw_readl(IPU_ALT_CHA_BUF0_RDY(0));
2926 buf_ready_reg[5] = __raw_readl(IPU_ALT_CHA_BUF0_RDY(32));
2927 buf_ready_reg[6] = __raw_readl(IPU_ALT_CHA_BUF1_RDY(0));
2928 buf_ready_reg[7] = __raw_readl(IPU_ALT_CHA_BUF1_RDY(32));
2929 buf_ready_reg[8] = __raw_readl(IPU_CHA_BUF2_RDY(0));
2930 buf_ready_reg[9] = __raw_readl(IPU_CHA_BUF2_RDY(32));
2940 static int ipu_resume(struct platform_device *pdev)
2942 struct imx_ipuv3_platform_data *plat_data = pdev->dev.platform_data;
2948 if (g_ipu_clk_enabled) {
2950 /* restore buf ready regs */
2951 __raw_writel(buf_ready_reg[0], IPU_CHA_BUF0_RDY(0));
2952 __raw_writel(buf_ready_reg[1], IPU_CHA_BUF0_RDY(32));
2953 __raw_writel(buf_ready_reg[2], IPU_CHA_BUF1_RDY(0));
2954 __raw_writel(buf_ready_reg[3], IPU_CHA_BUF1_RDY(32));
2955 __raw_writel(buf_ready_reg[4], IPU_ALT_CHA_BUF0_RDY(0));
2956 __raw_writel(buf_ready_reg[5], IPU_ALT_CHA_BUF0_RDY(32));
2957 __raw_writel(buf_ready_reg[6], IPU_ALT_CHA_BUF1_RDY(0));
2958 __raw_writel(buf_ready_reg[7], IPU_ALT_CHA_BUF1_RDY(32));
2959 __raw_writel(buf_ready_reg[8], IPU_CHA_BUF2_RDY(0));
2960 __raw_writel(buf_ready_reg[9], IPU_CHA_BUF2_RDY(32));
2962 /* re-enable sub-modules*/
2963 __raw_writel(ipu_conf_reg, IPU_CONF);
2964 __raw_writel(ic_conf_reg, IC_CONF);
2966 /* restore double buffer select regs */
2967 __raw_writel(ipu_cha_db_mode_reg[0], IPU_CHA_DB_MODE_SEL(0));
2968 __raw_writel(ipu_cha_db_mode_reg[1], IPU_CHA_DB_MODE_SEL(32));
2969 __raw_writel(ipu_cha_db_mode_reg[2],
2970 IPU_ALT_CHA_DB_MODE_SEL(0));
2971 __raw_writel(ipu_cha_db_mode_reg[3],
2972 IPU_ALT_CHA_DB_MODE_SEL(32));
2974 /* restore triple buffer select regs */
2975 __raw_writel(ipu_cha_trb_mode_reg[0], IPU_CHA_TRB_MODE_SEL(0));
2976 __raw_writel(ipu_cha_trb_mode_reg[1], IPU_CHA_TRB_MODE_SEL(32));
2978 /* restore current buffer select regs */
2979 __raw_writel(~(ipu_cha_cur_buf_reg[0]), IPU_CHA_CUR_BUF(0));
2980 __raw_writel(~(ipu_cha_cur_buf_reg[1]), IPU_CHA_CUR_BUF(32));
2981 __raw_writel(~(ipu_cha_cur_buf_reg[2]), IPU_ALT_CUR_BUF0);
2982 __raw_writel(~(ipu_cha_cur_buf_reg[3]), IPU_ALT_CUR_BUF1);
2984 /* restore triple current buffer select regs */
2985 __raw_writel(~(ipu_cha_triple_cur_buf_reg[0]),
2986 IPU_CHA_TRIPLE_CUR_BUF(0));
2987 __raw_writel(~(ipu_cha_triple_cur_buf_reg[1]),
2988 IPU_CHA_TRIPLE_CUR_BUF(32));
2989 __raw_writel(~(ipu_cha_triple_cur_buf_reg[2]),
2990 IPU_CHA_TRIPLE_CUR_BUF(64));
2991 __raw_writel(~(ipu_cha_triple_cur_buf_reg[3]),
2992 IPU_CHA_TRIPLE_CUR_BUF(96));
2994 /* restore idamc sub addr regs */
2995 __raw_writel(idma_sub_addr_reg[0], IDMAC_SUB_ADDR_0);
2996 __raw_writel(idma_sub_addr_reg[1], IDMAC_SUB_ADDR_1);
2997 __raw_writel(idma_sub_addr_reg[2], IDMAC_SUB_ADDR_2);
2998 __raw_writel(idma_sub_addr_reg[3], IDMAC_SUB_ADDR_3);
2999 __raw_writel(idma_sub_addr_reg[4], IDMAC_SUB_ADDR_4);
3001 /* restart idma channel*/
3002 __raw_writel(idma_enable_reg[0], IDMAC_CHA_EN(0));
3003 __raw_writel(idma_enable_reg[1], IDMAC_CHA_EN(32));
3005 clk_enable(g_ipu_clk);
3006 _ipu_dmfc_init(dmfc_type_setup, 1);
3007 _ipu_init_dc_mappings();
3009 /* Set sync refresh channels as high priority */
3010 __raw_writel(0x18800001L, IDMAC_CHA_PRI(0));
3011 clk_disable(g_ipu_clk);
3019 * This structure contains pointers to the power management callback functions.
3021 static struct platform_driver mxcipu_driver = {
3023 .name = "imx-ipuv3",
3026 .remove = ipu_remove,
3027 .suspend = ipu_suspend,
3028 .resume = ipu_resume,
3031 int32_t __init ipu_gen_init(void)
3035 ret = platform_driver_register(&mxcipu_driver);
3039 subsys_initcall(ipu_gen_init);
3041 static void __exit ipu_gen_uninit(void)
3043 platform_driver_unregister(&mxcipu_driver);
3046 module_exit(ipu_gen_uninit);