/*
 * Copyright 2005-2011 Freescale Semiconductor, Inc. All Rights Reserved.
 */

/*
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

/*!
 * @file ipu_common.c
 *
 * @brief This file contains the IPU driver common API functions.
 *
 * @ingroup IPU
 */
#include <linux/types.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/ipu.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <mach/clock.h>
#include <mach/hardware.h>
#include <mach/ipu-v3.h>
#include <mach/devices-common.h>

#include "ipu_prv.h"
#include "ipu_regs.h"
#include "ipu_param_mem.h"

static struct ipu_soc ipu_array[MXC_IPU_MAX_NUM];
int g_ipu_hw_rev;

/* Static functions */
static irqreturn_t ipu_irq_handler(int irq, void *desc);

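/*
 * Note (added for clarity): an ipu_channel_t value encodes the IDMAC DMA
 * channel numbers of a logical channel's buffers in consecutive 6-bit
 * fields, so channel_2_dma() just shifts by 6 * type and masks with 0x3F
 * to pick out the DMA channel backing the requested ipu_buffer_t.
 */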
static inline uint32_t channel_2_dma(ipu_channel_t ch, ipu_buffer_t type)
{
        return ((uint32_t) ch >> (6 * type)) & 0x3F;
}

static inline int _ipu_is_ic_chan(uint32_t dma_chan)
{
        return ((dma_chan >= 11) && (dma_chan <= 22) && (dma_chan != 17) && (dma_chan != 18));
}

static inline int _ipu_is_ic_graphic_chan(uint32_t dma_chan)
{
        return (dma_chan == 14 || dma_chan == 15);
}

/* Either DP BG or DP FG can be the graphic window */
static inline int _ipu_is_dp_graphic_chan(uint32_t dma_chan)
{
        return (dma_chan == 23 || dma_chan == 27);
}

static inline int _ipu_is_irt_chan(uint32_t dma_chan)
{
        return ((dma_chan >= 45) && (dma_chan <= 50));
}

static inline int _ipu_is_dmfc_chan(uint32_t dma_chan)
{
        return ((dma_chan >= 23) && (dma_chan <= 29));
}

static inline int _ipu_is_smfc_chan(uint32_t dma_chan)
{
        return ((dma_chan >= 0) && (dma_chan <= 3));
}

static inline int _ipu_is_trb_chan(uint32_t dma_chan)
{
        return (((dma_chan == 8) || (dma_chan == 9) ||
                 (dma_chan == 10) || (dma_chan == 13) ||
                 (dma_chan == 21) || (dma_chan == 23) ||
                 (dma_chan == 27) || (dma_chan == 28)) &&
                (g_ipu_hw_rev >= 2));
}

#define idma_is_valid(ch)       (ch != NO_DMA)
#define idma_mask(ch)           (idma_is_valid(ch) ? (1UL << (ch & 0x1F)) : 0)
#define idma_is_set(ipu, reg, dma)      (ipu_idmac_read(ipu, reg(dma)) & idma_mask(dma))
#define tri_cur_buf_mask(ch)    (idma_mask(ch*2) * 3)
#define tri_cur_buf_shift(ch)   (ffs(idma_mask(ch*2)) - 1)
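
/*
 * Note (added for clarity): idma_mask() turns an IDMAC channel number into
 * its bit position within a 32-bit register bank; the "& 0x1F" keeps only
 * the position inside the bank, while the bank itself is selected by the
 * register macro (e.g. IPU_CHA_DB_MODE_SEL(dma)).  In triple buffer mode a
 * channel owns a 2-bit "current buffer" field, which is why
 * tri_cur_buf_mask()/tri_cur_buf_shift() double the channel number and use
 * a two-bit (x3) mask.
 */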

static int ipu_reset(struct ipu_soc *ipu)
{
        int timeout = 1000;

        ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);

        while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
                if (!timeout--)
                        return -ETIME;
                msleep(1);
        }

        return 0;
}

static inline struct ipu_soc *pixelclk2ipu(struct clk *clk)
{
        struct ipu_soc *ipu;
        struct clk *base = clk - clk->id;

        ipu = container_of(base, struct ipu_soc, pixel_clk[0]);

        return ipu;
}

static unsigned long _ipu_pixel_clk_get_rate(struct clk *clk)
{
        struct ipu_soc *ipu = pixelclk2ipu(clk);
        u32 div = ipu_di_read(ipu, clk->id, DI_BS_CLKGEN0);
        if (div == 0)
                return 0;
        return (clk_get_rate(clk->parent) * 16) / div;
}

static unsigned long _ipu_pixel_clk_round_rate(struct clk *clk, unsigned long rate)
{
        u32 div;
        u32 parent_rate = clk_get_rate(clk->parent) * 16;
        /*
         * Calculate divider
         * Fractional part is 4 bits,
         * so simply multiply by 2^4 to get fractional part.
         */
        div = parent_rate / rate;

        if (div < 0x10)            /* Min DI disp clock divider is 1 */
                div = 0x10;
        if (div & ~0xFEF)
                div &= 0xFF8;
        else {
                /* Round up divider if it gets us closer to desired pix clk */
                if ((div & 0xC) == 0xC) {
                        div += 0x10;
                        div &= ~0xF;
                }
        }
        return parent_rate / div;
}
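
/*
 * Worked example (added; the numbers are only an assumption): with a 264 MHz
 * parent clock and a requested 66 MHz pixel clock, the 4.4 fixed point
 * divider is div = (264000000 * 16) / 66000000 = 64 = 0x40, i.e. an exact
 * divide by 4, and the rounded rate is parent_rate / div = 66 MHz again.
 * Requests above the parent rate clamp to div = 0x10, a divide by 1.
 */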

static int _ipu_pixel_clk_set_rate(struct clk *clk, unsigned long rate)
{
        struct ipu_soc *ipu = pixelclk2ipu(clk);
        u32 div = (clk_get_rate(clk->parent) * 16) / rate;
        unsigned long lock_flags;

        /* Round up divider if it gets us closer to desired pix clk */
        if ((div & 0xC) == 0xC) {
                div += 0x10;
                div &= ~0xF;
        }

        spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
        ipu_di_write(ipu, clk->id, div, DI_BS_CLKGEN0);

        /* Setup pixel clock timing */
        /* FIXME: needs to be more flexible */
        /* Down time is half of period */
        ipu_di_write(ipu, clk->id, (div / 16) << 16, DI_BS_CLKGEN1);
        spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);

        return 0;
}

static int _ipu_pixel_clk_enable(struct clk *clk)
{
        struct ipu_soc *ipu = pixelclk2ipu(clk);
        u32 disp_gen = ipu_cm_read(ipu, IPU_DISP_GEN);
        disp_gen |= clk->id ? DI1_COUNTER_RELEASE : DI0_COUNTER_RELEASE;
        ipu_cm_write(ipu, disp_gen, IPU_DISP_GEN);

        return 0;
}

static void _ipu_pixel_clk_disable(struct clk *clk)
{
        struct ipu_soc *ipu = pixelclk2ipu(clk);

        u32 disp_gen = ipu_cm_read(ipu, IPU_DISP_GEN);
        disp_gen &= clk->id ? ~DI1_COUNTER_RELEASE : ~DI0_COUNTER_RELEASE;
        ipu_cm_write(ipu, disp_gen, IPU_DISP_GEN);
}

static int _ipu_pixel_clk_set_parent(struct clk *clk, struct clk *parent)
{
        struct ipu_soc *ipu = pixelclk2ipu(clk);
        unsigned long lock_flags;
        u32 di_gen;

        spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
        di_gen = ipu_di_read(ipu, clk->id, DI_GENERAL);
        if (parent == ipu->ipu_clk)
                di_gen &= ~DI_GEN_DI_CLK_EXT;
        else if (!IS_ERR(ipu->di_clk[clk->id]) && parent == ipu->di_clk[clk->id])
                di_gen |= DI_GEN_DI_CLK_EXT;
        else {
                spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
                return -EINVAL;
        }

        ipu_di_write(ipu, clk->id, di_gen, DI_GENERAL);
        spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
        return 0;
}

#ifdef CONFIG_CLK_DEBUG
#define __INIT_CLK_DEBUG(n)     .name = #n,
#else
#define __INIT_CLK_DEBUG(n)
#endif
static int __devinit ipu_clk_setup_enable(struct ipu_soc *ipu,
                struct platform_device *pdev)
{
        struct imx_ipuv3_platform_data *plat_data = pdev->dev.platform_data;
        static struct clk ipu_pixel_clk[] = {
                {
                        __INIT_CLK_DEBUG(pixel_clk_0)
                        .id = 0,
                        .get_rate = _ipu_pixel_clk_get_rate,
                        .set_rate = _ipu_pixel_clk_set_rate,
                        .round_rate = _ipu_pixel_clk_round_rate,
                        .set_parent = _ipu_pixel_clk_set_parent,
                        .enable = _ipu_pixel_clk_enable,
                        .disable = _ipu_pixel_clk_disable,
                },
                {
                        __INIT_CLK_DEBUG(pixel_clk_1)
                        .id = 1,
                        .get_rate = _ipu_pixel_clk_get_rate,
                        .set_rate = _ipu_pixel_clk_set_rate,
                        .round_rate = _ipu_pixel_clk_round_rate,
                        .set_parent = _ipu_pixel_clk_set_parent,
                        .enable = _ipu_pixel_clk_enable,
                        .disable = _ipu_pixel_clk_disable,
                },
        };
        static struct clk_lookup ipu_lookups[] = {
                {
                        .dev_id = NULL,
                        .con_id = "pixel_clk_0",
                },
                {
                        .dev_id = NULL,
                        .con_id = "pixel_clk_1",
                },
        };
        char ipu_clk[] = "ipu1_clk";
        char di0_clk[] = "ipu1_di0_clk";
        char di1_clk[] = "ipu1_di1_clk";

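        /*
         * Note (added for clarity): the names above are templates for the
         * first IPU instance; bumping the character at index 3 by pdev->id
         * turns e.g. "ipu1_clk" into "ipu2_clk" on SoCs with more than one
         * IPU.
         */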
        ipu_clk[3] += pdev->id;
        di0_clk[3] += pdev->id;
        di1_clk[3] += pdev->id;

        ipu->ipu_clk = clk_get(ipu->dev, ipu_clk);
        if (IS_ERR(ipu->ipu_clk)) {
                dev_err(ipu->dev, "clk_get failed\n");
                return PTR_ERR(ipu->ipu_clk);
        }
        dev_dbg(ipu->dev, "ipu_clk = %lu\n", clk_get_rate(ipu->ipu_clk));

        ipu->pixel_clk[0] = ipu_pixel_clk[0];
        ipu->pixel_clk[1] = ipu_pixel_clk[1];

        ipu_lookups[0].clk = &ipu->pixel_clk[0];
        ipu_lookups[1].clk = &ipu->pixel_clk[1];
        clkdev_add(&ipu_lookups[0]);
        clkdev_add(&ipu_lookups[1]);

        clk_debug_register(&ipu->pixel_clk[0]);
        clk_debug_register(&ipu->pixel_clk[1]);

        clk_enable(ipu->ipu_clk);

        clk_set_parent(&ipu->pixel_clk[0], ipu->ipu_clk);
        clk_set_parent(&ipu->pixel_clk[1], ipu->ipu_clk);

        ipu->di_clk[0] = clk_get(ipu->dev, di0_clk);
        ipu->di_clk[1] = clk_get(ipu->dev, di1_clk);

        ipu->csi_clk[0] = clk_get(ipu->dev, plat_data->csi_clk[0]);
        ipu->csi_clk[1] = clk_get(ipu->dev, plat_data->csi_clk[1]);

        return 0;
}

#if 0
static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
{
        struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
        const int int_reg[] = { 1, 2, 3, 4, 11, 12, 13, 14, 15, 0 };
        u32 status;
        int i, line;

        for (i = 0;; i++) {
                if (int_reg[i] == 0)
                        break;

                status = ipu_cm_read(ipu, IPU_INT_STAT(int_reg[i]));
                status &= ipu_cm_read(ipu, IPU_INT_CTRL(int_reg[i]));

                while ((line = ffs(status))) {
                        line--;
                        status &= ~(1UL << line);
                        line += ipu->irq_start + (int_reg[i] - 1) * 32;
                        generic_handle_irq(line);
                }

        }
}

static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
{
        struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
        const int int_reg[] = { 5, 6, 9, 10, 0 };
        u32 status;
        int i, line;

        for (i = 0;; i++) {
                if (int_reg[i] == 0)
                        break;

                status = ipu_cm_read(ipu, IPU_INT_STAT(int_reg[i]));
                status &= ipu_cm_read(ipu, IPU_INT_CTRL(int_reg[i]));

                while ((line = ffs(status))) {
                        line--;
                        status &= ~(1UL << line);
                        line += ipu->irq_start + (int_reg[i] - 1) * 32;
                        generic_handle_irq(line);
                }

        }
}

static void ipu_ack_irq(struct irq_data *d)
{
        struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
        unsigned int irq = d->irq - ipu->irq_start;
        unsigned long flags;

        spin_lock_irqsave(&ipu->ipu_lock, flags);
        ipu_cm_write(ipu, 1 << (irq % 32), IPU_INT_STAT(irq / 32 + 1));
        spin_unlock_irqrestore(&ipu->ipu_lock, flags);
}

static void ipu_unmask_irq(struct irq_data *d)
{
        struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
        unsigned int irq = d->irq - ipu->irq_start;
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&ipu->ipu_lock, flags);
        reg = ipu_cm_read(ipu, IPU_INT_CTRL(irq / 32 + 1));
        reg |= 1 << (irq % 32);
        ipu_cm_write(ipu, reg, IPU_INT_CTRL(irq / 32 + 1));
        spin_unlock_irqrestore(&ipu->ipu_lock, flags);
}

static void ipu_mask_irq(struct irq_data *d)
{
        struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
        unsigned int irq = d->irq - ipu->irq_start;
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&ipu->ipu_lock, flags);
        reg = ipu_cm_read(ipu, IPU_INT_CTRL(irq / 32 + 1));
        reg &= ~(1 << (irq % 32));
        ipu_cm_write(ipu, reg, IPU_INT_CTRL(irq / 32 + 1));
        spin_unlock_irqrestore(&ipu->ipu_lock, flags);
}

static struct irq_chip ipu_irq_chip = {
        .name = "IPU",
        .irq_ack = ipu_ack_irq,
        .irq_mask = ipu_mask_irq,
        .irq_unmask = ipu_unmask_irq,
};

static void __devinit ipu_irq_setup(struct ipu_soc *ipu)
{
        int i;

        for (i = ipu->irq_start; i < ipu->irq_start + MX5_IPU_IRQS; i++) {
                irq_set_chip_and_handler(i, &ipu_irq_chip, handle_level_irq);
                set_irq_flags(i, IRQF_VALID);
                irq_set_chip_data(i, ipu);
        }

        irq_set_chained_handler(ipu->irq_sync, ipu_irq_handler);
        irq_set_handler_data(ipu->irq_sync, ipu);
        irq_set_chained_handler(ipu->irq_err, ipu_err_irq_handler);
        irq_set_handler_data(ipu->irq_err, ipu);
}

int ipu_request_irq(struct ipu_soc *ipu, unsigned int irq,
                irq_handler_t handler, unsigned long flags,
                const char *name, void *dev)
{
        return request_irq(ipu->irq_start + irq, handler, flags, name, dev);
}
EXPORT_SYMBOL_GPL(ipu_request_irq);

void ipu_enable_irq(struct ipu_soc *ipu, unsigned int irq)
{
        return enable_irq(ipu->irq_start + irq);
}
EXPORT_SYMBOL_GPL(ipu_enable_irq);

void ipu_disable_irq(struct ipu_soc *ipu, unsigned int irq)
{
        return disable_irq(ipu->irq_start + irq);
}
EXPORT_SYMBOL_GPL(ipu_disable_irq);

void ipu_free_irq(struct ipu_soc *ipu, unsigned int irq, void *dev_id)
{
        free_irq(ipu->irq_start + irq, dev_id);
}
EXPORT_SYMBOL_GPL(ipu_free_irq);

static irqreturn_t ipu_completion_handler(int irq, void *dev)
{
        struct completion *completion = dev;

        complete(completion);
        return IRQ_HANDLED;
}

int ipu_wait_for_interrupt(struct ipu_soc *ipu, int interrupt, int timeout_ms)
{
        DECLARE_COMPLETION_ONSTACK(completion);
        int ret;

        ret = ipu_request_irq(ipu, interrupt, ipu_completion_handler,
                        0, NULL, &completion);
        if (ret) {
                dev_err(ipu->dev,
                        "ipu request irq %d fail\n", interrupt);
                return ret;
        }

        ret = wait_for_completion_timeout(&completion,
                        msecs_to_jiffies(timeout_ms));

        ipu_free_irq(ipu, interrupt, &completion);

        return ret > 0 ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(ipu_wait_for_interrupt);
#endif

struct ipu_soc *ipu_get_soc(int id)
{
        if (id >= MXC_IPU_MAX_NUM)
                return ERR_PTR(-ENODEV);
        else
                return &(ipu_array[id]);
}

/*!
 * This function is called by the driver framework to initialize the IPU
 * hardware.
 *
 * @param       pdev    The platform device structure for the IPU passed in
 *                      by the driver framework.
 *
 * @return      Returns 0 on success or negative error code on error
 */
static int __devinit ipu_probe(struct platform_device *pdev)
{
        struct imx_ipuv3_platform_data *plat_data = pdev->dev.platform_data;
        struct ipu_soc *ipu;
        struct resource *res;
        unsigned long ipu_base;
        int ret = 0;

        if (pdev->id >= MXC_IPU_MAX_NUM)
                return -ENODEV;

        ipu = &ipu_array[pdev->id];
        memset(ipu, 0, sizeof(struct ipu_soc));

        spin_lock_init(&ipu->ipu_lock);

        g_ipu_hw_rev = plat_data->rev;

        ipu->dev = &pdev->dev;

        if (plat_data->init)
                plat_data->init(pdev->id);

        ipu->irq_sync = platform_get_irq(pdev, 0);
        ipu->irq_err = platform_get_irq(pdev, 1);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        if (!res || ipu->irq_sync < 0 || ipu->irq_err < 0) {
                ret = -ENODEV;
                goto failed_get_res;
        }

        if (request_irq(ipu->irq_sync, ipu_irq_handler, 0, pdev->name, ipu) != 0) {
                dev_err(ipu->dev, "request SYNC interrupt failed\n");
                ret = -EBUSY;
                goto failed_req_irq_sync;
        }
        /* Some platforms have 2 IPU interrupts */
        if (ipu->irq_err >= 0) {
                if (request_irq
                    (ipu->irq_err, ipu_irq_handler, 0, pdev->name, ipu) != 0) {
                        dev_err(ipu->dev, "request ERR interrupt failed\n");
                        ret = -EBUSY;
                        goto failed_req_irq_err;
                }
        }

        ipu_base = res->start;
        /* base fixup */
        if (g_ipu_hw_rev == 4)  /* IPUv3H */
                ipu_base += IPUV3H_REG_BASE;
        else if (g_ipu_hw_rev == 3)     /* IPUv3M */
                ipu_base += IPUV3M_REG_BASE;
        else                    /* IPUv3D, v3E, v3EX */
                ipu_base += IPUV3DEX_REG_BASE;

        ipu->cm_reg = ioremap(ipu_base + IPU_CM_REG_BASE, PAGE_SIZE);
        ipu->ic_reg = ioremap(ipu_base + IPU_IC_REG_BASE, PAGE_SIZE);
        ipu->idmac_reg = ioremap(ipu_base + IPU_IDMAC_REG_BASE, PAGE_SIZE);
        /* DP Registers are accessed thru the SRM */
        ipu->dp_reg = ioremap(ipu_base + IPU_SRM_REG_BASE, PAGE_SIZE);
        ipu->dc_reg = ioremap(ipu_base + IPU_DC_REG_BASE, PAGE_SIZE);
        ipu->dmfc_reg = ioremap(ipu_base + IPU_DMFC_REG_BASE, PAGE_SIZE);
        ipu->di_reg[0] = ioremap(ipu_base + IPU_DI0_REG_BASE, PAGE_SIZE);
        ipu->di_reg[1] = ioremap(ipu_base + IPU_DI1_REG_BASE, PAGE_SIZE);
        ipu->smfc_reg = ioremap(ipu_base + IPU_SMFC_REG_BASE, PAGE_SIZE);
        ipu->csi_reg[0] = ioremap(ipu_base + IPU_CSI0_REG_BASE, PAGE_SIZE);
        ipu->csi_reg[1] = ioremap(ipu_base + IPU_CSI1_REG_BASE, PAGE_SIZE);
        ipu->cpmem_base = ioremap(ipu_base + IPU_CPMEM_REG_BASE, SZ_128K);
        ipu->tpmem_base = ioremap(ipu_base + IPU_TPM_REG_BASE, SZ_64K);
        ipu->dc_tmpl_reg = ioremap(ipu_base + IPU_DC_TMPL_REG_BASE, SZ_128K);
        ipu->vdi_reg = ioremap(ipu_base + IPU_VDI_REG_BASE, PAGE_SIZE);
        ipu->disp_base[1] = ioremap(ipu_base + IPU_DISP1_BASE, SZ_4K);

        if (!ipu->cm_reg || !ipu->ic_reg || !ipu->idmac_reg ||
                !ipu->dp_reg || !ipu->dc_reg || !ipu->dmfc_reg ||
                !ipu->di_reg[0] || !ipu->di_reg[1] || !ipu->smfc_reg ||
                !ipu->csi_reg[0] || !ipu->csi_reg[1] || !ipu->cpmem_base ||
                !ipu->tpmem_base || !ipu->dc_tmpl_reg || !ipu->disp_base[1]
                || !ipu->vdi_reg) {
                ret = -ENOMEM;
                goto failed_ioremap;
        }

        dev_dbg(ipu->dev, "IPU CM Regs = %p\n", ipu->cm_reg);
        dev_dbg(ipu->dev, "IPU IC Regs = %p\n", ipu->ic_reg);
        dev_dbg(ipu->dev, "IPU IDMAC Regs = %p\n", ipu->idmac_reg);
        dev_dbg(ipu->dev, "IPU DP Regs = %p\n", ipu->dp_reg);
        dev_dbg(ipu->dev, "IPU DC Regs = %p\n", ipu->dc_reg);
        dev_dbg(ipu->dev, "IPU DMFC Regs = %p\n", ipu->dmfc_reg);
        dev_dbg(ipu->dev, "IPU DI0 Regs = %p\n", ipu->di_reg[0]);
        dev_dbg(ipu->dev, "IPU DI1 Regs = %p\n", ipu->di_reg[1]);
        dev_dbg(ipu->dev, "IPU SMFC Regs = %p\n", ipu->smfc_reg);
        dev_dbg(ipu->dev, "IPU CSI0 Regs = %p\n", ipu->csi_reg[0]);
        dev_dbg(ipu->dev, "IPU CSI1 Regs = %p\n", ipu->csi_reg[1]);
        dev_dbg(ipu->dev, "IPU CPMem = %p\n", ipu->cpmem_base);
        dev_dbg(ipu->dev, "IPU TPMem = %p\n", ipu->tpmem_base);
        dev_dbg(ipu->dev, "IPU DC Template Mem = %p\n", ipu->dc_tmpl_reg);
        dev_dbg(ipu->dev, "IPU Display Region 1 Mem = %p\n", ipu->disp_base[1]);
        dev_dbg(ipu->dev, "IPU VDI Regs = %p\n", ipu->vdi_reg);

        ret = ipu_clk_setup_enable(ipu, pdev);
        if (ret < 0) {
                dev_err(ipu->dev, "ipu clk setup failed\n");
                goto failed_clk_setup;
        }

        platform_set_drvdata(pdev, ipu);

        ipu_reset(ipu);

        ipu_disp_init(ipu);

        /* Set sync refresh channels and CSI->mem channel as high priority */
        ipu_idmac_write(ipu, 0x18800001L, IDMAC_CHA_PRI(0));

        /* Set MCU_T to divide MCU access window into 2 */
        ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18), IPU_DISP_GEN);

        clk_disable(ipu->ipu_clk);

        register_ipu_device(ipu, pdev->id);

        return ret;

failed_clk_setup:
        iounmap(ipu->cm_reg);
        iounmap(ipu->ic_reg);
        iounmap(ipu->idmac_reg);
        iounmap(ipu->dc_reg);
        iounmap(ipu->dp_reg);
        iounmap(ipu->dmfc_reg);
        iounmap(ipu->di_reg[0]);
        iounmap(ipu->di_reg[1]);
        iounmap(ipu->smfc_reg);
        iounmap(ipu->csi_reg[0]);
        iounmap(ipu->csi_reg[1]);
        iounmap(ipu->cpmem_base);
        iounmap(ipu->tpmem_base);
        iounmap(ipu->dc_tmpl_reg);
        iounmap(ipu->disp_base[1]);
        iounmap(ipu->vdi_reg);
failed_ioremap:
        if (ipu->irq_err >= 0)
                free_irq(ipu->irq_err, ipu);
failed_req_irq_err:
        free_irq(ipu->irq_sync, ipu);
failed_req_irq_sync:
failed_get_res:
        return ret;
}

int __devexit ipu_remove(struct platform_device *pdev)
{
        struct ipu_soc *ipu = platform_get_drvdata(pdev);

        unregister_ipu_device(ipu, pdev->id);

        if (ipu->irq_sync)
                free_irq(ipu->irq_sync, ipu);
        if (ipu->irq_err)
                free_irq(ipu->irq_err, ipu);

        clk_put(ipu->ipu_clk);

        iounmap(ipu->cm_reg);
        iounmap(ipu->ic_reg);
        iounmap(ipu->idmac_reg);
        iounmap(ipu->dc_reg);
        iounmap(ipu->dp_reg);
        iounmap(ipu->dmfc_reg);
        iounmap(ipu->di_reg[0]);
        iounmap(ipu->di_reg[1]);
        iounmap(ipu->smfc_reg);
        iounmap(ipu->csi_reg[0]);
        iounmap(ipu->csi_reg[1]);
        iounmap(ipu->cpmem_base);
        iounmap(ipu->tpmem_base);
        iounmap(ipu->dc_tmpl_reg);
        iounmap(ipu->disp_base[1]);
        iounmap(ipu->vdi_reg);

        return 0;
}

void ipu_dump_registers(struct ipu_soc *ipu)
{
        dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n", ipu_cm_read(ipu, IPU_CONF));
        dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n", ipu_idmac_read(ipu, IDMAC_CONF));
        dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
               ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
        dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
               ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
        dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
               ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
        dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
               ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
        dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
               ipu_idmac_read(ipu, IDMAC_BAND_EN(0)));
        dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
               ipu_idmac_read(ipu, IDMAC_BAND_EN(32)));
        dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
               ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
        dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
               ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
        if (g_ipu_hw_rev >= 2) {
                dev_dbg(ipu->dev, "IPU_CHA_TRB_MODE_SEL0 = \t0x%08X\n",
                       ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(0)));
                dev_dbg(ipu->dev, "IPU_CHA_TRB_MODE_SEL1 = \t0x%08X\n",
                       ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(32)));
        }
        dev_dbg(ipu->dev, "DMFC_WR_CHAN = \t0x%08X\n",
               ipu_dmfc_read(ipu, DMFC_WR_CHAN));
        dev_dbg(ipu->dev, "DMFC_WR_CHAN_DEF = \t0x%08X\n",
               ipu_dmfc_read(ipu, DMFC_WR_CHAN_DEF));
        dev_dbg(ipu->dev, "DMFC_DP_CHAN = \t0x%08X\n",
               ipu_dmfc_read(ipu, DMFC_DP_CHAN));
        dev_dbg(ipu->dev, "DMFC_DP_CHAN_DEF = \t0x%08X\n",
               ipu_dmfc_read(ipu, DMFC_DP_CHAN_DEF));
        dev_dbg(ipu->dev, "DMFC_IC_CTRL = \t0x%08X\n",
               ipu_dmfc_read(ipu, DMFC_IC_CTRL));
        dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
               ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
        dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
               ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
        dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
               ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
        dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
               ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
}

/*!
 * This function is called to initialize a logical IPU channel.
 *
 * @param       ipu     ipu handler
 * @param       channel Input parameter for the logical channel ID to init.
 *
 * @param       params  Input parameter containing union of channel
 *                      initialization parameters.
 *
 * @return      Returns 0 on success or negative error code on fail
 */
int32_t ipu_init_channel(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params)
{
        int ret = 0;
        uint32_t ipu_conf;
        uint32_t reg;
        unsigned long lock_flags;

        dev_dbg(ipu->dev, "init channel = %d\n", IPU_CHAN_ID(channel));

        if (ipu->clk_enabled == false) {
                ipu->clk_enabled = true;
                clk_enable(ipu->ipu_clk);
        }

        spin_lock_irqsave(&ipu->ipu_lock, lock_flags);

        if (ipu->channel_init_mask & (1L << IPU_CHAN_ID(channel))) {
                dev_err(ipu->dev, "Warning: channel already initialized %d\n",
                        IPU_CHAN_ID(channel));
        }

        ipu_conf = ipu_cm_read(ipu, IPU_CONF);

        switch (channel) {
        case CSI_MEM0:
        case CSI_MEM1:
        case CSI_MEM2:
        case CSI_MEM3:
                if (params->csi_mem.csi > 1) {
                        ret = -EINVAL;
                        goto err;
                }

                if (params->csi_mem.interlaced)
                        ipu->chan_is_interlaced[channel_2_dma(channel,
                                IPU_OUTPUT_BUFFER)] = true;
                else
                        ipu->chan_is_interlaced[channel_2_dma(channel,
                                IPU_OUTPUT_BUFFER)] = false;

                ipu->smfc_use_count++;
                ipu->csi_channel[params->csi_mem.csi] = channel;

                /* SMFC setting */
                if (params->csi_mem.mipi_en) {
                        ipu_conf |= (1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
                                params->csi_mem.csi));
                        _ipu_smfc_init(ipu, channel, params->csi_mem.mipi_id,
                                params->csi_mem.csi);
                } else {
                        ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
                                params->csi_mem.csi));
                        _ipu_smfc_init(ipu, channel, 0, params->csi_mem.csi);
                }

                /* CSI data (including compander) destination */
                _ipu_csi_init(ipu, channel, params->csi_mem.csi);
                break;
        case CSI_PRP_ENC_MEM:
                if (params->csi_prp_enc_mem.csi > 1) {
                        ret = -EINVAL;
                        goto err;
                }
                if (ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) {
                        ret = -EINVAL;
                        goto err;
                }
                ipu->using_ic_dirct_ch = CSI_PRP_ENC_MEM;

                ipu->ic_use_count++;
                ipu->csi_channel[params->csi_prp_enc_mem.csi] = channel;

                /* Without SMFC, the CSI only supports a parallel data source */
                ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
                        params->csi_prp_enc_mem.csi));

                /* CSI0/1 feeds into the IC */
                ipu_conf &= ~IPU_CONF_IC_INPUT;
                if (params->csi_prp_enc_mem.csi)
                        ipu_conf |= IPU_CONF_CSI_SEL;
                else
                        ipu_conf &= ~IPU_CONF_CSI_SEL;

                /* PRP skips the buffer in memory; only valid when RWS_EN is true */
                reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
                ipu_cm_write(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);

                /* CSI data (including compander) destination */
                _ipu_csi_init(ipu, channel, params->csi_prp_enc_mem.csi);
                _ipu_ic_init_prpenc(ipu, params, true);
                break;
        case CSI_PRP_VF_MEM:
                if (params->csi_prp_vf_mem.csi > 1) {
                        ret = -EINVAL;
                        goto err;
                }
                if (ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) {
                        ret = -EINVAL;
                        goto err;
                }
                ipu->using_ic_dirct_ch = CSI_PRP_VF_MEM;

                ipu->ic_use_count++;
                ipu->csi_channel[params->csi_prp_vf_mem.csi] = channel;

                /* Without SMFC, the CSI only supports a parallel data source */
                ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
                        params->csi_prp_vf_mem.csi));

                /* CSI0/1 feeds into the IC */
                ipu_conf &= ~IPU_CONF_IC_INPUT;
                if (params->csi_prp_vf_mem.csi)
                        ipu_conf |= IPU_CONF_CSI_SEL;
                else
                        ipu_conf &= ~IPU_CONF_CSI_SEL;

                /* PRP skips the buffer in memory; only valid when RWS_EN is true */
                reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
                ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);

                /* CSI data (including compander) destination */
                _ipu_csi_init(ipu, channel, params->csi_prp_vf_mem.csi);
                _ipu_ic_init_prpvf(ipu, params, true);
                break;
        case MEM_PRP_VF_MEM:
                ipu->ic_use_count++;
                reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
                ipu_cm_write(ipu, reg | FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);

                if (params->mem_prp_vf_mem.graphics_combine_en)
                        ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
                if (params->mem_prp_vf_mem.alpha_chan_en)
                        ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;

                _ipu_ic_init_prpvf(ipu, params, false);
                break;
        case MEM_VDI_PRP_VF_MEM:
                if ((ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM) ||
                     (ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)) {
                        ret = -EINVAL;
                        goto err;
                }
                ipu->using_ic_dirct_ch = MEM_VDI_PRP_VF_MEM;
                ipu->ic_use_count++;
                ipu->vdi_use_count++;
                reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
                reg &= ~FS_VDI_SRC_SEL_MASK;
                ipu_cm_write(ipu, reg, IPU_FS_PROC_FLOW1);

                if (params->mem_prp_vf_mem.graphics_combine_en)
                        ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
                _ipu_ic_init_prpvf(ipu, params, false);
                _ipu_vdi_init(ipu, channel, params);
                break;
        case MEM_VDI_PRP_VF_MEM_P:
                _ipu_vdi_init(ipu, channel, params);
                break;
        case MEM_VDI_PRP_VF_MEM_N:
                _ipu_vdi_init(ipu, channel, params);
                break;
        case MEM_ROT_VF_MEM:
                ipu->ic_use_count++;
                ipu->rot_use_count++;
                _ipu_ic_init_rotate_vf(ipu, params);
                break;
        case MEM_PRP_ENC_MEM:
                ipu->ic_use_count++;
                reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
                ipu_cm_write(ipu, reg | FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
                _ipu_ic_init_prpenc(ipu, params, false);
                break;
        case MEM_ROT_ENC_MEM:
                ipu->ic_use_count++;
                ipu->rot_use_count++;
                _ipu_ic_init_rotate_enc(ipu, params);
                break;
        case MEM_PP_MEM:
                if (params->mem_pp_mem.graphics_combine_en)
                        ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
                if (params->mem_pp_mem.alpha_chan_en)
                        ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
                _ipu_ic_init_pp(ipu, params);
                ipu->ic_use_count++;
                break;
        case MEM_ROT_PP_MEM:
                _ipu_ic_init_rotate_pp(ipu, params);
                ipu->ic_use_count++;
                ipu->rot_use_count++;
                break;
        case MEM_DC_SYNC:
                if (params->mem_dc_sync.di > 1) {
                        ret = -EINVAL;
                        goto err;
                }

                ipu->dc_di_assignment[1] = params->mem_dc_sync.di;
                _ipu_dc_init(ipu, 1, params->mem_dc_sync.di,
                             params->mem_dc_sync.interlaced,
                             params->mem_dc_sync.out_pixel_fmt);
                ipu->di_use_count[params->mem_dc_sync.di]++;
                ipu->dc_use_count++;
                ipu->dmfc_use_count++;
                break;
        case MEM_BG_SYNC:
                if (params->mem_dp_bg_sync.di > 1) {
                        ret = -EINVAL;
                        goto err;
                }

                if (params->mem_dp_bg_sync.alpha_chan_en)
                        ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;

                ipu->dc_di_assignment[5] = params->mem_dp_bg_sync.di;
                _ipu_dp_init(ipu, channel, params->mem_dp_bg_sync.in_pixel_fmt,
                             params->mem_dp_bg_sync.out_pixel_fmt);
                _ipu_dc_init(ipu, 5, params->mem_dp_bg_sync.di,
                             params->mem_dp_bg_sync.interlaced,
                             params->mem_dp_bg_sync.out_pixel_fmt);
                ipu->di_use_count[params->mem_dp_bg_sync.di]++;
                ipu->dc_use_count++;
                ipu->dp_use_count++;
                ipu->dmfc_use_count++;
                break;
        case MEM_FG_SYNC:
                _ipu_dp_init(ipu, channel, params->mem_dp_fg_sync.in_pixel_fmt,
                             params->mem_dp_fg_sync.out_pixel_fmt);

                if (params->mem_dp_fg_sync.alpha_chan_en)
                        ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;

                ipu->dc_use_count++;
                ipu->dp_use_count++;
                ipu->dmfc_use_count++;
                break;
        case DIRECT_ASYNC0:
                if (params->direct_async.di > 1) {
                        ret = -EINVAL;
                        goto err;
                }

                ipu->dc_di_assignment[8] = params->direct_async.di;
                _ipu_dc_init(ipu, 8, params->direct_async.di, false, IPU_PIX_FMT_GENERIC);
                ipu->di_use_count[params->direct_async.di]++;
                ipu->dc_use_count++;
                break;
        case DIRECT_ASYNC1:
                if (params->direct_async.di > 1) {
                        ret = -EINVAL;
                        goto err;
                }

                ipu->dc_di_assignment[9] = params->direct_async.di;
                _ipu_dc_init(ipu, 9, params->direct_async.di, false, IPU_PIX_FMT_GENERIC);
                ipu->di_use_count[params->direct_async.di]++;
                ipu->dc_use_count++;
                break;
        default:
                dev_err(ipu->dev, "Missing channel initialization\n");
                break;
        }

        /* Enable IPU sub module */
        ipu->channel_init_mask |= 1L << IPU_CHAN_ID(channel);

        ipu_cm_write(ipu, ipu_conf, IPU_CONF);

err:
        spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
        return ret;
}
EXPORT_SYMBOL(ipu_init_channel);
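
/*
 * Illustrative usage sketch (added; not part of this driver).  A client
 * pairs ipu_init_channel() with ipu_uninit_channel(); for example, setting
 * up the DP background flow on DI0 (the pixel formats below are only
 * placeholders, real callers pass a FOURCC from ipu.h):
 *
 *      ipu_channel_params_t params;
 *      struct ipu_soc *ipu = ipu_get_soc(0);
 *
 *      memset(&params, 0, sizeof(params));
 *      params.mem_dp_bg_sync.di = 0;
 *      params.mem_dp_bg_sync.in_pixel_fmt = IPU_PIX_FMT_GENERIC;
 *      params.mem_dp_bg_sync.out_pixel_fmt = IPU_PIX_FMT_GENERIC;
 *      if (ipu_init_channel(ipu, MEM_BG_SYNC, &params) < 0)
 *              return -ENODEV;
 *      ...
 *      ipu_uninit_channel(ipu, MEM_BG_SYNC);
 */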

/*!
 * This function is called to uninitialize a logical IPU channel.
 *
 * @param       ipu     ipu handler
 * @param       channel Input parameter for the logical channel ID to uninit.
 */
void ipu_uninit_channel(struct ipu_soc *ipu, ipu_channel_t channel)
{
        unsigned long lock_flags;
        uint32_t reg;
        uint32_t in_dma, out_dma = 0;
        uint32_t ipu_conf;

        spin_lock_irqsave(&ipu->ipu_lock, lock_flags);

        if ((ipu->channel_init_mask & (1L << IPU_CHAN_ID(channel))) == 0) {
                dev_err(ipu->dev, "Channel already uninitialized %d\n",
                        IPU_CHAN_ID(channel));
                spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
                return;
        }

        /* Make sure channel is disabled */
        /* Get input and output dma channels */
        in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
        out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);

        if (idma_is_set(ipu, IDMAC_CHA_EN, in_dma) ||
            idma_is_set(ipu, IDMAC_CHA_EN, out_dma)) {
                dev_err(ipu->dev,
                        "Channel %d is not disabled, disable first\n",
                        IPU_CHAN_ID(channel));
                spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
                return;
        }

        ipu_conf = ipu_cm_read(ipu, IPU_CONF);

        /* Reset the double buffer */
        reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(in_dma));
        ipu_cm_write(ipu, reg & ~idma_mask(in_dma), IPU_CHA_DB_MODE_SEL(in_dma));
        reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(out_dma));
        ipu_cm_write(ipu, reg & ~idma_mask(out_dma), IPU_CHA_DB_MODE_SEL(out_dma));

        /* Reset the triple buffer */
        reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(in_dma));
        ipu_cm_write(ipu, reg & ~idma_mask(in_dma), IPU_CHA_TRB_MODE_SEL(in_dma));
        reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(out_dma));
        ipu_cm_write(ipu, reg & ~idma_mask(out_dma), IPU_CHA_TRB_MODE_SEL(out_dma));

        if (_ipu_is_ic_chan(in_dma) || _ipu_is_dp_graphic_chan(in_dma)) {
                ipu->sec_chan_en[IPU_CHAN_ID(channel)] = false;
                ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = false;
        }

        switch (channel) {
        case CSI_MEM0:
        case CSI_MEM1:
        case CSI_MEM2:
        case CSI_MEM3:
                ipu->smfc_use_count--;
                if (ipu->csi_channel[0] == channel) {
                        ipu->csi_channel[0] = CHAN_NONE;
                } else if (ipu->csi_channel[1] == channel) {
                        ipu->csi_channel[1] = CHAN_NONE;
                }
                break;
        case CSI_PRP_ENC_MEM:
                ipu->ic_use_count--;
                if (ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)
                        ipu->using_ic_dirct_ch = 0;
                _ipu_ic_uninit_prpenc(ipu);
                if (ipu->csi_channel[0] == channel) {
                        ipu->csi_channel[0] = CHAN_NONE;
                } else if (ipu->csi_channel[1] == channel) {
                        ipu->csi_channel[1] = CHAN_NONE;
                }
                break;
        case CSI_PRP_VF_MEM:
                ipu->ic_use_count--;
                if (ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM)
                        ipu->using_ic_dirct_ch = 0;
                _ipu_ic_uninit_prpvf(ipu);
                if (ipu->csi_channel[0] == channel) {
                        ipu->csi_channel[0] = CHAN_NONE;
                } else if (ipu->csi_channel[1] == channel) {
                        ipu->csi_channel[1] = CHAN_NONE;
                }
                break;
        case MEM_PRP_VF_MEM:
                ipu->ic_use_count--;
                _ipu_ic_uninit_prpvf(ipu);
                reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
                ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
                break;
        case MEM_VDI_PRP_VF_MEM:
                ipu->ic_use_count--;
                ipu->vdi_use_count--;
                if (ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM)
                        ipu->using_ic_dirct_ch = 0;
                _ipu_ic_uninit_prpvf(ipu);
                _ipu_vdi_uninit(ipu);
                reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
                ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
                break;
        case MEM_VDI_PRP_VF_MEM_P:
        case MEM_VDI_PRP_VF_MEM_N:
                break;
        case MEM_ROT_VF_MEM:
                ipu->rot_use_count--;
                ipu->ic_use_count--;
                _ipu_ic_uninit_rotate_vf(ipu);
                break;
        case MEM_PRP_ENC_MEM:
                ipu->ic_use_count--;
                _ipu_ic_uninit_prpenc(ipu);
                reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
                ipu_cm_write(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
                break;
        case MEM_ROT_ENC_MEM:
                ipu->rot_use_count--;
                ipu->ic_use_count--;
                _ipu_ic_uninit_rotate_enc(ipu);
                break;
        case MEM_PP_MEM:
                ipu->ic_use_count--;
                _ipu_ic_uninit_pp(ipu);
                break;
        case MEM_ROT_PP_MEM:
                ipu->rot_use_count--;
                ipu->ic_use_count--;
                _ipu_ic_uninit_rotate_pp(ipu);
                break;
        case MEM_DC_SYNC:
                _ipu_dc_uninit(ipu, 1);
                ipu->di_use_count[ipu->dc_di_assignment[1]]--;
                ipu->dc_use_count--;
                ipu->dmfc_use_count--;
                break;
        case MEM_BG_SYNC:
                _ipu_dp_uninit(ipu, channel);
                _ipu_dc_uninit(ipu, 5);
                ipu->di_use_count[ipu->dc_di_assignment[5]]--;
                ipu->dc_use_count--;
                ipu->dp_use_count--;
                ipu->dmfc_use_count--;
                break;
        case MEM_FG_SYNC:
                _ipu_dp_uninit(ipu, channel);
                ipu->dc_use_count--;
                ipu->dp_use_count--;
                ipu->dmfc_use_count--;
                break;
        case DIRECT_ASYNC0:
                _ipu_dc_uninit(ipu, 8);
                ipu->di_use_count[ipu->dc_di_assignment[8]]--;
                ipu->dc_use_count--;
                break;
        case DIRECT_ASYNC1:
                _ipu_dc_uninit(ipu, 9);
                ipu->di_use_count[ipu->dc_di_assignment[9]]--;
                ipu->dc_use_count--;
                break;
        default:
                break;
        }

        ipu->channel_init_mask &= ~(1L << IPU_CHAN_ID(channel));

        if (ipu->ic_use_count == 0)
                ipu_conf &= ~IPU_CONF_IC_EN;
        if (ipu->vdi_use_count == 0) {
                ipu_conf &= ~IPU_CONF_ISP_EN;
                ipu_conf &= ~IPU_CONF_VDI_EN;
                ipu_conf &= ~IPU_CONF_IC_INPUT;
        }
        if (ipu->rot_use_count == 0)
                ipu_conf &= ~IPU_CONF_ROT_EN;
        if (ipu->dc_use_count == 0)
                ipu_conf &= ~IPU_CONF_DC_EN;
        if (ipu->dp_use_count == 0)
                ipu_conf &= ~IPU_CONF_DP_EN;
        if (ipu->dmfc_use_count == 0)
                ipu_conf &= ~IPU_CONF_DMFC_EN;
        if (ipu->di_use_count[0] == 0) {
                ipu_conf &= ~IPU_CONF_DI0_EN;
        }
        if (ipu->di_use_count[1] == 0) {
                ipu_conf &= ~IPU_CONF_DI1_EN;
        }
        if (ipu->smfc_use_count == 0)
                ipu_conf &= ~IPU_CONF_SMFC_EN;

        ipu_cm_write(ipu, ipu_conf, IPU_CONF);

        spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);

        if (ipu_conf == 0) {
                clk_disable(ipu->ipu_clk);
                ipu->clk_enabled = false;
        }

        WARN_ON(ipu->ic_use_count < 0);
        WARN_ON(ipu->vdi_use_count < 0);
        WARN_ON(ipu->rot_use_count < 0);
        WARN_ON(ipu->dc_use_count < 0);
        WARN_ON(ipu->dp_use_count < 0);
        WARN_ON(ipu->dmfc_use_count < 0);
        WARN_ON(ipu->smfc_use_count < 0);
}
EXPORT_SYMBOL(ipu_uninit_channel);

/*!
 * This function is called to initialize buffer(s) for logical IPU channel.
 *
 * @param       ipu             ipu handler
 *
 * @param       channel         Input parameter for the logical channel ID.
 *
 * @param       type            Input parameter which buffer to initialize.
 *
 * @param       pixel_fmt       Input parameter for pixel format of buffer.
 *                              Pixel format is a FOURCC ASCII code.
 *
 * @param       width           Input parameter for width of buffer in pixels.
 *
 * @param       height          Input parameter for height of buffer in pixels.
 *
 * @param       stride          Input parameter for stride length of buffer
 *                              in bytes (must be a multiple of 4 bytes).
 *
 * @param       rot_mode        Input parameter for rotation setting of buffer.
 *                              A rotation setting other than
 *                              IPU_ROTATE_VERT_FLIP
 *                              should only be used for input buffers of
 *                              rotation channels.
 *
 * @param       phyaddr_0       Input parameter buffer 0 physical address.
 *
 * @param       phyaddr_1       Input parameter buffer 1 physical address.
 *                              Setting this to a value other than NULL enables
 *                              double buffering mode.
 *
 * @param       phyaddr_2       Input parameter buffer 2 physical address.
 *                              Setting this to a value other than NULL enables
 *                              triple buffering mode, phyaddr_1 should not be
 *                              NULL then.
 *
 * @param       u               private u offset for additional cropping,
 *                              zero if not used.
 *
 * @param       v               private v offset for additional cropping,
 *                              zero if not used.
 *
 * @return      Returns 0 on success or negative error code on fail
 */
int32_t ipu_init_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
                                ipu_buffer_t type,
                                uint32_t pixel_fmt,
                                uint16_t width, uint16_t height,
                                uint32_t stride,
                                ipu_rotate_mode_t rot_mode,
                                dma_addr_t phyaddr_0, dma_addr_t phyaddr_1,
                                dma_addr_t phyaddr_2,
                                uint32_t u, uint32_t v)
{
        unsigned long lock_flags;
        uint32_t reg;
        uint32_t dma_chan;
        uint32_t burst_size;

        dma_chan = channel_2_dma(channel, type);
        if (!idma_is_valid(dma_chan))
                return -EINVAL;

        if (stride < width * bytes_per_pixel(pixel_fmt))
                stride = width * bytes_per_pixel(pixel_fmt);

        if (stride % 4) {
                dev_err(ipu->dev,
                        "Stride not 32-bit aligned, stride = %d\n", stride);
                return -EINVAL;
        }
        /* IC & IRT channels' width must be a multiple of 8 pixels */
        if ((_ipu_is_ic_chan(dma_chan) || _ipu_is_irt_chan(dma_chan))
                && (width % 8)) {
                dev_err(ipu->dev, "Width must be 8 pixel multiple\n");
                return -EINVAL;
        }

        /* IPUv3EX and IPUv3M support triple buffer */
        if ((!_ipu_is_trb_chan(dma_chan)) && phyaddr_2) {
                dev_err(ipu->dev, "Chan%d doesn't support triple buffer "
                                   "mode\n", dma_chan);
                return -EINVAL;
        }
        if (!phyaddr_1 && phyaddr_2) {
                dev_err(ipu->dev, "Chan%d's buf1 physical addr is NULL for "
                                   "triple buffer mode\n", dma_chan);
                return -EINVAL;
        }

        /* Build parameter memory data for DMA channel */
        _ipu_ch_param_init(ipu, dma_chan, pixel_fmt, width, height, stride, u, v, 0,
                           phyaddr_0, phyaddr_1, phyaddr_2);

        /* Set correlative channel parameter of local alpha channel */
        if ((_ipu_is_ic_graphic_chan(dma_chan) ||
             _ipu_is_dp_graphic_chan(dma_chan)) &&
            (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] == true)) {
                _ipu_ch_param_set_alpha_use_separate_channel(ipu, dma_chan, true);
                _ipu_ch_param_set_alpha_buffer_memory(ipu, dma_chan);
                _ipu_ch_param_set_alpha_condition_read(ipu, dma_chan);
                /* Fix alpha width at 8 and burst size at 16 */
                _ipu_ch_params_set_alpha_width(ipu, dma_chan, 8);
                _ipu_ch_param_set_burst_size(ipu, dma_chan, 16);
        } else if (_ipu_is_ic_graphic_chan(dma_chan) &&
                   ipu_pixel_format_has_alpha(pixel_fmt))
                _ipu_ch_param_set_alpha_use_separate_channel(ipu, dma_chan, false);

        if (rot_mode)
                _ipu_ch_param_set_rotation(ipu, dma_chan, rot_mode);

        /* IC and ROT channels have a restriction of 8 or 16 pixel burst length */
        if (_ipu_is_ic_chan(dma_chan)) {
                if ((width % 16) == 0)
                        _ipu_ch_param_set_burst_size(ipu, dma_chan, 16);
                else
                        _ipu_ch_param_set_burst_size(ipu, dma_chan, 8);
        } else if (_ipu_is_irt_chan(dma_chan)) {
                _ipu_ch_param_set_burst_size(ipu, dma_chan, 8);
                _ipu_ch_param_set_block_mode(ipu, dma_chan);
        } else if (_ipu_is_dmfc_chan(dma_chan)) {
                burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
                spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
                _ipu_dmfc_set_wait4eot(ipu, dma_chan, width);
                _ipu_dmfc_set_burst_size(ipu, dma_chan, burst_size);
                spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
        }

        if (_ipu_disp_chan_is_interlaced(ipu, channel) ||
                ipu->chan_is_interlaced[dma_chan])
                _ipu_ch_param_set_interlaced_scan(ipu, dma_chan);

        if (_ipu_is_ic_chan(dma_chan) || _ipu_is_irt_chan(dma_chan)) {
                burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
                _ipu_ic_idma_init(ipu, dma_chan, width, height, burst_size,
                        rot_mode);
        } else if (_ipu_is_smfc_chan(dma_chan)) {
                burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
                if ((pixel_fmt == IPU_PIX_FMT_GENERIC) &&
                        ((_ipu_ch_param_get_bpp(ipu, dma_chan) == 5) ||
                        (_ipu_ch_param_get_bpp(ipu, dma_chan) == 3)))
                        burst_size = burst_size >> 4;
                else
                        burst_size = burst_size >> 2;
                _ipu_smfc_set_burst_size(ipu, channel, burst_size - 1);
        }

        if (idma_is_set(ipu, IDMAC_CHA_PRI, dma_chan) && !cpu_is_mx53()
                && !cpu_is_mx6q())
                _ipu_ch_param_set_high_priority(ipu, dma_chan);

        _ipu_ch_param_dump(ipu, dma_chan);

        spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
        if (phyaddr_2 && g_ipu_hw_rev >= 2) {
                reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(dma_chan));
                reg &= ~idma_mask(dma_chan);
                ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(dma_chan));

                reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
                reg |= idma_mask(dma_chan);
                ipu_cm_write(ipu, reg, IPU_CHA_TRB_MODE_SEL(dma_chan));

                /* Set IDMAC third buffer's cpmem number */
                /* See __ipu_ch_get_third_buf_cpmem_num() for mapping */
                ipu_idmac_write(ipu, 0x00444047L, IDMAC_SUB_ADDR_4);
                ipu_idmac_write(ipu, 0x46004241L, IDMAC_SUB_ADDR_3);
                ipu_idmac_write(ipu, 0x00000045L, IDMAC_SUB_ADDR_1);

                /* Reset to buffer 0 */
                ipu_cm_write(ipu, tri_cur_buf_mask(dma_chan),
                                IPU_CHA_TRIPLE_CUR_BUF(dma_chan));
        } else {
                reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
                reg &= ~idma_mask(dma_chan);
                ipu_cm_write(ipu, reg, IPU_CHA_TRB_MODE_SEL(dma_chan));

                reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(dma_chan));
                if (phyaddr_1)
                        reg |= idma_mask(dma_chan);
                else
                        reg &= ~idma_mask(dma_chan);
                ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(dma_chan));

                /* Reset to buffer 0 */
                ipu_cm_write(ipu, idma_mask(dma_chan),
                                IPU_CHA_CUR_BUF(dma_chan));

        }
        spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);

        return 0;
}
EXPORT_SYMBOL(ipu_init_channel_buffer);
1410
1411 /*!
1412  * This function is called to update the physical address of a buffer for
1413  * a logical IPU channel.
1414  *
1415  * @param       ipu             ipu handler
1416  * @param       channel         Input parameter for the logical channel ID.
1417  *
1418  * @param       type            Input parameter for which buffer to update.
1419  *
1420  * @param       bufNum          Input parameter for the buffer number to update:
1421  *                              0, 1, or 2 (2 only for triple-buffer channels).
1422  *
1423  * @param       phyaddr         Input parameter buffer physical address.
1424  *
1425  * @return      This function returns 0 on success or negative error code on
1426  *              failure. It will fail if the buffer is already marked ready.
1427  */
1428 int32_t ipu_update_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
1429                                 ipu_buffer_t type, uint32_t bufNum, dma_addr_t phyaddr)
1430 {
1431         uint32_t reg;
1432         int ret = 0;
1433         unsigned long lock_flags;
1434         uint32_t dma_chan = channel_2_dma(channel, type);
1435         if (dma_chan == IDMA_CHAN_INVALID)
1436                 return -EINVAL;
1437
1438         spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
1439
1440         if (bufNum == 0)
1441                 reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan));
1442         else if (bufNum == 1)
1443                 reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan));
1444         else
1445                 reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan));
1446
1447         if ((reg & idma_mask(dma_chan)) == 0)
1448                 _ipu_ch_param_set_buffer(ipu, dma_chan, bufNum, phyaddr);
1449         else
1450                 ret = -EACCES;
1451
1452         spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1453         return ret;
1454 }
1455 EXPORT_SYMBOL(ipu_update_channel_buffer);
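
/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * a caller typically refreshes whichever buffer is not currently marked
 * ready and then hands it back with ipu_select_buffer().  The helper name,
 * channel choice and buffer index below are hypothetical.
 */
#if 0	/* usage sketch only, not built */
static int example_flip_bg_buffer(struct ipu_soc *ipu, uint32_t buf,
                                  dma_addr_t new_phys)
{
        int ret;

        ret = ipu_update_channel_buffer(ipu, MEM_BG_SYNC, IPU_INPUT_BUFFER,
                                        buf, new_phys);
        if (ret == -EACCES)
                return ret;     /* buffer still marked ready, retry after EOF */

        /* Hand the refreshed buffer back to the IDMAC. */
        return ipu_select_buffer(ipu, MEM_BG_SYNC, IPU_INPUT_BUFFER, buf);
}
#endif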
1456
1457
1458 /*!
1459  * This function is called to update the offsets of a buffer for a logical IPU channel.
1460  *
1461  * @param       ipu             ipu handler
1462  * @param       channel         Input parameter for the logical channel ID.
1463  *
1464  * @param       type            Input parameter for which buffer to update.
1465  *
1466  * @param       pixel_fmt       Input parameter for pixel format of buffer.
1467  *                              Pixel format is a FOURCC ASCII code.
1468  *
1469  * @param       width           Input parameter for width of buffer in pixels.
1470  *
1471  * @param       height          Input parameter for height of buffer in pixels.
1472  *
1473  * @param       stride          Input parameter for stride length of buffer
1474  *                              in pixels.
1475  *
1476  * @param       u               Predefined private U offset for additional
1477  *                              cropping, zero if not used.
1478  *
1479  * @param       v               Predefined private V offset for additional
1480  *                              cropping, zero if not used.
1481  *
1482  * @param       vertical_offset Vertical offset for the Y coordinate
1483  *                              in the existing frame.
1484  *
1485  * @param       horizontal_offset
1486  *                              Horizontal offset for the X coordinate
1487  *                              in the existing frame.
1488  *
1489  *
1490  * @return      Returns 0 on success or a negative error code on failure.
1491  *              This function will fail if any buffer is marked ready.
1492  */
1493
1494 int32_t ipu_update_channel_offset(struct ipu_soc *ipu,
1495                                 ipu_channel_t channel, ipu_buffer_t type,
1496                                 uint32_t pixel_fmt,
1497                                 uint16_t width, uint16_t height,
1498                                 uint32_t stride,
1499                                 uint32_t u, uint32_t v,
1500                                 uint32_t vertical_offset, uint32_t horizontal_offset)
1501 {
1502         int ret = 0;
1503         unsigned long lock_flags;
1504         uint32_t dma_chan = channel_2_dma(channel, type);
1505
1506         if (dma_chan == IDMA_CHAN_INVALID)
1507                 return -EINVAL;
1508
1509         spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
1510
1511         if ((ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan)) & idma_mask(dma_chan)) ||
1512             (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan)) & idma_mask(dma_chan)) ||
1513             ((ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan)) & idma_mask(dma_chan)) &&
1514              (ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan)) & idma_mask(dma_chan)) &&
1515              _ipu_is_trb_chan(dma_chan)))
1516                 ret = -EACCES;
1517         else
1518                 _ipu_ch_offset_update(ipu, dma_chan, pixel_fmt, width, height, stride,
1519                                       u, v, 0, vertical_offset, horizontal_offset);
1520
1521         spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1522         return ret;
1523 }
1524 EXPORT_SYMBOL(ipu_update_channel_offset);
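
/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * panning inside a larger frame by moving the read window.  The helper name,
 * the 1920x1080 RGB565 geometry and the unused U/V offsets are hypothetical;
 * the call only succeeds while none of the channel's buffers is marked ready.
 */
#if 0	/* usage sketch only, not built */
static int example_pan_bg(struct ipu_soc *ipu, uint32_t stride,
                          uint32_t x, uint32_t y)
{
        return ipu_update_channel_offset(ipu, MEM_BG_SYNC, IPU_INPUT_BUFFER,
                                         IPU_PIX_FMT_RGB565, 1920, 1080,
                                         stride, 0, 0, y, x);
}
#endif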
1525
1526
1527 /*!
1528  * This function is called to set a channel's buffer as ready.
1529  *
1530  * @param       ipu             ipu handler
1531  * @param       channel         Input parameter for the logical channel ID.
1532  *
1533  * @param       type            Input parameter for which buffer to mark ready.
1534  *
1535  * @param       bufNum          Input parameter for which buffer number to set
1536  *                              to the ready state.
1537  *
1538  * @return      Returns 0 on success or a negative error code on failure.
1539  */
1540 int32_t ipu_select_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
1541                         ipu_buffer_t type, uint32_t bufNum)
1542 {
1543         uint32_t dma_chan = channel_2_dma(channel, type);
1544         unsigned long lock_flags;
1545
1546         if (dma_chan == IDMA_CHAN_INVALID)
1547                 return -EINVAL;
1548
1549         /* Mark buffer to be ready. */
1550         spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
1551         if (bufNum == 0)
1552                 ipu_cm_write(ipu, idma_mask(dma_chan),
1553                              IPU_CHA_BUF0_RDY(dma_chan));
1554         else if (bufNum == 1)
1555                 ipu_cm_write(ipu, idma_mask(dma_chan),
1556                              IPU_CHA_BUF1_RDY(dma_chan));
1557         else
1558                 ipu_cm_write(ipu, idma_mask(dma_chan),
1559                              IPU_CHA_BUF2_RDY(dma_chan));
1560         spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1561         return 0;
1562 }
1563 EXPORT_SYMBOL(ipu_select_buffer);
1564
1565 /*!
1566  * This function marks the buffers of the VDI PRP VF input channels as ready.
1567  *
1568  * @param       ipu             ipu handler
1569  * @param       bufNum          Input parameter for which buffer number to set
1570  *                              to the ready state.
1571  *
1572  * @return      Returns 0 on success or a negative error code on failure.
1573  */
1574 int32_t ipu_select_multi_vdi_buffer(struct ipu_soc *ipu, uint32_t bufNum)
1575 {
1576
1577         uint32_t dma_chan = channel_2_dma(MEM_VDI_PRP_VF_MEM, IPU_INPUT_BUFFER);
1578         uint32_t mask_bit =
1579                 idma_mask(channel_2_dma(MEM_VDI_PRP_VF_MEM_P, IPU_INPUT_BUFFER))|
1580                 idma_mask(dma_chan)|
1581                 idma_mask(channel_2_dma(MEM_VDI_PRP_VF_MEM_N, IPU_INPUT_BUFFER));
1582         unsigned long lock_flags;
1583
1584         /* Mark buffers to be ready. */
1585         spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
1586         if (bufNum == 0)
1587                 ipu_cm_write(ipu, mask_bit, IPU_CHA_BUF0_RDY(dma_chan));
1588         else
1589                 ipu_cm_write(ipu, mask_bit, IPU_CHA_BUF1_RDY(dma_chan));
1590         spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1591         return 0;
1592 }
1593 EXPORT_SYMBOL(ipu_select_multi_vdi_buffer);
1594
1595 #define NA      -1
1596 static int proc_dest_sel[] = {
1597         0, 1, 1, 3, 5, 5, 4, 7, 8, 9, 10, 11, 12, 14, 15, 16,
1598         0, 1, 1, 5, 5, 5, 5, 5, 7, 8, 9, 10, 11, 12, 14, 31 };
1599 static int proc_src_sel[] = { 0, 6, 7, 6, 7, 8, 5, NA, NA, NA,
1600   NA, NA, NA, NA, NA,  1,  2,  3,  4,  7,  8, NA, 8, NA };
1601 static int disp_src_sel[] = { 0, 6, 7, 8, 3, 4, 5, NA, NA, NA,
1602   NA, NA, NA, NA, NA,  1, NA,  2, NA,  3,  4,  4,  4,  4 };
1603
1604
1605 /*!
1606  * This function links 2 channels together for automatic frame
1607  * synchronization. The output of the source channel is linked to the input of
1608  * the destination channel.
1609  *
1610  * @param       ipu             ipu handler
1611  * @param       src_ch          Input parameter for the logical channel ID of
1612  *                              the source channel.
1613  *
1614  * @param       dest_ch         Input parameter for the logical channel ID of
1615  *                              the destination channel.
1616  *
1617  * @return      This function returns 0 on success or negative error code on
1618  *              failure.
1619  */
1620 int32_t ipu_link_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch)
1621 {
1622         int retval = 0;
1623         unsigned long lock_flags;
1624         uint32_t fs_proc_flow1;
1625         uint32_t fs_proc_flow2;
1626         uint32_t fs_proc_flow3;
1627         uint32_t fs_disp_flow1;
1628
1629         spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
1630
1631         fs_proc_flow1 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
1632         fs_proc_flow2 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW2);
1633         fs_proc_flow3 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW3);
1634         fs_disp_flow1 = ipu_cm_read(ipu, IPU_FS_DISP_FLOW1);
1635
1636         switch (src_ch) {
1637         case CSI_MEM0:
1638                 fs_proc_flow3 &= ~FS_SMFC0_DEST_SEL_MASK;
1639                 fs_proc_flow3 |=
1640                         proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1641                         FS_SMFC0_DEST_SEL_OFFSET;
1642                 break;
1643         case CSI_MEM1:
1644                 fs_proc_flow3 &= ~FS_SMFC1_DEST_SEL_MASK;
1645                 fs_proc_flow3 |=
1646                         proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1647                         FS_SMFC1_DEST_SEL_OFFSET;
1648                 break;
1649         case CSI_MEM2:
1650                 fs_proc_flow3 &= ~FS_SMFC2_DEST_SEL_MASK;
1651                 fs_proc_flow3 |=
1652                         proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1653                         FS_SMFC2_DEST_SEL_OFFSET;
1654                 break;
1655         case CSI_MEM3:
1656                 fs_proc_flow3 &= ~FS_SMFC3_DEST_SEL_MASK;
1657                 fs_proc_flow3 |=
1658                         proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1659                         FS_SMFC3_DEST_SEL_OFFSET;
1660                 break;
1661         case CSI_PRP_ENC_MEM:
1662                 fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
1663                 fs_proc_flow2 |=
1664                         proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1665                         FS_PRPENC_DEST_SEL_OFFSET;
1666                 break;
1667         case CSI_PRP_VF_MEM:
1668                 fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1669                 fs_proc_flow2 |=
1670                         proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1671                         FS_PRPVF_DEST_SEL_OFFSET;
1672                 break;
1673         case MEM_PP_MEM:
1674                 fs_proc_flow2 &= ~FS_PP_DEST_SEL_MASK;
1675                 fs_proc_flow2 |=
1676                     proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1677                     FS_PP_DEST_SEL_OFFSET;
1678                 break;
1679         case MEM_ROT_PP_MEM:
1680                 fs_proc_flow2 &= ~FS_PP_ROT_DEST_SEL_MASK;
1681                 fs_proc_flow2 |=
1682                     proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1683                     FS_PP_ROT_DEST_SEL_OFFSET;
1684                 break;
1685         case MEM_PRP_ENC_MEM:
1686                 fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
1687                 fs_proc_flow2 |=
1688                     proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1689                     FS_PRPENC_DEST_SEL_OFFSET;
1690                 break;
1691         case MEM_ROT_ENC_MEM:
1692                 fs_proc_flow2 &= ~FS_PRPENC_ROT_DEST_SEL_MASK;
1693                 fs_proc_flow2 |=
1694                     proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1695                     FS_PRPENC_ROT_DEST_SEL_OFFSET;
1696                 break;
1697         case MEM_PRP_VF_MEM:
1698                 fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1699                 fs_proc_flow2 |=
1700                     proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1701                     FS_PRPVF_DEST_SEL_OFFSET;
1702                 break;
1703         case MEM_VDI_PRP_VF_MEM:
1704                 fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1705                 fs_proc_flow2 |=
1706                     proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1707                     FS_PRPVF_DEST_SEL_OFFSET;
1708                 break;
1709         case MEM_ROT_VF_MEM:
1710                 fs_proc_flow2 &= ~FS_PRPVF_ROT_DEST_SEL_MASK;
1711                 fs_proc_flow2 |=
1712                     proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
1713                     FS_PRPVF_ROT_DEST_SEL_OFFSET;
1714                 break;
1715         default:
1716                 retval = -EINVAL;
1717                 goto err;
1718         }
1719
1720         switch (dest_ch) {
1721         case MEM_PP_MEM:
1722                 fs_proc_flow1 &= ~FS_PP_SRC_SEL_MASK;
1723                 fs_proc_flow1 |=
1724                     proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PP_SRC_SEL_OFFSET;
1725                 break;
1726         case MEM_ROT_PP_MEM:
1727                 fs_proc_flow1 &= ~FS_PP_ROT_SRC_SEL_MASK;
1728                 fs_proc_flow1 |=
1729                     proc_src_sel[IPU_CHAN_ID(src_ch)] <<
1730                     FS_PP_ROT_SRC_SEL_OFFSET;
1731                 break;
1732         case MEM_PRP_ENC_MEM:
1733                 fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1734                 fs_proc_flow1 |=
1735                     proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
1736                 break;
1737         case MEM_ROT_ENC_MEM:
1738                 fs_proc_flow1 &= ~FS_PRPENC_ROT_SRC_SEL_MASK;
1739                 fs_proc_flow1 |=
1740                     proc_src_sel[IPU_CHAN_ID(src_ch)] <<
1741                     FS_PRPENC_ROT_SRC_SEL_OFFSET;
1742                 break;
1743         case MEM_PRP_VF_MEM:
1744                 fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1745                 fs_proc_flow1 |=
1746                     proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
1747                 break;
1748         case MEM_VDI_PRP_VF_MEM:
1749                 fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1750                 fs_proc_flow1 |=
1751                     proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
1752                 break;
1753         case MEM_ROT_VF_MEM:
1754                 fs_proc_flow1 &= ~FS_PRPVF_ROT_SRC_SEL_MASK;
1755                 fs_proc_flow1 |=
1756                     proc_src_sel[IPU_CHAN_ID(src_ch)] <<
1757                     FS_PRPVF_ROT_SRC_SEL_OFFSET;
1758                 break;
1759         case MEM_DC_SYNC:
1760                 fs_disp_flow1 &= ~FS_DC1_SRC_SEL_MASK;
1761                 fs_disp_flow1 |=
1762                     disp_src_sel[IPU_CHAN_ID(src_ch)] << FS_DC1_SRC_SEL_OFFSET;
1763                 break;
1764         case MEM_BG_SYNC:
1765                 fs_disp_flow1 &= ~FS_DP_SYNC0_SRC_SEL_MASK;
1766                 fs_disp_flow1 |=
1767                     disp_src_sel[IPU_CHAN_ID(src_ch)] <<
1768                     FS_DP_SYNC0_SRC_SEL_OFFSET;
1769                 break;
1770         case MEM_FG_SYNC:
1771                 fs_disp_flow1 &= ~FS_DP_SYNC1_SRC_SEL_MASK;
1772                 fs_disp_flow1 |=
1773                     disp_src_sel[IPU_CHAN_ID(src_ch)] <<
1774                     FS_DP_SYNC1_SRC_SEL_OFFSET;
1775                 break;
1776         case MEM_DC_ASYNC:
1777                 fs_disp_flow1 &= ~FS_DC2_SRC_SEL_MASK;
1778                 fs_disp_flow1 |=
1779                     disp_src_sel[IPU_CHAN_ID(src_ch)] << FS_DC2_SRC_SEL_OFFSET;
1780                 break;
1781         case MEM_BG_ASYNC0:
1782                 fs_disp_flow1 &= ~FS_DP_ASYNC0_SRC_SEL_MASK;
1783                 fs_disp_flow1 |=
1784                     disp_src_sel[IPU_CHAN_ID(src_ch)] <<
1785                     FS_DP_ASYNC0_SRC_SEL_OFFSET;
1786                 break;
1787         case MEM_FG_ASYNC0:
1788                 fs_disp_flow1 &= ~FS_DP_ASYNC1_SRC_SEL_MASK;
1789                 fs_disp_flow1 |=
1790                     disp_src_sel[IPU_CHAN_ID(src_ch)] <<
1791                     FS_DP_ASYNC1_SRC_SEL_OFFSET;
1792                 break;
1793         default:
1794                 retval = -EINVAL;
1795                 goto err;
1796         }
1797
1798         ipu_cm_write(ipu, fs_proc_flow1, IPU_FS_PROC_FLOW1);
1799         ipu_cm_write(ipu, fs_proc_flow2, IPU_FS_PROC_FLOW2);
1800         ipu_cm_write(ipu, fs_proc_flow3, IPU_FS_PROC_FLOW3);
1801         ipu_cm_write(ipu, fs_disp_flow1, IPU_FS_DISP_FLOW1);
1802
1803 err:
1804         spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1805         return retval;
1806 }
1807 EXPORT_SYMBOL(ipu_link_channels);
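
/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * linking routes frames from the source task to the destination through the
 * FS_PROC/FS_DISP flow registers programmed above, without CPU copies.  The
 * helper name and the PP-to-background-plane pairing are hypothetical.
 */
#if 0	/* usage sketch only, not built */
static int example_link_pp_to_display(struct ipu_soc *ipu)
{
        return ipu_link_channels(ipu, MEM_PP_MEM, MEM_BG_SYNC);
}
#endif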
1808
1809 /*!
1810  * This function unlinks 2 channels and disables automatic frame
1811  * synchronization.
1812  *
1813  * @param       ipu             ipu handler
1814  * @param       src_ch          Input parameter for the logical channel ID of
1815  *                              the source channel.
1816  *
1817  * @param       dest_ch         Input parameter for the logical channel ID of
1818  *                              the destination channel.
1819  *
1820  * @return      This function returns 0 on success or negative error code on
1821  *              failure.
1822  */
1823 int32_t ipu_unlink_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch)
1824 {
1825         int retval = 0;
1826         unsigned long lock_flags;
1827         uint32_t fs_proc_flow1;
1828         uint32_t fs_proc_flow2;
1829         uint32_t fs_proc_flow3;
1830         uint32_t fs_disp_flow1;
1831
1832         spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
1833
1834         fs_proc_flow1 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
1835         fs_proc_flow2 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW2);
1836         fs_proc_flow3 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW3);
1837         fs_disp_flow1 = ipu_cm_read(ipu, IPU_FS_DISP_FLOW1);
1838
1839         switch (src_ch) {
1840         case CSI_MEM0:
1841                 fs_proc_flow3 &= ~FS_SMFC0_DEST_SEL_MASK;
1842                 break;
1843         case CSI_MEM1:
1844                 fs_proc_flow3 &= ~FS_SMFC1_DEST_SEL_MASK;
1845                 break;
1846         case CSI_MEM2:
1847                 fs_proc_flow3 &= ~FS_SMFC2_DEST_SEL_MASK;
1848                 break;
1849         case CSI_MEM3:
1850                 fs_proc_flow3 &= ~FS_SMFC3_DEST_SEL_MASK;
1851                 break;
1852         case CSI_PRP_ENC_MEM:
1853                 fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
1854                 break;
1855         case CSI_PRP_VF_MEM:
1856                 fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1857                 break;
1858         case MEM_PP_MEM:
1859                 fs_proc_flow2 &= ~FS_PP_DEST_SEL_MASK;
1860                 break;
1861         case MEM_ROT_PP_MEM:
1862                 fs_proc_flow2 &= ~FS_PP_ROT_DEST_SEL_MASK;
1863                 break;
1864         case MEM_PRP_ENC_MEM:
1865                 fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
1866                 break;
1867         case MEM_ROT_ENC_MEM:
1868                 fs_proc_flow2 &= ~FS_PRPENC_ROT_DEST_SEL_MASK;
1869                 break;
1870         case MEM_PRP_VF_MEM:
1871                 fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1872                 break;
1873         case MEM_VDI_PRP_VF_MEM:
1874                 fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
1875                 break;
1876         case MEM_ROT_VF_MEM:
1877                 fs_proc_flow2 &= ~FS_PRPVF_ROT_DEST_SEL_MASK;
1878                 break;
1879         default:
1880                 retval = -EINVAL;
1881                 goto err;
1882         }
1883
1884         switch (dest_ch) {
1885         case MEM_PP_MEM:
1886                 fs_proc_flow1 &= ~FS_PP_SRC_SEL_MASK;
1887                 break;
1888         case MEM_ROT_PP_MEM:
1889                 fs_proc_flow1 &= ~FS_PP_ROT_SRC_SEL_MASK;
1890                 break;
1891         case MEM_PRP_ENC_MEM:
1892                 fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1893                 break;
1894         case MEM_ROT_ENC_MEM:
1895                 fs_proc_flow1 &= ~FS_PRPENC_ROT_SRC_SEL_MASK;
1896                 break;
1897         case MEM_PRP_VF_MEM:
1898                 fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1899                 break;
1900         case MEM_VDI_PRP_VF_MEM:
1901                 fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
1902                 break;
1903         case MEM_ROT_VF_MEM:
1904                 fs_proc_flow1 &= ~FS_PRPVF_ROT_SRC_SEL_MASK;
1905                 break;
1906         case MEM_DC_SYNC:
1907                 fs_disp_flow1 &= ~FS_DC1_SRC_SEL_MASK;
1908                 break;
1909         case MEM_BG_SYNC:
1910                 fs_disp_flow1 &= ~FS_DP_SYNC0_SRC_SEL_MASK;
1911                 break;
1912         case MEM_FG_SYNC:
1913                 fs_disp_flow1 &= ~FS_DP_SYNC1_SRC_SEL_MASK;
1914                 break;
1915         case MEM_DC_ASYNC:
1916                 fs_disp_flow1 &= ~FS_DC2_SRC_SEL_MASK;
1917                 break;
1918         case MEM_BG_ASYNC0:
1919                 fs_disp_flow1 &= ~FS_DP_ASYNC0_SRC_SEL_MASK;
1920                 break;
1921         case MEM_FG_ASYNC0:
1922                 fs_disp_flow1 &= ~FS_DP_ASYNC1_SRC_SEL_MASK;
1923                 break;
1924         default:
1925                 retval = -EINVAL;
1926                 goto err;
1927         }
1928
1929         ipu_cm_write(ipu, fs_proc_flow1, IPU_FS_PROC_FLOW1);
1930         ipu_cm_write(ipu, fs_proc_flow2, IPU_FS_PROC_FLOW2);
1931         ipu_cm_write(ipu, fs_proc_flow3, IPU_FS_PROC_FLOW3);
1932         ipu_cm_write(ipu, fs_disp_flow1, IPU_FS_DISP_FLOW1);
1933
1934 err:
1935         spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1936         return retval;
1937 }
1938 EXPORT_SYMBOL(ipu_unlink_channels);
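
/*
 * Illustrative teardown sketch (editorial addition, not part of the driver):
 * the mirror of the linking example above, typically called after the two
 * channels have been disabled.  The helper name and channel pair are
 * hypothetical.
 */
#if 0	/* usage sketch only, not built */
static void example_unlink_pp_from_display(struct ipu_soc *ipu)
{
        ipu_unlink_channels(ipu, MEM_PP_MEM, MEM_BG_SYNC);
}
#endif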
1939
1940 /*!
1941  * This function checks whether a logical channel is currently enabled.
1942  *
1943  * @param       ipu             ipu handler
1944  * @param       channel         Input parameter for the logical channel ID.
1945  *
1946  * @return      This function returns 1 if the requested channel is enabled
1947  *              or 0 if it is not.
1948  */
1949 int32_t ipu_is_channel_busy(struct ipu_soc *ipu, ipu_channel_t channel)
1950 {
1951         uint32_t reg;
1952         uint32_t in_dma;
1953         uint32_t out_dma;
1954
1955         out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
1956         in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
1957
1958         reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
1959         if (reg & idma_mask(in_dma))
1960                 return 1;
1961         reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
1962         if (reg & idma_mask(out_dma))
1963                 return 1;
1964         return 0;
1965 }
1966 EXPORT_SYMBOL(ipu_is_channel_busy);
1967
1968 /*!
1969  * This function enables a logical channel.
1970  *
1971  * @param       ipu             ipu handler
1972  * @param       channel         Input parameter for the logical channel ID.
1973  *
1974  * @return      This function returns 0 on success or negative error code on
1975  *              failure.
1976  */
1977 int32_t ipu_enable_channel(struct ipu_soc *ipu, ipu_channel_t channel)
1978 {
1979         uint32_t reg;
1980         unsigned long lock_flags;
1981         uint32_t ipu_conf;
1982         uint32_t in_dma;
1983         uint32_t out_dma;
1984         uint32_t sec_dma;
1985         uint32_t thrd_dma;
1986
1987         spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
1988
1989         if (ipu->channel_enable_mask & (1L << IPU_CHAN_ID(channel))) {
1990                 dev_err(ipu->dev, "Warning: channel already enabled %d\n",
1991                         IPU_CHAN_ID(channel));
1992                 spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
1993                 return -EACCES;
1994         }
1995
1996         /* Get input and output dma channels */
1997         out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
1998         in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
1999
2000         ipu_conf = ipu_cm_read(ipu, IPU_CONF);
2001         if (ipu->di_use_count[0] > 0) {
2002                 ipu_conf |= IPU_CONF_DI0_EN;
2003         }
2004         if (ipu->di_use_count[1] > 0) {
2005                 ipu_conf |= IPU_CONF_DI1_EN;
2006         }
2007         if (ipu->dp_use_count > 0)
2008                 ipu_conf |= IPU_CONF_DP_EN;
2009         if (ipu->dc_use_count > 0)
2010                 ipu_conf |= IPU_CONF_DC_EN;
2011         if (ipu->dmfc_use_count > 0)
2012                 ipu_conf |= IPU_CONF_DMFC_EN;
2013         if (ipu->ic_use_count > 0)
2014                 ipu_conf |= IPU_CONF_IC_EN;
2015         if (ipu->vdi_use_count > 0) {
2016                 ipu_conf |= IPU_CONF_ISP_EN;
2017                 ipu_conf |= IPU_CONF_VDI_EN;
2018                 ipu_conf |= IPU_CONF_IC_INPUT;
2019         }
2020         if (ipu->rot_use_count > 0)
2021                 ipu_conf |= IPU_CONF_ROT_EN;
2022         if (ipu->smfc_use_count > 0)
2023                 ipu_conf |= IPU_CONF_SMFC_EN;
2024         ipu_cm_write(ipu, ipu_conf, IPU_CONF);
2025
2026         if (idma_is_valid(in_dma)) {
2027                 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
2028                 ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_CHA_EN(in_dma));
2029         }
2030         if (idma_is_valid(out_dma)) {
2031                 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
2032                 ipu_idmac_write(ipu, reg | idma_mask(out_dma), IDMAC_CHA_EN(out_dma));
2033         }
2034
2035         if ((ipu->sec_chan_en[IPU_CHAN_ID(channel)]) &&
2036                 ((channel == MEM_PP_MEM) || (channel == MEM_PRP_VF_MEM) ||
2037                  (channel == MEM_VDI_PRP_VF_MEM))) {
2038                 sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
2039                 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(sec_dma));
2040                 ipu_idmac_write(ipu, reg | idma_mask(sec_dma), IDMAC_CHA_EN(sec_dma));
2041         }
2042         if ((ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) &&
2043                 ((channel == MEM_PP_MEM) || (channel == MEM_PRP_VF_MEM))) {
2044                 thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
2045                 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
2046                 ipu_idmac_write(ipu, reg | idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
2047
2048                 sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
2049                 reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
2050                 ipu_idmac_write(ipu, reg | idma_mask(sec_dma), IDMAC_SEP_ALPHA);
2051         } else if ((ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) &&
2052                    ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC))) {
2053                 thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
2054                 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
2055                 ipu_idmac_write(ipu, reg | idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
2056                 reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
2057                 ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_SEP_ALPHA);
2058         }
2059
2060         if ((channel == MEM_DC_SYNC) || (channel == MEM_BG_SYNC) ||
2061             (channel == MEM_FG_SYNC)) {
2062                 reg = ipu_idmac_read(ipu, IDMAC_WM_EN(in_dma));
2063                 ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_WM_EN(in_dma));
2064
2065                 _ipu_dp_dc_enable(ipu, channel);
2066         }
2067
2068         if (_ipu_is_ic_chan(in_dma) || _ipu_is_ic_chan(out_dma) ||
2069                 _ipu_is_irt_chan(in_dma) || _ipu_is_irt_chan(out_dma))
2070                 _ipu_ic_enable_task(ipu, channel);
2071
2072         ipu->channel_enable_mask |= 1L << IPU_CHAN_ID(channel);
2073
2074         spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2075
2076         return 0;
2077 }
2078 EXPORT_SYMBOL(ipu_enable_channel);
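
/*
 * Illustrative bring-up sketch (editorial addition, not part of the driver):
 * one common ordering is to initialize the channel's buffers first (via
 * ipu_init_channel_buffer()), enable the channel, and then feed it the first
 * buffers with ipu_select_buffer().  The helper name and the choice of the
 * post-processing channel are hypothetical.
 */
#if 0	/* usage sketch only, not built */
static int example_start_pp(struct ipu_soc *ipu)
{
        int ret = ipu_enable_channel(ipu, MEM_PP_MEM);

        if (ret)
                return ret;     /* -EACCES if it was already enabled */

        ipu_select_buffer(ipu, MEM_PP_MEM, IPU_OUTPUT_BUFFER, 0);
        ipu_select_buffer(ipu, MEM_PP_MEM, IPU_INPUT_BUFFER, 0);
        return 0;
}
#endif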
2079
2080 /*!
2081  * This function checks whether a buffer is ready for a logical channel.
2082  *
2083  * @param       ipu             ipu handler
2084  * @param       channel         Input parameter for the logical channel ID.
2085  *
2086  * @param       type            Input parameter for which buffer to check.
2087  *
2088  * @param       bufNum          Input parameter for which buffer number to
2089  *                              check the ready state.
2090  * @return      Returns 1 if the buffer is ready, 0 if not, or -EINVAL.
2091  */
2092 int32_t ipu_check_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
2093                 uint32_t bufNum)
2094 {
2095         uint32_t dma_chan = channel_2_dma(channel, type);
2096         uint32_t reg;
2097
2098         if (dma_chan == IDMA_CHAN_INVALID)
2099                 return -EINVAL;
2100
2101         if (bufNum == 0)
2102                 reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan));
2103         else if (bufNum == 1)
2104                 reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan));
2105         else
2106                 reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan));
2107
2108         if (reg & idma_mask(dma_chan))
2109                 return 1;
2110         else
2111                 return 0;
2112 }
2113 EXPORT_SYMBOL(ipu_check_buffer_ready);
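
/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * bounded polling until the IDMAC has consumed buffer 0 of a channel.  The
 * helper name, channel and roughly 100 ms budget are hypothetical; callers
 * more commonly wait for the channel's EOF interrupt instead.
 */
#if 0	/* usage sketch only, not built */
static int example_wait_buf0_consumed(struct ipu_soc *ipu)
{
        int timeout = 100;

        while (ipu_check_buffer_ready(ipu, MEM_BG_SYNC, IPU_INPUT_BUFFER, 0)) {
                if (!timeout--)
                        return -ETIME;
                msleep(1);
        }
        return 0;
}
#endif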
2114
2115 /*!
2116  * This function clears the buffer-ready flag for a logical channel.
2117  *
2118  * @param       ipu             ipu handler
2119  * @param       channel         Input parameter for the logical channel ID.
2120  *
2121  * @param       type            Input parameter which buffer to clear.
2122  *
2123  * @param       bufNum          Input parameter for which buffer number to
2124  *                              clear the ready state.
2125  *
2126  */
2127 void ipu_clear_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
2128                 uint32_t bufNum)
2129 {
2130         unsigned long lock_flags;
2131         uint32_t dma_ch = channel_2_dma(channel, type);
2132
2133         if (!idma_is_valid(dma_ch))
2134                 return;
2135
2136         spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2137         ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* BUFx_RDY writes will clear bits */
2138         if (bufNum == 0)
2139                 ipu_cm_write(ipu, idma_mask(dma_ch),
2140                                 IPU_CHA_BUF0_RDY(dma_ch));
2141         else if (bufNum == 1)
2142                 ipu_cm_write(ipu, idma_mask(dma_ch),
2143                                 IPU_CHA_BUF1_RDY(dma_ch));
2144         else
2145                 ipu_cm_write(ipu, idma_mask(dma_ch),
2146                                 IPU_CHA_BUF2_RDY(dma_ch));
2147         ipu_cm_write(ipu, 0x0, IPU_GPR); /* restore: BUFx_RDY writes set bits */
2148         spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2149 }
2150 EXPORT_SYMBOL(ipu_clear_buffer_ready);
2151
2152 static irqreturn_t disable_chan_irq_handler(int irq, void *dev_id)
2153 {
2154         struct completion *comp = dev_id;
2155
2156         complete(comp);
2157         return IRQ_HANDLED;
2158 }
2159
2160 /*!
2161  * This function disables a logical channel.
2162  *
2163  * @param       ipu             ipu handler
2164  * @param       channel         Input parameter for the logical channel ID.
2165  *
2166  * @param       wait_for_stop   Flag selecting whether to wait for the channel's
2167  *                              end of frame or to return immediately.
2168  *
2169  * @return      This function returns 0 on success or negative error code on
2170  *              failure.
2171  */
2172 int32_t ipu_disable_channel(struct ipu_soc *ipu, ipu_channel_t channel, bool wait_for_stop)
2173 {
2174         uint32_t reg;
2175         unsigned long lock_flags;
2176         uint32_t in_dma;
2177         uint32_t out_dma;
2178         uint32_t sec_dma = NO_DMA;
2179         uint32_t thrd_dma = NO_DMA;
2180         uint16_t fg_pos_x, fg_pos_y;
2181
2182         spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2183
2184         if ((ipu->channel_enable_mask & (1L << IPU_CHAN_ID(channel))) == 0) {
2185                 dev_err(ipu->dev, "Channel already disabled %d\n",
2186                         IPU_CHAN_ID(channel));
2187                 spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2188                 return -EACCES;
2189         }
2190
2191         spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2192
2193         /* Get input and output dma channels */
2194         out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
2195         in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
2196
2197         if ((idma_is_valid(in_dma) &&
2198                 !idma_is_set(ipu, IDMAC_CHA_EN, in_dma))
2199                 && (idma_is_valid(out_dma) &&
2200                 !idma_is_set(ipu, IDMAC_CHA_EN, out_dma)))
2201                 return -EINVAL;
2202
2203         if (ipu->sec_chan_en[IPU_CHAN_ID(channel)])
2204                 sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
2205         if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) {
2206                 sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
2207                 thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
2208         }
2209
2210         if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC) ||
2211             (channel == MEM_DC_SYNC)) {
2212                 if (channel == MEM_FG_SYNC) {
2213                         ipu_disp_get_window_pos(ipu, channel, &fg_pos_x, &fg_pos_y);
2214                         ipu_disp_set_window_pos(ipu, channel, 0, 0);
2215                 }
2216
2217                 _ipu_dp_dc_disable(ipu, channel, false);
2218
2219                 /*
2220                  * Wait for the BG channel EOF before disabling the FG IDMAC;
2221                  * this avoids an FG NFB4EOF error.
2222                  */
2223                 if (channel == MEM_FG_SYNC) {
2224                         int timeout = 50;
2225
2226                         ipu_cm_write(ipu, IPUIRQ_2_MASK(IPU_IRQ_BG_SYNC_EOF),
2227                                         IPUIRQ_2_STATREG(IPU_IRQ_BG_SYNC_EOF));
2228                         while ((ipu_cm_read(ipu, IPUIRQ_2_STATREG(IPU_IRQ_BG_SYNC_EOF)) &
2229                                                 IPUIRQ_2_MASK(IPU_IRQ_BG_SYNC_EOF)) == 0) {
2230                                 msleep(10);
2231                                 timeout -= 10;
2232                                 if (timeout <= 0) {
2233                                         dev_err(ipu->dev, "warning: wait for bg sync eof timeout\n");
2234                                         break;
2235                                 }
2236                         }
2237                 }
2238         } else if (wait_for_stop) {
2239                 while (idma_is_set(ipu, IDMAC_CHA_BUSY, in_dma) ||
2240                        idma_is_set(ipu, IDMAC_CHA_BUSY, out_dma) ||
2241                         (ipu->sec_chan_en[IPU_CHAN_ID(channel)] &&
2242                         idma_is_set(ipu, IDMAC_CHA_BUSY, sec_dma)) ||
2243                         (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] &&
2244                         idma_is_set(ipu, IDMAC_CHA_BUSY, thrd_dma))) {
2245                         int ret; uint32_t irq = 0xffffffff;
2246                         DECLARE_COMPLETION_ONSTACK(disable_comp);
2247
2248                         if (idma_is_set(ipu, IDMAC_CHA_BUSY, out_dma))
2249                                 irq = out_dma;
2250                         if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] &&
2251                                 idma_is_set(ipu, IDMAC_CHA_BUSY, sec_dma))
2252                                 irq = sec_dma;
2253                         if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] &&
2254                                 idma_is_set(ipu, IDMAC_CHA_BUSY, thrd_dma))
2255                                 irq = thrd_dma;
2256                         if (idma_is_set(ipu, IDMAC_CHA_BUSY, in_dma))
2257                                 irq = in_dma;
2258
2259                         if (irq == 0xffffffff) {
2260                                 dev_err(ipu->dev, "warning: no channel busy, break\n");
2261                                 break;
2262                         }
2263
2264                         dev_err(ipu->dev, "warning: channel %d busy, need wait\n", irq);
2265
2266                         ret = ipu_request_irq(ipu, irq, disable_chan_irq_handler, 0, NULL, &disable_comp);
2267                         if (ret < 0) {
2268                                 dev_err(ipu->dev, "irq %d in use\n", irq);
2269                                 break;
2270                         } else {
2271                                 ret = wait_for_completion_timeout(&disable_comp, msecs_to_jiffies(200));
2272                                 ipu_free_irq(ipu, irq, &disable_comp);
2273                                 if (ret == 0) {
2274                                         ipu_dump_registers(ipu);
2275                                         dev_err(ipu->dev, "warning: disable ipu dma channel %d during its busy state\n", irq);
2276                                         break;
2277                                 }
2278                         }
2279                 }
2280         }
2281
2282         spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2283
2284         if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC) ||
2285             (channel == MEM_DC_SYNC)) {
2286                 reg = ipu_idmac_read(ipu, IDMAC_WM_EN(in_dma));
2287                 ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_WM_EN(in_dma));
2288         }
2289
2290         /* Disable IC task */
2291         if (_ipu_is_ic_chan(in_dma) || _ipu_is_ic_chan(out_dma) ||
2292                 _ipu_is_irt_chan(in_dma) || _ipu_is_irt_chan(out_dma))
2293                 _ipu_ic_disable_task(ipu, channel);
2294
2295         /* Disable DMA channel(s) */
2296         if (idma_is_valid(in_dma)) {
2297                 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
2298                 ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_CHA_EN(in_dma));
2299                 ipu_cm_write(ipu, idma_mask(in_dma), IPU_CHA_CUR_BUF(in_dma));
2300                 ipu_cm_write(ipu, tri_cur_buf_mask(in_dma),
2301                                         IPU_CHA_TRIPLE_CUR_BUF(in_dma));
2302         }
2303         if (idma_is_valid(out_dma)) {
2304                 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
2305                 ipu_idmac_write(ipu, reg & ~idma_mask(out_dma), IDMAC_CHA_EN(out_dma));
2306                 ipu_cm_write(ipu, idma_mask(out_dma), IPU_CHA_CUR_BUF(out_dma));
2307                 ipu_cm_write(ipu, tri_cur_buf_mask(out_dma),
2308                                         IPU_CHA_TRIPLE_CUR_BUF(out_dma));
2309         }
2310         if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(sec_dma)) {
2311                 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(sec_dma));
2312                 ipu_idmac_write(ipu, reg & ~idma_mask(sec_dma), IDMAC_CHA_EN(sec_dma));
2313                 ipu_cm_write(ipu, idma_mask(sec_dma), IPU_CHA_CUR_BUF(sec_dma));
2314         }
2315         if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(thrd_dma)) {
2316                 reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
2317                 ipu_idmac_write(ipu, reg & ~idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
2318                 if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC) {
2319                         reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
2320                         ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_SEP_ALPHA);
2321                 } else {
2322                         reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
2323                         ipu_idmac_write(ipu, reg & ~idma_mask(sec_dma), IDMAC_SEP_ALPHA);
2324                 }
2325                 ipu_cm_write(ipu, idma_mask(thrd_dma), IPU_CHA_CUR_BUF(thrd_dma));
2326         }
2327
2328         ipu->channel_enable_mask &= ~(1L << IPU_CHAN_ID(channel));
2329
2330         spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2331
2332         /* Set channel buffers NOT to be ready */
2333         if (idma_is_valid(in_dma)) {
2334                 ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 0);
2335                 ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 1);
2336                 ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 2);
2337         }
2338         if (idma_is_valid(out_dma)) {
2339                 ipu_clear_buffer_ready(ipu, channel, IPU_OUTPUT_BUFFER, 0);
2340                 ipu_clear_buffer_ready(ipu, channel, IPU_OUTPUT_BUFFER, 1);
2341         }
2342         if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(sec_dma)) {
2343                 ipu_clear_buffer_ready(ipu, channel, IPU_GRAPH_IN_BUFFER, 0);
2344                 ipu_clear_buffer_ready(ipu, channel, IPU_GRAPH_IN_BUFFER, 1);
2345         }
2346         if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(thrd_dma)) {
2347                 ipu_clear_buffer_ready(ipu, channel, IPU_ALPHA_IN_BUFFER, 0);
2348                 ipu_clear_buffer_ready(ipu, channel, IPU_ALPHA_IN_BUFFER, 1);
2349         }
2350
2351         if (channel == MEM_FG_SYNC)
2352                 ipu_disp_set_window_pos(ipu, channel, fg_pos_x, fg_pos_y);
2353
2354         return 0;
2355 }
2356 EXPORT_SYMBOL(ipu_disable_channel);
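
/*
 * Illustrative shutdown sketch (editorial addition, not part of the driver):
 * passing wait_for_stop = true lets the function wait, with its own internal
 * timeouts, for the IDMAC channels to go idle before they are switched off.
 * The helper name and channel are hypothetical.
 */
#if 0	/* usage sketch only, not built */
static int example_stop_pp(struct ipu_soc *ipu)
{
        /* -EACCES means the channel was not enabled to begin with. */
        return ipu_disable_channel(ipu, MEM_PP_MEM, true);
}
#endif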
2357
2358 /*!
2359  * This function enables CSI.
2360  *
2361  * @param       ipu             ipu handler
2362  * @param       csi             CSI number, 0 or 1.
2363  *
2364  * @return      This function returns 0 on success or negative error code on
2365  *              failure.
2366  */
2367 int32_t ipu_enable_csi(struct ipu_soc *ipu, uint32_t csi)
2368 {
2369         uint32_t reg;
2370         unsigned long lock_flags;
2371
2372         if (csi > 1) {
2373                 dev_err(ipu->dev, "Wrong csi num_%d\n", csi);
2374                 return -EINVAL;
2375         }
2376
2377         spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2378         ipu->csi_use_count[csi]++;
2379
2380         if (ipu->csi_use_count[csi] == 1) {
2381                 reg = ipu_cm_read(ipu, IPU_CONF);
2382                 if (csi == 0)
2383                         ipu_cm_write(ipu, reg | IPU_CONF_CSI0_EN, IPU_CONF);
2384                 else
2385                         ipu_cm_write(ipu, reg | IPU_CONF_CSI1_EN, IPU_CONF);
2386         }
2387         spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2388         return 0;
2389 }
2390 EXPORT_SYMBOL(ipu_enable_csi);
2391
2392 /*!
2393  * This function disables CSI.
2394  *
2395  * @param       ipu             ipu handler
2396  * @param       csi             CSI number, 0 or 1.
2397  *
2398  * @return      This function returns 0 on success or negative error code on
2399  *              failure.
2400  */
2401 int32_t ipu_disable_csi(struct ipu_soc *ipu, uint32_t csi)
2402 {
2403         uint32_t reg;
2404         unsigned long lock_flags;
2405
2406         if (csi > 1) {
2407                 dev_err(ipu->dev, "Wrong csi num_%d\n", csi);
2408                 return -EINVAL;
2409         }
2410
2411         spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2412         ipu->csi_use_count[csi]--;
2413
2414         if (ipu->csi_use_count[csi] == 0) {
2415                 reg = ipu_cm_read(ipu, IPU_CONF);
2416                 if (csi == 0)
2417                         ipu_cm_write(ipu, reg & ~IPU_CONF_CSI0_EN, IPU_CONF);
2418                 else
2419                         ipu_cm_write(ipu, reg & ~IPU_CONF_CSI1_EN, IPU_CONF);
2420         }
2421         spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2422         return 0;
2423 }
2424 EXPORT_SYMBOL(ipu_disable_csi);
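
/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * the CSI enable/disable pair is reference counted, so nested users only
 * touch IPU_CONF on the first enable and the last disable.  The helper name
 * and the capture placeholder are hypothetical.
 */
#if 0	/* usage sketch only, not built */
static int example_capture_on_csi0(struct ipu_soc *ipu)
{
        int ret = ipu_enable_csi(ipu, 0);

        if (ret)
                return ret;

        /* ... run a CSI_MEMn/SMFC capture channel here ... */

        return ipu_disable_csi(ipu, 0);
}
#endif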
2425
2426 static irqreturn_t ipu_irq_handler(int irq, void *desc)
2427 {
2428         struct ipu_soc *ipu = desc;
2429         int i;
2430         uint32_t line;
2431         irqreturn_t result = IRQ_NONE;
2432         uint32_t int_stat;
2433         const int err_reg[] = { 5, 6, 9, 10, 0 };
2434         const int int_reg[] = { 1, 2, 3, 4, 11, 12, 13, 14, 15, 0 };
2435
2436         for (i = 0;; i++) {
2437                 if (err_reg[i] == 0)
2438                         break;
2439                 int_stat = ipu_cm_read(ipu, IPU_INT_STAT(err_reg[i]));
2440                 int_stat &= ipu_cm_read(ipu, IPU_INT_CTRL(err_reg[i]));
2441                 if (int_stat) {
2442                         ipu_cm_write(ipu, int_stat, IPU_INT_STAT(err_reg[i]));
2443                         dev_err(ipu->dev,
2444                                 "IPU Error - IPU_INT_STAT_%d = 0x%08X\n",
2445                                 err_reg[i], int_stat);
2446                         /* Disable these interrupts so we only report the error once */
2447                         int_stat =
2448                             ipu_cm_read(ipu, IPU_INT_CTRL(err_reg[i])) & ~int_stat;
2449                         ipu_cm_write(ipu, int_stat, IPU_INT_CTRL(err_reg[i]));
2450                 }
2451         }
2452
2453         for (i = 0;; i++) {
2454                 if (int_reg[i] == 0)
2455                         break;
2456                 int_stat = ipu_cm_read(ipu, IPU_INT_STAT(int_reg[i]));
2457                 int_stat &= ipu_cm_read(ipu, IPU_INT_CTRL(int_reg[i]));
2458                 ipu_cm_write(ipu, int_stat, IPU_INT_STAT(int_reg[i]));
2459                 while ((line = ffs(int_stat)) != 0) {
2460                         line--;
2461                         int_stat &= ~(1UL << line);
2462                         line += (int_reg[i] - 1) * 32;
2463                         result |=
2464                             ipu->irq_list[line].handler(line,
2465                                                        ipu->irq_list[line].
2466                                                        dev_id);
2467                 }
2468         }
2469
2470         return result;
2471 }
2472
2473 /*!
2474  * This function enables the interrupt for the specified interrupt line.
2475  * The interrupt lines are defined in \b ipu_irq_line enum.
2476  *
2477  * @param       ipu             ipu handler
2478  * @param       irq             Interrupt line to enable interrupt for.
2479  *
2480  */
2481 void ipu_enable_irq(struct ipu_soc *ipu, uint32_t irq)
2482 {
2483         uint32_t reg;
2484         unsigned long lock_flags;
2485
2486         if (!ipu->clk_enabled)
2487                 clk_enable(ipu->ipu_clk);
2488
2489         spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2490
2491         reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
2492         reg |= IPUIRQ_2_MASK(irq);
2493         ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
2494
2495         spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2496         if (!ipu->clk_enabled)
2497                 clk_disable(ipu->ipu_clk);
2498 }
2499 EXPORT_SYMBOL(ipu_enable_irq);
2500
2501 /*!
2502  * This function disables the interrupt for the specified interrupt line.
2503  * The interrupt lines are defined in \b ipu_irq_line enum.
2504  *
2505  * @param       ipu             ipu handler
2506  * @param       irq             Interrupt line to disable interrupt for.
2507  *
2508  */
2509 void ipu_disable_irq(struct ipu_soc *ipu, uint32_t irq)
2510 {
2511         uint32_t reg;
2512         unsigned long lock_flags;
2513
2514         if (!ipu->clk_enabled)
2515                 clk_enable(ipu->ipu_clk);
2516
2517         spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2518
2519         reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
2520         reg &= ~IPUIRQ_2_MASK(irq);
2521         ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
2522
2523         spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2524         if (!ipu->clk_enabled)
2525                 clk_disable(ipu->ipu_clk);
2526 }
2527 EXPORT_SYMBOL(ipu_disable_irq);
2528
2529 /*!
2530  * This function clears the interrupt for the specified interrupt line.
2531  * The interrupt lines are defined in \b ipu_irq_line enum.
2532  *
2533  * @param       ipu             ipu handler
2534  * @param       irq             Interrupt line to clear interrupt for.
2535  *
2536  */
2537 void ipu_clear_irq(struct ipu_soc *ipu, uint32_t irq)
2538 {
2539         if (!ipu->clk_enabled)
2540                 clk_enable(ipu->ipu_clk);
2541
2542         ipu_cm_write(ipu, IPUIRQ_2_MASK(irq), IPUIRQ_2_STATREG(irq));
2543
2544         if (!ipu->clk_enabled)
2545                 clk_disable(ipu->ipu_clk);
2546 }
2547 EXPORT_SYMBOL(ipu_clear_irq);
2548
2549 /*!
2550  * This function returns the current interrupt status for the specified
2551  * interrupt line. The interrupt lines are defined in \b ipu_irq_line enum.
2552  *
2553  * @param       ipu             ipu handler
2554  * @param       irq             Interrupt line to get status for.
2555  *
2556  * @return      Returns true if the interrupt is pending/asserted or false if
2557  *              the interrupt is not pending.
2558  */
2559 bool ipu_get_irq_status(struct ipu_soc *ipu, uint32_t irq)
2560 {
2561         uint32_t reg;
2562
2563         if (!ipu->clk_enabled)
2564                 clk_enable(ipu->ipu_clk);
2565
2566         reg = ipu_cm_read(ipu, IPUIRQ_2_STATREG(irq));
2567
2568         if (!ipu->clk_enabled)
2569                 clk_disable(ipu->ipu_clk);
2570
2571         if (reg & IPUIRQ_2_MASK(irq))
2572                 return true;
2573         else
2574                 return false;
2575 }
2576 EXPORT_SYMBOL(ipu_get_irq_status);
2577
2578 /*!
2579  * This function registers an interrupt handler function for the specified
2580  * interrupt line. The interrupt lines are defined in \b ipu_irq_line enum.
2581  *
2582  * @param       ipu             ipu handler
2583  * @param       irq             Interrupt line to install the handler for.
2584  *
2585  * @param       handler         Input parameter for address of the handler
2586  *                              function.
2587  *
2588  * @param       irq_flags       Flags for interrupt mode. Currently not used.
2589  *
2590  * @param       devname         Input parameter for string name of driver
2591  *                              registering the handler.
2592  *
2593  * @param       dev_id          Input parameter for pointer of data to be
2594  *                              passed to the handler.
2595  *
2596  * @return      This function returns 0 on success or negative error code on
2597  *              failure.
2598  */
2599 int ipu_request_irq(struct ipu_soc *ipu, uint32_t irq,
2600                     irqreturn_t(*handler) (int, void *),
2601                     uint32_t irq_flags, const char *devname, void *dev_id)
2602 {
2603         unsigned long lock_flags;
2604
2605         BUG_ON(irq >= IPU_IRQ_COUNT);
2606
2607         spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2608
2609         if (ipu->irq_list[irq].handler != NULL) {
2610                 dev_err(ipu->dev,
2611                         "handler already installed on irq %d\n", irq);
2612                 spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2613                 return -EINVAL;
2614         }
2615
2616         ipu->irq_list[irq].handler = handler;
2617         ipu->irq_list[irq].flags = irq_flags;
2618         ipu->irq_list[irq].dev_id = dev_id;
2619         ipu->irq_list[irq].name = devname;
2620
2621         spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2622
2623         ipu_enable_irq(ipu, irq);       /* enable the interrupt */
2624
2625         return 0;
2626 }
2627 EXPORT_SYMBOL(ipu_request_irq);
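
/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * the same completion pattern ipu_disable_channel() uses internally, here
 * waiting for one background-plane EOF.  The helper names, the "example"
 * devname string and the 100 ms timeout are hypothetical.
 */
#if 0	/* usage sketch only, not built */
static irqreturn_t example_eof_irq(int irq, void *dev_id)
{
        complete((struct completion *)dev_id);
        return IRQ_HANDLED;
}

static int example_wait_bg_eof(struct ipu_soc *ipu)
{
        DECLARE_COMPLETION_ONSTACK(eof);
        int ret;

        ret = ipu_request_irq(ipu, IPU_IRQ_BG_SYNC_EOF, example_eof_irq,
                              0, "example", &eof);
        if (ret < 0)
                return ret;

        ret = wait_for_completion_timeout(&eof, msecs_to_jiffies(100));
        ipu_free_irq(ipu, IPU_IRQ_BG_SYNC_EOF, &eof);

        return ret ? 0 : -ETIME;
}
#endif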
2628
2629 /*!
2630  * This function unregisters an interrupt handler for the specified interrupt
2631  * line. The interrupt lines are defined in \b ipu_irq_line enum.
2632  *
2633  * @param       ipu             ipu handler
2634  * @param       irq             Interrupt line to free the handler for.
2635  *
2636  * @param       dev_id          Input parameter for pointer of data to be passed
2637  *                              to the handler. This must match value passed to
2638  *                              ipu_request_irq().
2639  *
2640  */
2641 void ipu_free_irq(struct ipu_soc *ipu, uint32_t irq, void *dev_id)
2642 {
2643         ipu_disable_irq(ipu, irq);      /* disable the interrupt */
2644
2645         if (ipu->irq_list[irq].dev_id == dev_id)
2646                 ipu->irq_list[irq].handler = NULL;
2647 }
2648 EXPORT_SYMBOL(ipu_free_irq);
2649
2650 uint32_t ipu_get_cur_buffer_idx(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type)
2651 {
2652         uint32_t reg, dma_chan;
2653
2654         dma_chan = channel_2_dma(channel, type);
2655         if (!idma_is_valid(dma_chan))
2656                 return -EINVAL;
2657
2658         reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
2659         if ((reg & idma_mask(dma_chan)) && _ipu_is_trb_chan(dma_chan)) {
2660                 reg = ipu_cm_read(ipu, IPU_CHA_TRIPLE_CUR_BUF(dma_chan));
2661                 return (reg & tri_cur_buf_mask(dma_chan)) >>
2662                                 tri_cur_buf_shift(dma_chan);
2663         } else {
2664                 reg = ipu_cm_read(ipu, IPU_CHA_CUR_BUF(dma_chan));
2665                 if (reg & idma_mask(dma_chan))
2666                         return 1;
2667                 else
2668                         return 0;
2669         }
2670 }
2671 EXPORT_SYMBOL(ipu_get_cur_buffer_idx);
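
/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * in double-buffer mode the index returned above reflects the channel's
 * current buffer, so a caller can refill the other one and hand it back.
 * The helper name and channel are hypothetical.
 */
#if 0	/* usage sketch only, not built */
static void example_fill_idle_buffer(struct ipu_soc *ipu, dma_addr_t phys)
{
        uint32_t busy = ipu_get_cur_buffer_idx(ipu, MEM_BG_SYNC,
                                               IPU_INPUT_BUFFER);
        uint32_t idle = busy ? 0 : 1;

        if (!ipu_update_channel_buffer(ipu, MEM_BG_SYNC, IPU_INPUT_BUFFER,
                                       idle, phys))
                ipu_select_buffer(ipu, MEM_BG_SYNC, IPU_INPUT_BUFFER, idle);
}
#endif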
2672
2673 uint32_t _ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel)
2674 {
2675         uint32_t stat = 0;
2676         uint32_t task_stat_reg = ipu_cm_read(ipu, IPU_PROC_TASK_STAT);
2677
2678         switch (channel) {
2679         case MEM_PRP_VF_MEM:
2680                 stat = (task_stat_reg & TSTAT_VF_MASK) >> TSTAT_VF_OFFSET;
2681                 break;
2682         case MEM_VDI_PRP_VF_MEM:
2683                 stat = (task_stat_reg & TSTAT_VF_MASK) >> TSTAT_VF_OFFSET;
2684                 break;
2685         case MEM_ROT_VF_MEM:
2686                 stat =
2687                     (task_stat_reg & TSTAT_VF_ROT_MASK) >> TSTAT_VF_ROT_OFFSET;
2688                 break;
2689         case MEM_PRP_ENC_MEM:
2690                 stat = (task_stat_reg & TSTAT_ENC_MASK) >> TSTAT_ENC_OFFSET;
2691                 break;
2692         case MEM_ROT_ENC_MEM:
2693                 stat =
2694                     (task_stat_reg & TSTAT_ENC_ROT_MASK) >>
2695                     TSTAT_ENC_ROT_OFFSET;
2696                 break;
2697         case MEM_PP_MEM:
2698                 stat = (task_stat_reg & TSTAT_PP_MASK) >> TSTAT_PP_OFFSET;
2699                 break;
2700         case MEM_ROT_PP_MEM:
2701                 stat =
2702                     (task_stat_reg & TSTAT_PP_ROT_MASK) >> TSTAT_PP_ROT_OFFSET;
2703                 break;
2704
2705         default:
2706                 stat = TASK_STAT_IDLE;
2707                 break;
2708         }
2709         return stat;
2710 }
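
/*
 * Illustrative sketch (not part of the driver): _ipu_channel_status() is an
 * internal helper, so only code in this file can use it, e.g. to poll until
 * a processing task goes idle before reconfiguring it.  The 40 ms budget is
 * arbitrary and only for the example.
 */
static int __maybe_unused my_wait_for_idle(struct ipu_soc *ipu,
					   ipu_channel_t channel)
{
	int timeout = 40;	/* ms */

	while (_ipu_channel_status(ipu, channel) != TASK_STAT_IDLE) {
		if (timeout-- <= 0)
			return -ETIME;
		msleep(1);
	}
	return 0;
}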
2711
2712 int32_t ipu_swap_channel(struct ipu_soc *ipu, ipu_channel_t from_ch, ipu_channel_t to_ch)
2713 {
2714         uint32_t reg;
2715         unsigned long lock_flags;
2716
2717         int from_dma = channel_2_dma(from_ch, IPU_INPUT_BUFFER);
2718         int to_dma = channel_2_dma(to_ch, IPU_INPUT_BUFFER);
2719
2720         /* enable target channel */
2721         spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2722
2723         reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(to_dma));
2724         ipu_idmac_write(ipu, reg | idma_mask(to_dma), IDMAC_CHA_EN(to_dma));
2725
2726         ipu->channel_enable_mask |= 1L << IPU_CHAN_ID(to_ch);
2727
2728         spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2729
2730         /* switch dp dc */
2731         _ipu_dp_dc_disable(ipu, from_ch, true);
2732
2733         /* disable source channel */
2734         spin_lock_irqsave(&ipu->ipu_lock, lock_flags);
2735
2736         reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(from_dma));
2737         ipu_idmac_write(ipu, reg & ~idma_mask(from_dma), IDMAC_CHA_EN(from_dma));
2738         ipu_cm_write(ipu, idma_mask(from_dma), IPU_CHA_CUR_BUF(from_dma));
2739         ipu_cm_write(ipu, tri_cur_buf_mask(from_dma),
2740                                 IPU_CHA_TRIPLE_CUR_BUF(from_dma));
2741
2742         ipu->channel_enable_mask &= ~(1L << IPU_CHAN_ID(from_ch));
2743
2744         spin_unlock_irqrestore(&ipu->ipu_lock, lock_flags);
2745
2746         ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 0);
2747         ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 1);
2748         ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 2);
2749
2750         return 0;
2751 }
2752 EXPORT_SYMBOL(ipu_swap_channel);
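
/*
 * Illustrative sketch (not part of the driver): ipu_swap_channel() hands a
 * running display over from one already-configured channel to another.  The
 * target channel must have been initialized and given valid input buffers
 * beforehand; that setup is assumed here and not shown, and the channel pair
 * is only an example.
 */
static int __maybe_unused my_switch_display_channel(struct ipu_soc *ipu)
{
	/*
	 * Enables the target DMA channel, switches the DP/DC flow away from
	 * the source channel, then disables the source channel and clears
	 * its buffer-ready bits.
	 */
	return ipu_swap_channel(ipu, MEM_BG_SYNC, MEM_DC_SYNC);
}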
2753
2754 uint32_t bytes_per_pixel(uint32_t fmt)
2755 {
2756         switch (fmt) {
2757         case IPU_PIX_FMT_GENERIC:       /*generic data */
2758         case IPU_PIX_FMT_RGB332:
2759         case IPU_PIX_FMT_YUV420P:
2760         case IPU_PIX_FMT_YVU420P:
2761         case IPU_PIX_FMT_YUV422P:
2762                 return 1;
2763                 break;
2764         case IPU_PIX_FMT_RGB565:
2765         case IPU_PIX_FMT_YUYV:
2766         case IPU_PIX_FMT_UYVY:
2767                 return 2;
2768                 break;
2769         case IPU_PIX_FMT_BGR24:
2770         case IPU_PIX_FMT_RGB24:
2771                 return 3;
2772                 break;
2773         case IPU_PIX_FMT_GENERIC_32:    /*generic data */
2774         case IPU_PIX_FMT_BGR32:
2775         case IPU_PIX_FMT_BGRA32:
2776         case IPU_PIX_FMT_RGB32:
2777         case IPU_PIX_FMT_RGBA32:
2778         case IPU_PIX_FMT_ABGR32:
2779                 return 4;
2780                 break;
2781         default:
2782                 return 1;
2783                 break;
2784         }
2785         return 0;
2786 }
2787 EXPORT_SYMBOL(bytes_per_pixel);
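
/*
 * Illustrative sketch (not part of the driver): computing a line stride from
 * bytes_per_pixel().  For the planar YUV formats the function returns 1, so
 * the result is the stride of the Y plane only.  The 8-byte rounding mirrors
 * the alignment commonly required for IDMAC buffers and is an assumption
 * made for this example.
 */
static uint32_t __maybe_unused my_line_stride(uint32_t fmt, uint32_t width)
{
	/* round the stride up to a multiple of 8 bytes */
	return (width * bytes_per_pixel(fmt) + 7) & ~7U;
}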
2788
2789 ipu_color_space_t format_to_colorspace(uint32_t fmt)
2790 {
2791         switch (fmt) {
2792         case IPU_PIX_FMT_RGB666:
2793         case IPU_PIX_FMT_RGB565:
2794         case IPU_PIX_FMT_BGR24:
2795         case IPU_PIX_FMT_RGB24:
2796         case IPU_PIX_FMT_GBR24:
2797         case IPU_PIX_FMT_BGR32:
2798         case IPU_PIX_FMT_BGRA32:
2799         case IPU_PIX_FMT_RGB32:
2800         case IPU_PIX_FMT_RGBA32:
2801         case IPU_PIX_FMT_ABGR32:
2802         case IPU_PIX_FMT_LVDS666:
2803         case IPU_PIX_FMT_LVDS888:
2804                 return RGB;
2805                 break;
2806
2807         default:
2808                 return YCbCr;
2809                 break;
2810         }
2811         return RGB;
2812 }
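
/*
 * Illustrative sketch (not part of the driver): format_to_colorspace() lets
 * a caller decide whether a color-space conversion step is needed between an
 * input and an output pixel format.
 */
static bool __maybe_unused my_needs_csc(uint32_t in_fmt, uint32_t out_fmt)
{
	return format_to_colorspace(in_fmt) != format_to_colorspace(out_fmt);
}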
2813
2814 bool ipu_pixel_format_has_alpha(uint32_t fmt)
2815 {
2816         switch (fmt) {
2817         case IPU_PIX_FMT_RGBA32:
2818         case IPU_PIX_FMT_BGRA32:
2819         case IPU_PIX_FMT_ABGR32:
2820                 return true;
2821                 break;
2822         default:
2823                 return false;
2824                 break;
2825         }
2826         return false;
2827 }
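
/*
 * Illustrative sketch (not part of the driver): a typical use of
 * ipu_pixel_format_has_alpha() is choosing between per-pixel (local) alpha
 * and a fixed global alpha value for an overlay.  The enum is hypothetical
 * and only captures the decision.
 */
enum my_alpha_mode { MY_ALPHA_GLOBAL, MY_ALPHA_LOCAL };

static enum my_alpha_mode __maybe_unused my_pick_alpha_mode(uint32_t overlay_fmt)
{
	return ipu_pixel_format_has_alpha(overlay_fmt) ?
		MY_ALPHA_LOCAL : MY_ALPHA_GLOBAL;
}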
2828
2829 void ipu_set_csc_coefficients(struct ipu_soc *ipu, ipu_channel_t channel, int32_t param[][3])
2830 {
2831         _ipu_dp_set_csc_coefficients(ipu, channel, param);
2832 }
2833 EXPORT_SYMBOL(ipu_set_csc_coefficients);
2834
2835 static int ipu_suspend(struct platform_device *pdev, pm_message_t state)
2836 {
2837         struct imx_ipuv3_platform_data *plat_data = pdev->dev.platform_data;
2838         struct ipu_soc *ipu = platform_get_drvdata(pdev);
2839
2840         if (ipu->ipu_use_count) {
2841                 uint32_t chan_should_disable, timeout = 1000, time = 0;
2842
2843                 /* save and disable enabled channels */
2844                 ipu->idma_enable_reg[0] = ipu_idmac_read(ipu, IDMAC_CHA_EN(0));
2845                 ipu->idma_enable_reg[1] = ipu_idmac_read(ipu, IDMAC_CHA_EN(32));
2846                 while ((ipu_idmac_read(ipu, IDMAC_CHA_BUSY(0))
2847                         & ipu->idma_enable_reg[0])
2848                         || (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(32))
2849                         & ipu->idma_enable_reg[1])) {
2850                         /* disable the channels that are no longer busy */
2851                         chan_should_disable =
2852                                 ipu_idmac_read(ipu, IDMAC_CHA_BUSY(0))
2853                                         ^ ipu->idma_enable_reg[0];
2854                         ipu_idmac_write(ipu, (~chan_should_disable) &
2855                                         ipu->idma_enable_reg[0], IDMAC_CHA_EN(0));
2856                         chan_should_disable =
2857                                 ipu_idmac_read(ipu, IDMAC_CHA_BUSY(32))
2858                                         ^ ipu->idma_enable_reg[1];
2859                         ipu_idmac_write(ipu, (~chan_should_disable) &
2860                                         ipu->idma_enable_reg[1], IDMAC_CHA_EN(32));
2861                         msleep(2);
2862                         time += 2;
2863                         if (time >= timeout)
2864                                 return -ETIME;
2865                 }
2866                 ipu_idmac_write(ipu, 0, IDMAC_CHA_EN(0));
2867                 ipu_idmac_write(ipu, 0, IDMAC_CHA_EN(32));
2868
2869                 /* save double buffer select regs */
2870                 ipu->cha_db_mode_reg[0] = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0));
2871                 ipu->cha_db_mode_reg[1] = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32));
2872                 ipu->cha_db_mode_reg[2] =
2873                         ipu_cm_read(ipu, IPU_ALT_CHA_DB_MODE_SEL(0));
2874                 ipu->cha_db_mode_reg[3] =
2875                         ipu_cm_read(ipu, IPU_ALT_CHA_DB_MODE_SEL(32));
2876
2877                 /* save triple buffer select regs */
2878                 ipu->cha_trb_mode_reg[0] = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(0));
2879                 ipu->cha_trb_mode_reg[1] = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(32));
2880
2881                 /* save idmac sub addr regs */
2882                 ipu->idma_sub_addr_reg[0] = ipu_idmac_read(ipu, IDMAC_SUB_ADDR_0);
2883                 ipu->idma_sub_addr_reg[1] = ipu_idmac_read(ipu, IDMAC_SUB_ADDR_1);
2884                 ipu->idma_sub_addr_reg[2] = ipu_idmac_read(ipu, IDMAC_SUB_ADDR_2);
2885                 ipu->idma_sub_addr_reg[3] = ipu_idmac_read(ipu, IDMAC_SUB_ADDR_3);
2886                 ipu->idma_sub_addr_reg[4] = ipu_idmac_read(ipu, IDMAC_SUB_ADDR_4);
2887
2888                 /* save sub-modules status and disable all */
2889                 ipu->ic_conf_reg = ipu_ic_read(ipu, IC_CONF);
2890                 ipu_ic_write(ipu, 0, IC_CONF);
2891                 ipu->ipu_conf_reg = ipu_cm_read(ipu, IPU_CONF);
2892                 ipu_cm_write(ipu, 0, IPU_CONF);
2893
2894                 /* save buf ready regs */
2895                 ipu->buf_ready_reg[0] = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(0));
2896                 ipu->buf_ready_reg[1] = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(32));
2897                 ipu->buf_ready_reg[2] = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(0));
2898                 ipu->buf_ready_reg[3] = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(32));
2899                 ipu->buf_ready_reg[4] = ipu_cm_read(ipu, IPU_ALT_CHA_BUF0_RDY(0));
2900                 ipu->buf_ready_reg[5] = ipu_cm_read(ipu, IPU_ALT_CHA_BUF0_RDY(32));
2901                 ipu->buf_ready_reg[6] = ipu_cm_read(ipu, IPU_ALT_CHA_BUF1_RDY(0));
2902                 ipu->buf_ready_reg[7] = ipu_cm_read(ipu, IPU_ALT_CHA_BUF1_RDY(32));
2903                 ipu->buf_ready_reg[8] = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(0));
2904                 ipu->buf_ready_reg[9] = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(32));
2905         }
2906
2907         if (plat_data->pg)
2908                 plat_data->pg(1);
2909
2910         return 0;
2911 }
2912
2913 static int ipu_resume(struct platform_device *pdev)
2914 {
2915         struct imx_ipuv3_platform_data *plat_data = pdev->dev.platform_data;
2916         struct ipu_soc *ipu = platform_get_drvdata(pdev);
2917
2918         if (plat_data->pg)
2919                 plat_data->pg(0);
2920
2921         if (ipu->ipu_use_count) {
2922
2923                 /* restore buf ready regs */
2924                 ipu_cm_write(ipu, ipu->buf_ready_reg[0], IPU_CHA_BUF0_RDY(0));
2925                 ipu_cm_write(ipu, ipu->buf_ready_reg[1], IPU_CHA_BUF0_RDY(32));
2926                 ipu_cm_write(ipu, ipu->buf_ready_reg[2], IPU_CHA_BUF1_RDY(0));
2927                 ipu_cm_write(ipu, ipu->buf_ready_reg[3], IPU_CHA_BUF1_RDY(32));
2928                 ipu_cm_write(ipu, ipu->buf_ready_reg[4], IPU_ALT_CHA_BUF0_RDY(0));
2929                 ipu_cm_write(ipu, ipu->buf_ready_reg[5], IPU_ALT_CHA_BUF0_RDY(32));
2930                 ipu_cm_write(ipu, ipu->buf_ready_reg[6], IPU_ALT_CHA_BUF1_RDY(0));
2931                 ipu_cm_write(ipu, ipu->buf_ready_reg[7], IPU_ALT_CHA_BUF1_RDY(32));
2932                 ipu_cm_write(ipu, ipu->buf_ready_reg[8], IPU_CHA_BUF2_RDY(0));
2933                 ipu_cm_write(ipu, ipu->buf_ready_reg[9], IPU_CHA_BUF2_RDY(32));
2934
2935                 /* re-enable sub-modules*/
2936                 ipu_cm_write(ipu, ipu->ipu_conf_reg, IPU_CONF);
2937                 ipu_ic_write(ipu, ipu->ic_conf_reg, IC_CONF);
2938
2939                 /* restore double buffer select regs */
2940                 ipu_cm_write(ipu, ipu->cha_db_mode_reg[0], IPU_CHA_DB_MODE_SEL(0));
2941                 ipu_cm_write(ipu, ipu->cha_db_mode_reg[1], IPU_CHA_DB_MODE_SEL(32));
2942                 ipu_cm_write(ipu, ipu->cha_db_mode_reg[2],
2943                                 IPU_ALT_CHA_DB_MODE_SEL(0));
2944                 ipu_cm_write(ipu, ipu->cha_db_mode_reg[3],
2945                                 IPU_ALT_CHA_DB_MODE_SEL(32));
2946
2947                 /* restore triple buffer select regs */
2948                 ipu_cm_write(ipu, ipu->cha_trb_mode_reg[0], IPU_CHA_TRB_MODE_SEL(0));
2949                 ipu_cm_write(ipu, ipu->cha_trb_mode_reg[1], IPU_CHA_TRB_MODE_SEL(32));
2950
2951                 /* restore idmac sub addr regs */
2952                 ipu_idmac_write(ipu, ipu->idma_sub_addr_reg[0], IDMAC_SUB_ADDR_0);
2953                 ipu_idmac_write(ipu, ipu->idma_sub_addr_reg[1], IDMAC_SUB_ADDR_1);
2954                 ipu_idmac_write(ipu, ipu->idma_sub_addr_reg[2], IDMAC_SUB_ADDR_2);
2955                 ipu_idmac_write(ipu, ipu->idma_sub_addr_reg[3], IDMAC_SUB_ADDR_3);
2956                 ipu_idmac_write(ipu, ipu->idma_sub_addr_reg[4], IDMAC_SUB_ADDR_4);
2957
2958                 /* restart idma channel*/
2959                 ipu_idmac_write(ipu, ipu->idma_enable_reg[0], IDMAC_CHA_EN(0));
2960                 ipu_idmac_write(ipu, ipu->idma_enable_reg[1], IDMAC_CHA_EN(32));
2961         } else {
2962                 _ipu_get(ipu);
2963                 _ipu_dmfc_init(ipu, dmfc_type_setup, 1);
2964                 _ipu_init_dc_mappings(ipu);
2965                 /* Set sync refresh channels as high priority */
2966                 ipu_idmac_write(ipu, 0x18800001L, IDMAC_CHA_PRI(0));
2967                 _ipu_put(ipu);
2968         }
2969
2970         return 0;
2971 }
2972
2973 /*!
2974  * This structure contains the platform driver's probe/remove and power management callbacks.
2975  */
2976 static struct platform_driver mxcipu_driver = {
2977         .driver = {
2978                    .name = "imx-ipuv3",
2979                    },
2980         .probe = ipu_probe,
2981         .remove = ipu_remove,
2982         .suspend = ipu_suspend,
2983         .resume = ipu_resume,
2984 };
2985
2986 int32_t __init ipu_gen_init(void)
2987 {
2988         int32_t ret;
2989
2990         ret = platform_driver_register(&mxcipu_driver);
2991         return ret;
2992 }
2993
2994 subsys_initcall(ipu_gen_init);
2995
2996 static void __exit ipu_gen_uninit(void)
2997 {
2998         platform_driver_unregister(&mxcipu_driver);
2999 }
3000
3001 module_exit(ipu_gen_uninit);