2 * linux/drivers/video/omap2/dss/dsi.c
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #define DSS_SUBSYS_NAME "DSI"
22 #include <linux/kernel.h>
24 #include <linux/clk.h>
25 #include <linux/device.h>
26 #include <linux/err.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/mutex.h>
30 #include <linux/semaphore.h>
31 #include <linux/seq_file.h>
32 #include <linux/platform_device.h>
33 #include <linux/regulator/consumer.h>
34 #include <linux/wait.h>
35 #include <linux/workqueue.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/debugfs.h>
39 #include <linux/pm_runtime.h>
41 #include <video/omapdss.h>
42 #include <video/mipi_display.h>
43 #include <plat/clock.h>
46 #include "dss_features.h"
48 /*#define VERBOSE_IRQ*/
49 #define DSI_CATCH_MISSING_TE
51 struct dsi_reg { u16 idx; };
53 #define DSI_REG(idx) ((const struct dsi_reg) { idx })
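/*
 * Illustrative note (not from the original source): wrapping the register
 * offset in a one-member struct gives dsi_read_reg()/dsi_write_reg() a
 * distinct parameter type, so e.g. dsi_read_reg(dsidev, DSI_REVISION)
 * compiles while passing a bare integer offset would not.
 */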
55 #define DSI_SZ_REGS SZ_1K
56 /* DSI Protocol Engine */
58 #define DSI_REVISION DSI_REG(0x0000)
59 #define DSI_SYSCONFIG DSI_REG(0x0010)
60 #define DSI_SYSSTATUS DSI_REG(0x0014)
61 #define DSI_IRQSTATUS DSI_REG(0x0018)
62 #define DSI_IRQENABLE DSI_REG(0x001C)
63 #define DSI_CTRL DSI_REG(0x0040)
64 #define DSI_GNQ DSI_REG(0x0044)
65 #define DSI_COMPLEXIO_CFG1 DSI_REG(0x0048)
66 #define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(0x004C)
67 #define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(0x0050)
68 #define DSI_CLK_CTRL DSI_REG(0x0054)
69 #define DSI_TIMING1 DSI_REG(0x0058)
70 #define DSI_TIMING2 DSI_REG(0x005C)
71 #define DSI_VM_TIMING1 DSI_REG(0x0060)
72 #define DSI_VM_TIMING2 DSI_REG(0x0064)
73 #define DSI_VM_TIMING3 DSI_REG(0x0068)
74 #define DSI_CLK_TIMING DSI_REG(0x006C)
75 #define DSI_TX_FIFO_VC_SIZE DSI_REG(0x0070)
76 #define DSI_RX_FIFO_VC_SIZE DSI_REG(0x0074)
77 #define DSI_COMPLEXIO_CFG2 DSI_REG(0x0078)
78 #define DSI_RX_FIFO_VC_FULLNESS DSI_REG(0x007C)
79 #define DSI_VM_TIMING4 DSI_REG(0x0080)
80 #define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(0x0084)
81 #define DSI_VM_TIMING5 DSI_REG(0x0088)
82 #define DSI_VM_TIMING6 DSI_REG(0x008C)
83 #define DSI_VM_TIMING7 DSI_REG(0x0090)
84 #define DSI_STOPCLK_TIMING DSI_REG(0x0094)
85 #define DSI_VC_CTRL(n) DSI_REG(0x0100 + (n * 0x20))
86 #define DSI_VC_TE(n) DSI_REG(0x0104 + (n * 0x20))
87 #define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(0x0108 + (n * 0x20))
88 #define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(0x010C + (n * 0x20))
89 #define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(0x0110 + (n * 0x20))
90 #define DSI_VC_IRQSTATUS(n) DSI_REG(0x0118 + (n * 0x20))
91 #define DSI_VC_IRQENABLE(n) DSI_REG(0x011C + (n * 0x20))
95 #define DSI_DSIPHY_CFG0 DSI_REG(0x200 + 0x0000)
96 #define DSI_DSIPHY_CFG1 DSI_REG(0x200 + 0x0004)
97 #define DSI_DSIPHY_CFG2 DSI_REG(0x200 + 0x0008)
98 #define DSI_DSIPHY_CFG5 DSI_REG(0x200 + 0x0014)
99 #define DSI_DSIPHY_CFG10 DSI_REG(0x200 + 0x0028)
101 /* DSI_PLL_CTRL_SCP */
103 #define DSI_PLL_CONTROL DSI_REG(0x300 + 0x0000)
104 #define DSI_PLL_STATUS DSI_REG(0x300 + 0x0004)
105 #define DSI_PLL_GO DSI_REG(0x300 + 0x0008)
106 #define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C)
107 #define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010)
109 #define REG_GET(dsidev, idx, start, end) \
110 FLD_GET(dsi_read_reg(dsidev, idx), start, end)
112 #define REG_FLD_MOD(dsidev, idx, val, start, end) \
113 dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end))
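/*
 * Illustrative note (not from the original source): REG_GET and REG_FLD_MOD
 * are convenience wrappers around FLD_GET/FLD_MOD.  For example,
 * REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0) performs a read-modify-write that
 * sets only bit field [0:0] (IF_EN) of DSI_CTRL, and
 * REG_GET(dsidev, DSI_CTRL, 0, 0) reads that same field back.
 */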
115 /* Global interrupts */
116 #define DSI_IRQ_VC0 (1 << 0)
117 #define DSI_IRQ_VC1 (1 << 1)
118 #define DSI_IRQ_VC2 (1 << 2)
119 #define DSI_IRQ_VC3 (1 << 3)
120 #define DSI_IRQ_WAKEUP (1 << 4)
121 #define DSI_IRQ_RESYNC (1 << 5)
122 #define DSI_IRQ_PLL_LOCK (1 << 7)
123 #define DSI_IRQ_PLL_UNLOCK (1 << 8)
124 #define DSI_IRQ_PLL_RECALL (1 << 9)
125 #define DSI_IRQ_COMPLEXIO_ERR (1 << 10)
126 #define DSI_IRQ_HS_TX_TIMEOUT (1 << 14)
127 #define DSI_IRQ_LP_RX_TIMEOUT (1 << 15)
128 #define DSI_IRQ_TE_TRIGGER (1 << 16)
129 #define DSI_IRQ_ACK_TRIGGER (1 << 17)
130 #define DSI_IRQ_SYNC_LOST (1 << 18)
131 #define DSI_IRQ_LDO_POWER_GOOD (1 << 19)
132 #define DSI_IRQ_TA_TIMEOUT (1 << 20)
133 #define DSI_IRQ_ERROR_MASK \
134 (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
136 #define DSI_IRQ_CHANNEL_MASK 0xf
138 /* Virtual channel interrupts */
139 #define DSI_VC_IRQ_CS (1 << 0)
140 #define DSI_VC_IRQ_ECC_CORR (1 << 1)
141 #define DSI_VC_IRQ_PACKET_SENT (1 << 2)
142 #define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3)
143 #define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4)
144 #define DSI_VC_IRQ_BTA (1 << 5)
145 #define DSI_VC_IRQ_ECC_NO_CORR (1 << 6)
146 #define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7)
147 #define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
148 #define DSI_VC_IRQ_ERROR_MASK \
149 (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
150 DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
151 DSI_VC_IRQ_FIFO_TX_UDF)
153 /* ComplexIO interrupts */
154 #define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0)
155 #define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1)
156 #define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2)
157 #define DSI_CIO_IRQ_ERRSYNCESC4 (1 << 3)
158 #define DSI_CIO_IRQ_ERRSYNCESC5 (1 << 4)
159 #define DSI_CIO_IRQ_ERRESC1 (1 << 5)
160 #define DSI_CIO_IRQ_ERRESC2 (1 << 6)
161 #define DSI_CIO_IRQ_ERRESC3 (1 << 7)
162 #define DSI_CIO_IRQ_ERRESC4 (1 << 8)
163 #define DSI_CIO_IRQ_ERRESC5 (1 << 9)
164 #define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10)
165 #define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11)
166 #define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12)
167 #define DSI_CIO_IRQ_ERRCONTROL4 (1 << 13)
168 #define DSI_CIO_IRQ_ERRCONTROL5 (1 << 14)
169 #define DSI_CIO_IRQ_STATEULPS1 (1 << 15)
170 #define DSI_CIO_IRQ_STATEULPS2 (1 << 16)
171 #define DSI_CIO_IRQ_STATEULPS3 (1 << 17)
172 #define DSI_CIO_IRQ_STATEULPS4 (1 << 18)
173 #define DSI_CIO_IRQ_STATEULPS5 (1 << 19)
174 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20)
175 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21)
176 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22)
177 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23)
178 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24)
179 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
180 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_4 (1 << 26)
181 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_4 (1 << 27)
182 #define DSI_CIO_IRQ_ERRCONTENTIONLP0_5 (1 << 28)
183 #define DSI_CIO_IRQ_ERRCONTENTIONLP1_5 (1 << 29)
184 #define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
185 #define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
186 #define DSI_CIO_IRQ_ERROR_MASK \
187 (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
188 DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
189 DSI_CIO_IRQ_ERRSYNCESC5 | \
190 DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
191 DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
192 DSI_CIO_IRQ_ERRESC5 | \
193 DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
194 DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
195 DSI_CIO_IRQ_ERRCONTROL5 | \
196 DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
197 DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
198 DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
199 DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
200 DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
202 typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
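/*
 * Sketch (assumption, not part of the original source): a handler matching
 * omap_dsi_isr_t simply receives its registration argument plus the raw
 * status bits, e.g. one that records which IRQ bits fired:
 *
 *	static void example_dsi_isr(void *arg, u32 mask)
 *	{
 *		u32 *last_status = arg;
 *
 *		*last_status = mask;
 *	}
 *
 * It would be registered with dsi_register_isr(dsidev, example_dsi_isr,
 * &status, DSI_IRQ_TE_TRIGGER) and removed with dsi_unregister_isr().
 */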
204 #define DSI_MAX_NR_ISRS 2
206 struct dsi_isr_data {
214 DSI_FIFO_SIZE_32 = 1,
215 DSI_FIFO_SIZE_64 = 2,
216 DSI_FIFO_SIZE_96 = 3,
217 DSI_FIFO_SIZE_128 = 4,
221 DSI_VC_SOURCE_L4 = 0,
228 DSI_DATA1_P = 1 << 2,
229 DSI_DATA1_N = 1 << 3,
230 DSI_DATA2_P = 1 << 4,
231 DSI_DATA2_N = 1 << 5,
232 DSI_DATA3_P = 1 << 6,
233 DSI_DATA3_N = 1 << 7,
234 DSI_DATA4_P = 1 << 8,
235 DSI_DATA4_N = 1 << 9,
238 struct dsi_update_region {
240 struct omap_dss_device *device;
243 struct dsi_irq_stats {
244 unsigned long last_reset;
246 unsigned dsi_irqs[32];
247 unsigned vc_irqs[4][32];
248 unsigned cio_irqs[32];
251 struct dsi_isr_tables {
252 struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
253 struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
254 struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
258 struct platform_device *pdev;
266 int (*enable_pads)(int dsi_id, unsigned lane_mask);
267 void (*disable_pads)(int dsi_id, unsigned lane_mask);
269 struct dsi_clock_info current_cinfo;
271 bool vdds_dsi_enabled;
272 struct regulator *vdds_dsi_reg;
275 enum dsi_vc_source source;
276 struct omap_dss_device *dssdev;
277 enum fifo_size fifo_size;
282 struct semaphore bus_lock;
287 struct dsi_isr_tables isr_tables;
288 /* space for a copy used by the interrupt handler */
289 struct dsi_isr_tables isr_tables_copy;
292 struct dsi_update_region update_region;
297 void (*framedone_callback)(int, void *);
298 void *framedone_data;
300 struct delayed_work framedone_timeout_work;
302 #ifdef DSI_CATCH_MISSING_TE
303 struct timer_list te_timer;
306 unsigned long cache_req_pck;
307 unsigned long cache_clk_freq;
308 struct dsi_clock_info cache_cinfo;
311 spinlock_t errors_lock;
313 ktime_t perf_setup_time;
314 ktime_t perf_start_time;
319 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
320 spinlock_t irq_stats_lock;
321 struct dsi_irq_stats irq_stats;
323 /* DSI PLL Parameter Ranges */
324 unsigned long regm_max, regn_max;
325 unsigned long regm_dispc_max, regm_dsi_max;
326 unsigned long fint_min, fint_max;
327 unsigned long lpdiv_max;
331 unsigned scp_clk_refcount;
334 struct dsi_packet_sent_handler_data {
335 struct platform_device *dsidev;
336 struct completion *completion;
339 static struct platform_device *dsi_pdev_map[MAX_NUM_DSI];
342 static bool dsi_perf;
343 module_param_named(dsi_perf, dsi_perf, bool, 0644);
346 static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev)
348 return dev_get_drvdata(&dsidev->dev);
351 static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
353 return dsi_pdev_map[dssdev->phy.dsi.module];
356 struct platform_device *dsi_get_dsidev_from_id(int module)
358 return dsi_pdev_map[module];
361 static inline int dsi_get_dsidev_id(struct platform_device *dsidev)
366 static inline void dsi_write_reg(struct platform_device *dsidev,
367 const struct dsi_reg idx, u32 val)
369 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
371 __raw_writel(val, dsi->base + idx.idx);
374 static inline u32 dsi_read_reg(struct platform_device *dsidev,
375 const struct dsi_reg idx)
377 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
379 return __raw_readl(dsi->base + idx.idx);
382 void dsi_bus_lock(struct omap_dss_device *dssdev)
384 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
385 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
387 down(&dsi->bus_lock);
389 EXPORT_SYMBOL(dsi_bus_lock);
391 void dsi_bus_unlock(struct omap_dss_device *dssdev)
393 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
394 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
398 EXPORT_SYMBOL(dsi_bus_unlock);
400 static bool dsi_bus_is_locked(struct platform_device *dsidev)
402 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
404 return dsi->bus_lock.count == 0;
407 static void dsi_completion_handler(void *data, u32 mask)
409 complete((struct completion *)data);
412 static inline int wait_for_bit_change(struct platform_device *dsidev,
413 const struct dsi_reg idx, int bitnum, int value)
417 while (REG_GET(dsidev, idx, bitnum, bitnum) != value) {
425 u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
428 case OMAP_DSS_DSI_FMT_RGB888:
429 case OMAP_DSS_DSI_FMT_RGB666:
431 case OMAP_DSS_DSI_FMT_RGB666_PACKED:
433 case OMAP_DSS_DSI_FMT_RGB565:
441 static void dsi_perf_mark_setup(struct platform_device *dsidev)
443 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
444 dsi->perf_setup_time = ktime_get();
447 static void dsi_perf_mark_start(struct platform_device *dsidev)
449 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
450 dsi->perf_start_time = ktime_get();
453 static void dsi_perf_show(struct platform_device *dsidev, const char *name)
455 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
456 struct omap_dss_device *dssdev = dsi->update_region.device;
457 ktime_t t, setup_time, trans_time;
459 u32 setup_us, trans_us, total_us;
466 setup_time = ktime_sub(dsi->perf_start_time, dsi->perf_setup_time);
467 setup_us = (u32)ktime_to_us(setup_time);
471 trans_time = ktime_sub(t, dsi->perf_start_time);
472 trans_us = (u32)ktime_to_us(trans_time);
476 total_us = setup_us + trans_us;
478 total_bytes = dsi->update_region.w *
479 dsi->update_region.h *
480 dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt) / 8;
482 printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
483 "%u bytes, %u kbytes/sec\n",
488 1000*1000 / total_us,
490 total_bytes * 1000 / total_us);
493 static inline void dsi_perf_mark_setup(struct platform_device *dsidev)
497 static inline void dsi_perf_mark_start(struct platform_device *dsidev)
501 static inline void dsi_perf_show(struct platform_device *dsidev,
507 static void print_irq_status(u32 status)
513 if ((status & ~DSI_IRQ_CHANNEL_MASK) == 0)
516 printk(KERN_DEBUG "DSI IRQ: 0x%x: ", status);
519 if (status & DSI_IRQ_##x) \
545 static void print_irq_status_vc(int channel, u32 status)
551 if ((status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
554 printk(KERN_DEBUG "DSI VC(%d) IRQ 0x%x: ", channel, status);
557 if (status & DSI_VC_IRQ_##x) \
574 static void print_irq_status_cio(u32 status)
579 printk(KERN_DEBUG "DSI CIO IRQ 0x%x: ", status);
582 if (status & DSI_CIO_IRQ_##x) \
596 PIS(ERRCONTENTIONLP0_1);
597 PIS(ERRCONTENTIONLP1_1);
598 PIS(ERRCONTENTIONLP0_2);
599 PIS(ERRCONTENTIONLP1_2);
600 PIS(ERRCONTENTIONLP0_3);
601 PIS(ERRCONTENTIONLP1_3);
602 PIS(ULPSACTIVENOT_ALL0);
603 PIS(ULPSACTIVENOT_ALL1);
609 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
610 static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
611 u32 *vcstatus, u32 ciostatus)
613 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
616 spin_lock(&dsi->irq_stats_lock);
618 dsi->irq_stats.irq_count++;
619 dss_collect_irq_stats(irqstatus, dsi->irq_stats.dsi_irqs);
621 for (i = 0; i < 4; ++i)
622 dss_collect_irq_stats(vcstatus[i], dsi->irq_stats.vc_irqs[i]);
624 dss_collect_irq_stats(ciostatus, dsi->irq_stats.cio_irqs);
626 spin_unlock(&dsi->irq_stats_lock);
629 #define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus)
632 static int debug_irq;
634 static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus,
635 u32 *vcstatus, u32 ciostatus)
637 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
640 if (irqstatus & DSI_IRQ_ERROR_MASK) {
641 DSSERR("DSI error, irqstatus %x\n", irqstatus);
642 print_irq_status(irqstatus);
643 spin_lock(&dsi->errors_lock);
644 dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK;
645 spin_unlock(&dsi->errors_lock);
646 } else if (debug_irq) {
647 print_irq_status(irqstatus);
650 for (i = 0; i < 4; ++i) {
651 if (vcstatus[i] & DSI_VC_IRQ_ERROR_MASK) {
652 DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
654 print_irq_status_vc(i, vcstatus[i]);
655 } else if (debug_irq) {
656 print_irq_status_vc(i, vcstatus[i]);
660 if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
661 DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
662 print_irq_status_cio(ciostatus);
663 } else if (debug_irq) {
664 print_irq_status_cio(ciostatus);
668 static void dsi_call_isrs(struct dsi_isr_data *isr_array,
669 unsigned isr_array_size, u32 irqstatus)
671 struct dsi_isr_data *isr_data;
674 for (i = 0; i < isr_array_size; i++) {
675 isr_data = &isr_array[i];
676 if (isr_data->isr && isr_data->mask & irqstatus)
677 isr_data->isr(isr_data->arg, irqstatus);
681 static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
682 u32 irqstatus, u32 *vcstatus, u32 ciostatus)
686 dsi_call_isrs(isr_tables->isr_table,
687 ARRAY_SIZE(isr_tables->isr_table),
690 for (i = 0; i < 4; ++i) {
691 if (vcstatus[i] == 0)
693 dsi_call_isrs(isr_tables->isr_table_vc[i],
694 ARRAY_SIZE(isr_tables->isr_table_vc[i]),
699 dsi_call_isrs(isr_tables->isr_table_cio,
700 ARRAY_SIZE(isr_tables->isr_table_cio),
704 static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
706 struct platform_device *dsidev;
707 struct dsi_data *dsi;
708 u32 irqstatus, vcstatus[4], ciostatus;
711 dsidev = (struct platform_device *) arg;
712 dsi = dsi_get_dsidrv_data(dsidev);
714 spin_lock(&dsi->irq_lock);
716 irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);
718 /* IRQ is not for us */
720 spin_unlock(&dsi->irq_lock);
724 dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
725 /* flush posted write */
726 dsi_read_reg(dsidev, DSI_IRQSTATUS);
728 for (i = 0; i < 4; ++i) {
729 if ((irqstatus & (1 << i)) == 0) {
734 vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
736 dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]);
737 /* flush posted write */
738 dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
741 if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
742 ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
744 dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
745 /* flush posted write */
746 dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
751 #ifdef DSI_CATCH_MISSING_TE
752 if (irqstatus & DSI_IRQ_TE_TRIGGER)
753 del_timer(&dsi->te_timer);
756 /* make a copy and unlock, so that isrs can unregister
758 memcpy(&dsi->isr_tables_copy, &dsi->isr_tables,
759 sizeof(dsi->isr_tables));
761 spin_unlock(&dsi->irq_lock);
763 dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);
765 dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus);
767 dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus);
772 /* dsi->irq_lock has to be locked by the caller */
773 static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
774 struct dsi_isr_data *isr_array,
775 unsigned isr_array_size, u32 default_mask,
776 const struct dsi_reg enable_reg,
777 const struct dsi_reg status_reg)
779 struct dsi_isr_data *isr_data;
786 for (i = 0; i < isr_array_size; i++) {
787 isr_data = &isr_array[i];
789 if (isr_data->isr == NULL)
792 mask |= isr_data->mask;
795 old_mask = dsi_read_reg(dsidev, enable_reg);
796 /* clear the irqstatus for newly enabled irqs */
797 dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask);
798 dsi_write_reg(dsidev, enable_reg, mask);
800 /* flush posted writes */
801 dsi_read_reg(dsidev, enable_reg);
802 dsi_read_reg(dsidev, status_reg);
805 /* dsi->irq_lock has to be locked by the caller */
806 static void _omap_dsi_set_irqs(struct platform_device *dsidev)
808 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
809 u32 mask = DSI_IRQ_ERROR_MASK;
810 #ifdef DSI_CATCH_MISSING_TE
811 mask |= DSI_IRQ_TE_TRIGGER;
813 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table,
814 ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
815 DSI_IRQENABLE, DSI_IRQSTATUS);
818 /* dsi->irq_lock has to be locked by the caller */
819 static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc)
821 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
823 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc],
824 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]),
825 DSI_VC_IRQ_ERROR_MASK,
826 DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
829 /* dsi->irq_lock has to be locked by the caller */
830 static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev)
832 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
834 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio,
835 ARRAY_SIZE(dsi->isr_tables.isr_table_cio),
836 DSI_CIO_IRQ_ERROR_MASK,
837 DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
840 static void _dsi_initialize_irq(struct platform_device *dsidev)
842 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
846 spin_lock_irqsave(&dsi->irq_lock, flags);
848 memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables));
850 _omap_dsi_set_irqs(dsidev);
851 for (vc = 0; vc < 4; ++vc)
852 _omap_dsi_set_irqs_vc(dsidev, vc);
853 _omap_dsi_set_irqs_cio(dsidev);
855 spin_unlock_irqrestore(&dsi->irq_lock, flags);
858 static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
859 struct dsi_isr_data *isr_array, unsigned isr_array_size)
861 struct dsi_isr_data *isr_data;
867 /* check for duplicate entry and find a free slot */
869 for (i = 0; i < isr_array_size; i++) {
870 isr_data = &isr_array[i];
872 if (isr_data->isr == isr && isr_data->arg == arg &&
873 isr_data->mask == mask) {
877 if (isr_data->isr == NULL && free_idx == -1)
884 isr_data = &isr_array[free_idx];
887 isr_data->mask = mask;
892 static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
893 struct dsi_isr_data *isr_array, unsigned isr_array_size)
895 struct dsi_isr_data *isr_data;
898 for (i = 0; i < isr_array_size; i++) {
899 isr_data = &isr_array[i];
900 if (isr_data->isr != isr || isr_data->arg != arg ||
901 isr_data->mask != mask)
904 isr_data->isr = NULL;
905 isr_data->arg = NULL;
914 static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
917 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
921 spin_lock_irqsave(&dsi->irq_lock, flags);
923 r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table,
924 ARRAY_SIZE(dsi->isr_tables.isr_table));
927 _omap_dsi_set_irqs(dsidev);
929 spin_unlock_irqrestore(&dsi->irq_lock, flags);
934 static int dsi_unregister_isr(struct platform_device *dsidev,
935 omap_dsi_isr_t isr, void *arg, u32 mask)
937 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
941 spin_lock_irqsave(&dsi->irq_lock, flags);
943 r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table,
944 ARRAY_SIZE(dsi->isr_tables.isr_table));
947 _omap_dsi_set_irqs(dsidev);
949 spin_unlock_irqrestore(&dsi->irq_lock, flags);
954 static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
955 omap_dsi_isr_t isr, void *arg, u32 mask)
957 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
961 spin_lock_irqsave(&dsi->irq_lock, flags);
963 r = _dsi_register_isr(isr, arg, mask,
964 dsi->isr_tables.isr_table_vc[channel],
965 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
968 _omap_dsi_set_irqs_vc(dsidev, channel);
970 spin_unlock_irqrestore(&dsi->irq_lock, flags);
975 static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
976 omap_dsi_isr_t isr, void *arg, u32 mask)
978 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
982 spin_lock_irqsave(&dsi->irq_lock, flags);
984 r = _dsi_unregister_isr(isr, arg, mask,
985 dsi->isr_tables.isr_table_vc[channel],
986 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
989 _omap_dsi_set_irqs_vc(dsidev, channel);
991 spin_unlock_irqrestore(&dsi->irq_lock, flags);
996 static int dsi_register_isr_cio(struct platform_device *dsidev,
997 omap_dsi_isr_t isr, void *arg, u32 mask)
999 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1000 unsigned long flags;
1003 spin_lock_irqsave(&dsi->irq_lock, flags);
1005 r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
1006 ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
1009 _omap_dsi_set_irqs_cio(dsidev);
1011 spin_unlock_irqrestore(&dsi->irq_lock, flags);
1016 static int dsi_unregister_isr_cio(struct platform_device *dsidev,
1017 omap_dsi_isr_t isr, void *arg, u32 mask)
1019 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1020 unsigned long flags;
1023 spin_lock_irqsave(&dsi->irq_lock, flags);
1025 r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
1026 ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
1029 _omap_dsi_set_irqs_cio(dsidev);
1031 spin_unlock_irqrestore(&dsi->irq_lock, flags);
1036 static u32 dsi_get_errors(struct platform_device *dsidev)
1038 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1039 unsigned long flags;
1041 spin_lock_irqsave(&dsi->errors_lock, flags);
1044 spin_unlock_irqrestore(&dsi->errors_lock, flags);
1048 int dsi_runtime_get(struct platform_device *dsidev)
1051 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1053 DSSDBG("dsi_runtime_get\n");
1055 r = pm_runtime_get_sync(&dsi->pdev->dev);
1057 return r < 0 ? r : 0;
1060 void dsi_runtime_put(struct platform_device *dsidev)
1062 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1065 DSSDBG("dsi_runtime_put\n");
1067 r = pm_runtime_put(&dsi->pdev->dev);
1071 /* source clock for DSI PLL. this could also be PCLKFREE */
1072 static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
1075 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1078 clk_enable(dsi->sys_clk);
1080 clk_disable(dsi->sys_clk);
1082 if (enable && dsi->pll_locked) {
1083 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
1084 DSSERR("cannot lock PLL when enabling clocks\n");
1089 static void _dsi_print_reset_status(struct platform_device *dsidev)
1097 /* A dummy read using the SCP interface to any DSIPHY register is
1098 * required after DSIPHY reset to complete the reset of the DSI complex
1100 l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
1102 printk(KERN_DEBUG "DSI resets: ");
1104 l = dsi_read_reg(dsidev, DSI_PLL_STATUS);
1105 printk("PLL (%d) ", FLD_GET(l, 0, 0));
1107 l = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
1108 printk("CIO (%d) ", FLD_GET(l, 29, 29));
1110 if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
1120 l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
1121 printk("PHY (%x%x%x, %d, %d, %d)\n",
1127 FLD_GET(l, 31, 31));
1130 #define _dsi_print_reset_status(x)
1133 static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
1135 DSSDBG("dsi_if_enable(%d)\n", enable);
1137 enable = enable ? 1 : 0;
1138 REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */
1140 if (wait_for_bit_change(dsidev, DSI_CTRL, 0, enable) != enable) {
1141 DSSERR("Failed to set dsi_if_enable to %d\n", enable);
1148 unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
1150 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1152 return dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk;
1155 static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev)
1157 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1159 return dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk;
1162 static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
1164 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1166 return dsi->current_cinfo.clkin4ddr / 16;
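/*
 * Illustrative note (derived from the ratios used elsewhere in this file):
 * clkin4ddr is the PLL output feeding the DSI PHY.  The clock lane runs at
 * clkin4ddr / 4, each data lane carries clkin4ddr / 2 bits per second, and
 * the high-speed TX byte clock returned above is clkin4ddr / 16, i.e. the
 * per-lane bit rate divided by 8.
 */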
1169 static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
1172 int dsi_module = dsi_get_dsidev_id(dsidev);
1173 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1175 if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) {
1176 /* DSI FCLK source is DSS_CLK_FCK */
1177 r = clk_get_rate(dsi->dss_clk);
1179 /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
1180 r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
1186 static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
1188 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
1189 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1190 unsigned long dsi_fclk;
1191 unsigned lp_clk_div;
1192 unsigned long lp_clk;
1194 lp_clk_div = dssdev->clocks.dsi.lp_clk_div;
1196 if (lp_clk_div == 0 || lp_clk_div > dsi->lpdiv_max)
1199 dsi_fclk = dsi_fclk_rate(dsidev);
1201 lp_clk = dsi_fclk / 2 / lp_clk_div;
1203 DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
1204 dsi->current_cinfo.lp_clk = lp_clk;
1205 dsi->current_cinfo.lp_clk_div = lp_clk_div;
1207 /* LP_CLK_DIVISOR */
1208 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);
1210 /* LP_RX_SYNCHRO_ENABLE */
1211 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);
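/*
 * Worked example (illustrative; the numbers are assumptions, not board
 * values): with dsi_fclk = 38.4 MHz and lp_clk_div = 6 the LP clock is
 * 38400000 / 2 / 6 = 3.2 MHz, i.e. the functional clock divided by two and
 * then by LP_CLK_DIVISOR, matching the computation above.
 */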
1216 static void dsi_enable_scp_clk(struct platform_device *dsidev)
1218 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1220 if (dsi->scp_clk_refcount++ == 0)
1221 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
1224 static void dsi_disable_scp_clk(struct platform_device *dsidev)
1226 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1228 WARN_ON(dsi->scp_clk_refcount == 0);
1229 if (--dsi->scp_clk_refcount == 0)
1230 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
1233 enum dsi_pll_power_state {
1234 DSI_PLL_POWER_OFF = 0x0,
1235 DSI_PLL_POWER_ON_HSCLK = 0x1,
1236 DSI_PLL_POWER_ON_ALL = 0x2,
1237 DSI_PLL_POWER_ON_DIV = 0x3,
1240 static int dsi_pll_power(struct platform_device *dsidev,
1241 enum dsi_pll_power_state state)
1245 /* DSI-PLL power command 0x3 is not working */
1246 if (dss_has_feature(FEAT_DSI_PLL_PWR_BUG) &&
1247 state == DSI_PLL_POWER_ON_DIV)
1248 state = DSI_PLL_POWER_ON_ALL;
1251 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30);
1253 /* PLL_PWR_STATUS */
1254 while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) {
1256 DSSERR("Failed to set DSI PLL power mode to %d\n",
1266 /* calculate clock rates using dividers in cinfo */
1267 static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
1268 struct dsi_clock_info *cinfo)
1270 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
1271 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1273 if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
1276 if (cinfo->regm == 0 || cinfo->regm > dsi->regm_max)
1279 if (cinfo->regm_dispc > dsi->regm_dispc_max)
1282 if (cinfo->regm_dsi > dsi->regm_dsi_max)
1285 if (cinfo->use_sys_clk) {
1286 cinfo->clkin = clk_get_rate(dsi->sys_clk);
1287 /* XXX it is unclear if highfreq should be used
1288 * with DSS_SYS_CLK source also */
1289 cinfo->highfreq = 0;
1291 cinfo->clkin = dispc_mgr_pclk_rate(dssdev->manager->id);
1293 if (cinfo->clkin < 32000000)
1294 cinfo->highfreq = 0;
1296 cinfo->highfreq = 1;
1299 cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
1301 if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
1304 cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;
1306 if (cinfo->clkin4ddr > 1800 * 1000 * 1000)
1309 if (cinfo->regm_dispc > 0)
1310 cinfo->dsi_pll_hsdiv_dispc_clk =
1311 cinfo->clkin4ddr / cinfo->regm_dispc;
1313 cinfo->dsi_pll_hsdiv_dispc_clk = 0;
1315 if (cinfo->regm_dsi > 0)
1316 cinfo->dsi_pll_hsdiv_dsi_clk =
1317 cinfo->clkin4ddr / cinfo->regm_dsi;
1319 cinfo->dsi_pll_hsdiv_dsi_clk = 0;
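/*
 * Worked example (illustrative; the divider values are assumptions, not
 * taken from any board file): with clkin = 26 MHz, regn = 13 and
 * highfreq = 0, fint = 26 MHz / 13 = 2 MHz, inside the 0.75-2.1 MHz window.
 * With regm = 225, clkin4ddr = 2 * 225 * 2 MHz = 900 MHz, and
 * regm_dispc = 6 / regm_dsi = 5 would give 150 MHz and 180 MHz for the two
 * HSDIV outputs.
 */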
1324 int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft,
1325 unsigned long req_pck, struct dsi_clock_info *dsi_cinfo,
1326 struct dispc_clock_info *dispc_cinfo)
1328 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1329 struct dsi_clock_info cur, best;
1330 struct dispc_clock_info best_dispc;
1331 int min_fck_per_pck;
1333 unsigned long dss_sys_clk, max_dss_fck;
1335 dss_sys_clk = clk_get_rate(dsi->sys_clk);
1337 max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
1339 if (req_pck == dsi->cache_req_pck &&
1340 dsi->cache_cinfo.clkin == dss_sys_clk) {
1341 DSSDBG("DSI clock info found from cache\n");
1342 *dsi_cinfo = dsi->cache_cinfo;
1343 dispc_find_clk_divs(is_tft, req_pck,
1344 dsi_cinfo->dsi_pll_hsdiv_dispc_clk, dispc_cinfo);
1348 min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
1350 if (min_fck_per_pck &&
1351 req_pck * min_fck_per_pck > max_dss_fck) {
1352 DSSERR("Requested pixel clock not possible with the current "
1353 "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
1354 "the constraint off.\n");
1355 min_fck_per_pck = 0;
1358 DSSDBG("dsi_pll_calc\n");
1361 memset(&best, 0, sizeof(best));
1362 memset(&best_dispc, 0, sizeof(best_dispc));
1364 memset(&cur, 0, sizeof(cur));
1365 cur.clkin = dss_sys_clk;
1366 cur.use_sys_clk = 1;
1369 /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
1370 /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
1371 /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
1372 for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
1373 if (cur.highfreq == 0)
1374 cur.fint = cur.clkin / cur.regn;
1376 cur.fint = cur.clkin / (2 * cur.regn);
1378 if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
1381 /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
1382 for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
1385 a = 2 * cur.regm * (cur.clkin/1000);
1386 b = cur.regn * (cur.highfreq + 1);
1387 cur.clkin4ddr = a / b * 1000;
1389 if (cur.clkin4ddr > 1800 * 1000 * 1000)
1392 /* dsi_pll_hsdiv_dispc_clk(MHz) =
1393 * DSIPHY(MHz) / regm_dispc < 173MHz/186MHz */
1394 for (cur.regm_dispc = 1; cur.regm_dispc <
1395 dsi->regm_dispc_max; ++cur.regm_dispc) {
1396 struct dispc_clock_info cur_dispc;
1397 cur.dsi_pll_hsdiv_dispc_clk =
1398 cur.clkin4ddr / cur.regm_dispc;
1400 /* this will narrow down the search a bit,
1401 * but still give pixclocks below what was
1403 if (cur.dsi_pll_hsdiv_dispc_clk < req_pck)
1406 if (cur.dsi_pll_hsdiv_dispc_clk > max_dss_fck)
1409 if (min_fck_per_pck &&
1410 cur.dsi_pll_hsdiv_dispc_clk <
1411 req_pck * min_fck_per_pck)
1416 dispc_find_clk_divs(is_tft, req_pck,
1417 cur.dsi_pll_hsdiv_dispc_clk,
1420 if (abs(cur_dispc.pck - req_pck) <
1421 abs(best_dispc.pck - req_pck)) {
1423 best_dispc = cur_dispc;
1425 if (cur_dispc.pck == req_pck)
1433 if (min_fck_per_pck) {
1434 DSSERR("Could not find suitable clock settings.\n"
1435 "Turning FCK/PCK constraint off and"
1437 min_fck_per_pck = 0;
1441 DSSERR("Could not find suitable clock settings.\n");
1446 /* dsi_pll_hsdiv_dsi_clk (regm_dsi) is not used */
1448 best.dsi_pll_hsdiv_dsi_clk = 0;
1453 *dispc_cinfo = best_dispc;
1455 dsi->cache_req_pck = req_pck;
1456 dsi->cache_clk_freq = 0;
1457 dsi->cache_cinfo = best;
1462 int dsi_pll_set_clock_div(struct platform_device *dsidev,
1463 struct dsi_clock_info *cinfo)
1465 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1469 u8 regn_start, regn_end, regm_start, regm_end;
1470 u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end;
1474 dsi->current_cinfo.use_sys_clk = cinfo->use_sys_clk;
1475 dsi->current_cinfo.highfreq = cinfo->highfreq;
1477 dsi->current_cinfo.fint = cinfo->fint;
1478 dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
1479 dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
1480 cinfo->dsi_pll_hsdiv_dispc_clk;
1481 dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk =
1482 cinfo->dsi_pll_hsdiv_dsi_clk;
1484 dsi->current_cinfo.regn = cinfo->regn;
1485 dsi->current_cinfo.regm = cinfo->regm;
1486 dsi->current_cinfo.regm_dispc = cinfo->regm_dispc;
1487 dsi->current_cinfo.regm_dsi = cinfo->regm_dsi;
1489 DSSDBG("DSI Fint %ld\n", cinfo->fint);
1491 DSSDBG("clkin (%s) rate %ld, highfreq %d\n",
1492 cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree",
1496 /* DSIPHY == CLKIN4DDR */
1497 DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu / %d = %lu\n",
1501 cinfo->highfreq + 1,
1504 DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
1505 cinfo->clkin4ddr / 1000 / 1000 / 2);
1507 DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4);
1509 DSSDBG("regm_dispc = %d, %s (%s) = %lu\n", cinfo->regm_dispc,
1510 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
1511 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
1512 cinfo->dsi_pll_hsdiv_dispc_clk);
1513 DSSDBG("regm_dsi = %d, %s (%s) = %lu\n", cinfo->regm_dsi,
1514 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
1515 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
1516 cinfo->dsi_pll_hsdiv_dsi_clk);
1518 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end);
1519 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM, &regm_start, &regm_end);
1520 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DISPC, &regm_dispc_start,
1522 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, &regm_dsi_start,
1525 /* DSI_PLL_AUTOMODE = manual */
1526 REG_FLD_MOD(dsidev, DSI_PLL_CONTROL, 0, 0, 0);
1528 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION1);
1529 l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */
1531 l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end);
1533 l = FLD_MOD(l, cinfo->regm, regm_start, regm_end);
1535 l = FLD_MOD(l, cinfo->regm_dispc > 0 ? cinfo->regm_dispc - 1 : 0,
1536 regm_dispc_start, regm_dispc_end);
1537 /* DSIPROTO_CLOCK_DIV */
1538 l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0,
1539 regm_dsi_start, regm_dsi_end);
1540 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION1, l);
1542 BUG_ON(cinfo->fint < dsi->fint_min || cinfo->fint > dsi->fint_max);
1544 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) {
1545 f = cinfo->fint < 1000000 ? 0x3 :
1546 cinfo->fint < 1250000 ? 0x4 :
1547 cinfo->fint < 1500000 ? 0x5 :
1548 cinfo->fint < 1750000 ? 0x6 :
1552 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
1554 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL))
1555 l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
1556 l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1,
1557 11, 11); /* DSI_PLL_CLKSEL */
1558 l = FLD_MOD(l, cinfo->highfreq,
1559 12, 12); /* DSI_PLL_HIGHFREQ */
1560 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1561 l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
1562 l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
1563 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
1565 REG_FLD_MOD(dsidev, DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */
1567 if (wait_for_bit_change(dsidev, DSI_PLL_GO, 0, 0) != 0) {
1568 DSSERR("dsi pll go bit not going down.\n");
1573 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) {
1574 DSSERR("cannot lock PLL\n");
1579 dsi->pll_locked = 1;
1581 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
1582 l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */
1583 l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */
1584 l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */
1585 l = FLD_MOD(l, 0, 7, 7); /* DSI_PLL_TIGHTPHASELOCK */
1586 l = FLD_MOD(l, 0, 8, 8); /* DSI_PLL_DRIFTGUARDEN */
1587 l = FLD_MOD(l, 0, 10, 9); /* DSI_PLL_LOCKSEL */
1588 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1589 l = FLD_MOD(l, 1, 14, 14); /* DSIPHY_CLKINEN */
1590 l = FLD_MOD(l, 0, 15, 15); /* DSI_BYPASSEN */
1591 l = FLD_MOD(l, 1, 16, 16); /* DSS_CLOCK_EN */
1592 l = FLD_MOD(l, 0, 17, 17); /* DSS_CLOCK_PWDN */
1593 l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */
1594 l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */
1595 l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */
1596 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
1598 DSSDBG("PLL config done\n");
1603 int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
1606 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1608 enum dsi_pll_power_state pwstate;
1610 DSSDBG("PLL init\n");
1612 if (dsi->vdds_dsi_reg == NULL) {
1613 struct regulator *vdds_dsi;
1615 vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
1617 if (IS_ERR(vdds_dsi)) {
1618 DSSERR("can't get VDDS_DSI regulator\n");
1619 return PTR_ERR(vdds_dsi);
1622 dsi->vdds_dsi_reg = vdds_dsi;
1625 dsi_enable_pll_clock(dsidev, 1);
1627 * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
1629 dsi_enable_scp_clk(dsidev);
1631 if (!dsi->vdds_dsi_enabled) {
1632 r = regulator_enable(dsi->vdds_dsi_reg);
1635 dsi->vdds_dsi_enabled = true;
1638 /* XXX PLL does not come out of reset without this... */
1639 dispc_pck_free_enable(1);
1641 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1) != 1) {
1642 DSSERR("PLL not coming out of reset.\n");
1644 dispc_pck_free_enable(0);
1648 /* XXX ... but if left on, we get problems when planes do not
1649 * fill the whole display. No idea about this */
1650 dispc_pck_free_enable(0);
1652 if (enable_hsclk && enable_hsdiv)
1653 pwstate = DSI_PLL_POWER_ON_ALL;
1654 else if (enable_hsclk)
1655 pwstate = DSI_PLL_POWER_ON_HSCLK;
1656 else if (enable_hsdiv)
1657 pwstate = DSI_PLL_POWER_ON_DIV;
1659 pwstate = DSI_PLL_POWER_OFF;
1661 r = dsi_pll_power(dsidev, pwstate);
1666 DSSDBG("PLL init done\n");
1670 if (dsi->vdds_dsi_enabled) {
1671 regulator_disable(dsi->vdds_dsi_reg);
1672 dsi->vdds_dsi_enabled = false;
1675 dsi_disable_scp_clk(dsidev);
1676 dsi_enable_pll_clock(dsidev, 0);
1680 void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
1682 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1684 dsi->pll_locked = 0;
1685 dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
1686 if (disconnect_lanes) {
1687 WARN_ON(!dsi->vdds_dsi_enabled);
1688 regulator_disable(dsi->vdds_dsi_reg);
1689 dsi->vdds_dsi_enabled = false;
1692 dsi_disable_scp_clk(dsidev);
1693 dsi_enable_pll_clock(dsidev, 0);
1695 DSSDBG("PLL uninit done\n");
1698 static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1701 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1702 struct dsi_clock_info *cinfo = &dsi->current_cinfo;
1703 enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
1704 int dsi_module = dsi_get_dsidev_id(dsidev);
1706 dispc_clk_src = dss_get_dispc_clk_source();
1707 dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
1709 if (dsi_runtime_get(dsidev))
1712 seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
1714 seq_printf(s, "dsi pll source = %s\n",
1715 cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree");
1717 seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
1719 seq_printf(s, "CLKIN4DDR\t%-16luregm %u\n",
1720 cinfo->clkin4ddr, cinfo->regm);
1722 seq_printf(s, "%s (%s)\t%-16luregm_dispc %u\t(%s)\n",
1723 dss_get_generic_clk_source_name(dispc_clk_src),
1724 dss_feat_get_clk_source_name(dispc_clk_src),
1725 cinfo->dsi_pll_hsdiv_dispc_clk,
1727 dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
1730 seq_printf(s, "%s (%s)\t%-16luregm_dsi %u\t(%s)\n",
1731 dss_get_generic_clk_source_name(dsi_clk_src),
1732 dss_feat_get_clk_source_name(dsi_clk_src),
1733 cinfo->dsi_pll_hsdiv_dsi_clk,
1735 dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
1738 seq_printf(s, "- DSI%d -\n", dsi_module + 1);
1740 seq_printf(s, "dsi fclk source = %s (%s)\n",
1741 dss_get_generic_clk_source_name(dsi_clk_src),
1742 dss_feat_get_clk_source_name(dsi_clk_src));
1744 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));
1746 seq_printf(s, "DDR_CLK\t\t%lu\n",
1747 cinfo->clkin4ddr / 4);
1749 seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev));
1751 seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk);
1753 dsi_runtime_put(dsidev);
1756 void dsi_dump_clocks(struct seq_file *s)
1758 struct platform_device *dsidev;
1761 for (i = 0; i < MAX_NUM_DSI; i++) {
1762 dsidev = dsi_get_dsidev_from_id(i);
1764 dsi_dump_dsidev_clocks(dsidev, s);
1768 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
1769 static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
1772 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1773 unsigned long flags;
1774 struct dsi_irq_stats stats;
1775 int dsi_module = dsi_get_dsidev_id(dsidev);
1777 spin_lock_irqsave(&dsi->irq_stats_lock, flags);
1779 stats = dsi->irq_stats;
1780 memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
1781 dsi->irq_stats.last_reset = jiffies;
1783 spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);
1785 seq_printf(s, "period %u ms\n",
1786 jiffies_to_msecs(jiffies - stats.last_reset));
1788 seq_printf(s, "irqs %d\n", stats.irq_count);
1790 seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
1792 seq_printf(s, "-- DSI%d interrupts --\n", dsi_module + 1);
1808 PIS(LDO_POWER_GOOD);
1813 seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
1814 stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
1815 stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
1816 stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
1817 stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
1819 seq_printf(s, "-- VC interrupts --\n");
1828 PIS(PP_BUSY_CHANGE);
1832 seq_printf(s, "%-20s %10d\n", #x, \
1833 stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
1835 seq_printf(s, "-- CIO interrupts --\n");
1848 PIS(ERRCONTENTIONLP0_1);
1849 PIS(ERRCONTENTIONLP1_1);
1850 PIS(ERRCONTENTIONLP0_2);
1851 PIS(ERRCONTENTIONLP1_2);
1852 PIS(ERRCONTENTIONLP0_3);
1853 PIS(ERRCONTENTIONLP1_3);
1854 PIS(ULPSACTIVENOT_ALL0);
1855 PIS(ULPSACTIVENOT_ALL1);
1859 static void dsi1_dump_irqs(struct seq_file *s)
1861 struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
1863 dsi_dump_dsidev_irqs(dsidev, s);
1866 static void dsi2_dump_irqs(struct seq_file *s)
1868 struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
1870 dsi_dump_dsidev_irqs(dsidev, s);
1873 void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
1874 const struct file_operations *debug_fops)
1876 struct platform_device *dsidev;
1878 dsidev = dsi_get_dsidev_from_id(0);
1880 debugfs_create_file("dsi1_irqs", S_IRUGO, debugfs_dir,
1881 &dsi1_dump_irqs, debug_fops);
1883 dsidev = dsi_get_dsidev_from_id(1);
1885 debugfs_create_file("dsi2_irqs", S_IRUGO, debugfs_dir,
1886 &dsi2_dump_irqs, debug_fops);
1890 static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
1893 #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))
1895 if (dsi_runtime_get(dsidev))
1897 dsi_enable_scp_clk(dsidev);
1899 DUMPREG(DSI_REVISION);
1900 DUMPREG(DSI_SYSCONFIG);
1901 DUMPREG(DSI_SYSSTATUS);
1902 DUMPREG(DSI_IRQSTATUS);
1903 DUMPREG(DSI_IRQENABLE);
1905 DUMPREG(DSI_COMPLEXIO_CFG1);
1906 DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
1907 DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
1908 DUMPREG(DSI_CLK_CTRL);
1909 DUMPREG(DSI_TIMING1);
1910 DUMPREG(DSI_TIMING2);
1911 DUMPREG(DSI_VM_TIMING1);
1912 DUMPREG(DSI_VM_TIMING2);
1913 DUMPREG(DSI_VM_TIMING3);
1914 DUMPREG(DSI_CLK_TIMING);
1915 DUMPREG(DSI_TX_FIFO_VC_SIZE);
1916 DUMPREG(DSI_RX_FIFO_VC_SIZE);
1917 DUMPREG(DSI_COMPLEXIO_CFG2);
1918 DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
1919 DUMPREG(DSI_VM_TIMING4);
1920 DUMPREG(DSI_TX_FIFO_VC_EMPTINESS);
1921 DUMPREG(DSI_VM_TIMING5);
1922 DUMPREG(DSI_VM_TIMING6);
1923 DUMPREG(DSI_VM_TIMING7);
1924 DUMPREG(DSI_STOPCLK_TIMING);
1926 DUMPREG(DSI_VC_CTRL(0));
1927 DUMPREG(DSI_VC_TE(0));
1928 DUMPREG(DSI_VC_LONG_PACKET_HEADER(0));
1929 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0));
1930 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0));
1931 DUMPREG(DSI_VC_IRQSTATUS(0));
1932 DUMPREG(DSI_VC_IRQENABLE(0));
1934 DUMPREG(DSI_VC_CTRL(1));
1935 DUMPREG(DSI_VC_TE(1));
1936 DUMPREG(DSI_VC_LONG_PACKET_HEADER(1));
1937 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1));
1938 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1));
1939 DUMPREG(DSI_VC_IRQSTATUS(1));
1940 DUMPREG(DSI_VC_IRQENABLE(1));
1942 DUMPREG(DSI_VC_CTRL(2));
1943 DUMPREG(DSI_VC_TE(2));
1944 DUMPREG(DSI_VC_LONG_PACKET_HEADER(2));
1945 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2));
1946 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2));
1947 DUMPREG(DSI_VC_IRQSTATUS(2));
1948 DUMPREG(DSI_VC_IRQENABLE(2));
1950 DUMPREG(DSI_VC_CTRL(3));
1951 DUMPREG(DSI_VC_TE(3));
1952 DUMPREG(DSI_VC_LONG_PACKET_HEADER(3));
1953 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3));
1954 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3));
1955 DUMPREG(DSI_VC_IRQSTATUS(3));
1956 DUMPREG(DSI_VC_IRQENABLE(3));
1958 DUMPREG(DSI_DSIPHY_CFG0);
1959 DUMPREG(DSI_DSIPHY_CFG1);
1960 DUMPREG(DSI_DSIPHY_CFG2);
1961 DUMPREG(DSI_DSIPHY_CFG5);
1963 DUMPREG(DSI_PLL_CONTROL);
1964 DUMPREG(DSI_PLL_STATUS);
1965 DUMPREG(DSI_PLL_GO);
1966 DUMPREG(DSI_PLL_CONFIGURATION1);
1967 DUMPREG(DSI_PLL_CONFIGURATION2);
1969 dsi_disable_scp_clk(dsidev);
1970 dsi_runtime_put(dsidev);
1974 static void dsi1_dump_regs(struct seq_file *s)
1976 struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
1978 dsi_dump_dsidev_regs(dsidev, s);
1981 static void dsi2_dump_regs(struct seq_file *s)
1983 struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
1985 dsi_dump_dsidev_regs(dsidev, s);
1988 void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
1989 const struct file_operations *debug_fops)
1991 struct platform_device *dsidev;
1993 dsidev = dsi_get_dsidev_from_id(0);
1995 debugfs_create_file("dsi1_regs", S_IRUGO, debugfs_dir,
1996 &dsi1_dump_regs, debug_fops);
1998 dsidev = dsi_get_dsidev_from_id(1);
2000 debugfs_create_file("dsi2_regs", S_IRUGO, debugfs_dir,
2001 &dsi2_dump_regs, debug_fops);
2003 enum dsi_cio_power_state {
2004 DSI_COMPLEXIO_POWER_OFF = 0x0,
2005 DSI_COMPLEXIO_POWER_ON = 0x1,
2006 DSI_COMPLEXIO_POWER_ULPS = 0x2,
2009 static int dsi_cio_power(struct platform_device *dsidev,
2010 enum dsi_cio_power_state state)
2015 REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27);
2018 while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1),
2021 DSSERR("failed to set complexio power state to "
2031 /* Number of data lanes present on DSI interface */
2032 static inline int dsi_get_num_data_lanes(struct platform_device *dsidev)
2034 /* DSI on OMAP3 doesn't have register DSI_GNQ, set number
2035 * of data lanes as 2 by default */
2036 if (dss_has_feature(FEAT_DSI_GNQ))
2037 return REG_GET(dsidev, DSI_GNQ, 11, 9); /* NB_DATA_LANES */
2042 /* Number of data lanes used by the dss device */
2043 static inline int dsi_get_num_data_lanes_dssdev(struct omap_dss_device *dssdev)
2045 int num_data_lanes = 0;
2047 if (dssdev->phy.dsi.data1_lane != 0)
2049 if (dssdev->phy.dsi.data2_lane != 0)
2051 if (dssdev->phy.dsi.data3_lane != 0)
2053 if (dssdev->phy.dsi.data4_lane != 0)
2056 return num_data_lanes;
2059 static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
2063 /* line buffer on OMAP3 is 1024 x 24bits */
2064 /* XXX: for some reason using full buffer size causes
2065 * considerable TX slowdown with update sizes that fill the
2067 if (!dss_has_feature(FEAT_DSI_GNQ))
2070 val = REG_GET(dsidev, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */
2074 return 512 * 3; /* 512x24 bits */
2076 return 682 * 3; /* 682x24 bits */
2078 return 853 * 3; /* 853x24 bits */
2080 return 1024 * 3; /* 1024x24 bits */
2082 return 1194 * 3; /* 1194x24 bits */
2084 return 1365 * 3; /* 1365x24 bits */
2090 static void dsi_set_lane_config(struct omap_dss_device *dssdev)
2092 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2094 int num_data_lanes_dssdev = dsi_get_num_data_lanes_dssdev(dssdev);
2096 int clk_lane = dssdev->phy.dsi.clk_lane;
2097 int data1_lane = dssdev->phy.dsi.data1_lane;
2098 int data2_lane = dssdev->phy.dsi.data2_lane;
2099 int clk_pol = dssdev->phy.dsi.clk_pol;
2100 int data1_pol = dssdev->phy.dsi.data1_pol;
2101 int data2_pol = dssdev->phy.dsi.data2_pol;
2103 r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
2104 r = FLD_MOD(r, clk_lane, 2, 0);
2105 r = FLD_MOD(r, clk_pol, 3, 3);
2106 r = FLD_MOD(r, data1_lane, 6, 4);
2107 r = FLD_MOD(r, data1_pol, 7, 7);
2108 r = FLD_MOD(r, data2_lane, 10, 8);
2109 r = FLD_MOD(r, data2_pol, 11, 11);
2110 if (num_data_lanes_dssdev > 2) {
2111 int data3_lane = dssdev->phy.dsi.data3_lane;
2112 int data3_pol = dssdev->phy.dsi.data3_pol;
2114 r = FLD_MOD(r, data3_lane, 14, 12);
2115 r = FLD_MOD(r, data3_pol, 15, 15);
2117 if (num_data_lanes_dssdev > 3) {
2118 int data4_lane = dssdev->phy.dsi.data4_lane;
2119 int data4_pol = dssdev->phy.dsi.data4_pol;
2121 r = FLD_MOD(r, data4_lane, 18, 16);
2122 r = FLD_MOD(r, data4_pol, 19, 19);
2124 dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);
2126 /* The configuration of the DSI complex I/O (number of data lanes,
2127 position, differential order) should not be changed while
2128 DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE bit is set to 1. In order for
2129 the hardware to take into account a new configuration of the complex
2130 I/O (done in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to
2131 follow this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1,
2132 then reset the DSS.DSI_CTRL[0] IF_EN to 0, then set
2133 DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE to 1 and finally set again the
2134 DSS.DSI_CTRL[0] IF_EN bit to 1. If the sequence is not followed, the
2135 DSI complex I/O configuration is unknown. */
2138 REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
2139 REG_FLD_MOD(dsidev, DSI_CTRL, 0, 0, 0);
2140 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20);
2141 REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
2145 static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
2147 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2149 /* convert time in ns to ddr ticks, rounding up */
2150 unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
2151 return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
2154 static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr)
2156 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2158 unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
2159 return ddr * 1000 * 1000 / (ddr_clk / 1000);
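/*
 * Worked example (illustrative, reusing the assumed 900 MHz clkin4ddr from
 * the PLL example above): ddr_clk = 900 MHz / 4 = 225 MHz, so
 * ns2ddr(dsidev, 70) = (70 * 225 + 999) / 1000 = 16 DDR ticks and
 * ddr2ns(dsidev, 16) = 16 * 1000000 / 225000 = 71 ns; ns2ddr rounds up, so
 * the programmed interval is never shorter than the one requested.
 */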
2162 static void dsi_cio_timings(struct platform_device *dsidev)
2165 u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
2166 u32 tlpx_half, tclk_trail, tclk_zero;
2169 /* calculate timings */
2171 /* 1 * DDR_CLK = 2 * UI */
2173 /* min 40ns + 4*UI max 85ns + 6*UI */
2174 ths_prepare = ns2ddr(dsidev, 70) + 2;
2176 /* min 145ns + 10*UI */
2177 ths_prepare_ths_zero = ns2ddr(dsidev, 175) + 2;
2179 /* min max(8*UI, 60ns+4*UI) */
2180 ths_trail = ns2ddr(dsidev, 60) + 5;
2183 ths_exit = ns2ddr(dsidev, 145);
2186 tlpx_half = ns2ddr(dsidev, 25);
2189 tclk_trail = ns2ddr(dsidev, 60) + 2;
2191 /* min 38ns, max 95ns */
2192 tclk_prepare = ns2ddr(dsidev, 65);
2194 /* min tclk-prepare + tclk-zero = 300ns */
2195 tclk_zero = ns2ddr(dsidev, 260);
2197 DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
2198 ths_prepare, ddr2ns(dsidev, ths_prepare),
2199 ths_prepare_ths_zero, ddr2ns(dsidev, ths_prepare_ths_zero));
2200 DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
2201 ths_trail, ddr2ns(dsidev, ths_trail),
2202 ths_exit, ddr2ns(dsidev, ths_exit));
2204 DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
2205 "tclk_zero %u (%uns)\n",
2206 tlpx_half, ddr2ns(dsidev, tlpx_half),
2207 tclk_trail, ddr2ns(dsidev, tclk_trail),
2208 tclk_zero, ddr2ns(dsidev, tclk_zero));
2209 DSSDBG("tclk_prepare %u (%uns)\n",
2210 tclk_prepare, ddr2ns(dsidev, tclk_prepare));
2212 /* program timings */
2214 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
2215 r = FLD_MOD(r, ths_prepare, 31, 24);
2216 r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
2217 r = FLD_MOD(r, ths_trail, 15, 8);
2218 r = FLD_MOD(r, ths_exit, 7, 0);
2219 dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r);
2221 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
2222 r = FLD_MOD(r, tlpx_half, 22, 16);
2223 r = FLD_MOD(r, tclk_trail, 15, 8);
2224 r = FLD_MOD(r, tclk_zero, 7, 0);
2225 dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r);
2227 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
2228 r = FLD_MOD(r, tclk_prepare, 7, 0);
2229 dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r);
2232 static void dsi_cio_enable_lane_override(struct omap_dss_device *dssdev,
2233 enum dsi_lane lanes)
2235 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2236 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2237 int clk_lane = dssdev->phy.dsi.clk_lane;
2238 int data1_lane = dssdev->phy.dsi.data1_lane;
2239 int data2_lane = dssdev->phy.dsi.data2_lane;
2240 int data3_lane = dssdev->phy.dsi.data3_lane;
2241 int data4_lane = dssdev->phy.dsi.data4_lane;
2242 int clk_pol = dssdev->phy.dsi.clk_pol;
2243 int data1_pol = dssdev->phy.dsi.data1_pol;
2244 int data2_pol = dssdev->phy.dsi.data2_pol;
2245 int data3_pol = dssdev->phy.dsi.data3_pol;
2246 int data4_pol = dssdev->phy.dsi.data4_pol;
2249 u8 lptxscp_start = dsi->num_data_lanes == 2 ? 22 : 26;
2251 if (lanes & DSI_CLK_P)
2252 l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 0 : 1));
2253 if (lanes & DSI_CLK_N)
2254 l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 1 : 0));
2256 if (lanes & DSI_DATA1_P)
2257 l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 0 : 1));
2258 if (lanes & DSI_DATA1_N)
2259 l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 1 : 0));
2261 if (lanes & DSI_DATA2_P)
2262 l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 0 : 1));
2263 if (lanes & DSI_DATA2_N)
2264 l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 1 : 0));
2266 if (lanes & DSI_DATA3_P)
2267 l |= 1 << ((data3_lane - 1) * 2 + (data3_pol ? 0 : 1));
2268 if (lanes & DSI_DATA3_N)
2269 l |= 1 << ((data3_lane - 1) * 2 + (data3_pol ? 1 : 0));
2271 if (lanes & DSI_DATA4_P)
2272 l |= 1 << ((data4_lane - 1) * 2 + (data4_pol ? 0 : 1));
2273 if (lanes & DSI_DATA4_N)
2274 l |= 1 << ((data4_lane - 1) * 2 + (data4_pol ? 1 : 0));
2276 * Bits in REGLPTXSCPDAT4TO0DXDY:
2284 /* Set the lane override configuration */
2286 /* REGLPTXSCPDAT4TO0DXDY */
2287 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);
2289 /* Enable lane override */
2292 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 1, 27, 27);
2295 static void dsi_cio_disable_lane_override(struct platform_device *dsidev)
2297 /* Disable lane override */
2298 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
2299 /* Reset the lane override configuration */
2300 /* REGLPTXSCPDAT4TO0DXDY */
2301 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17);
2304 static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev)
2306 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2311 if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
2325 if (dssdev->phy.dsi.clk_lane != 0)
2326 in_use[dssdev->phy.dsi.clk_lane - 1] = true;
2327 if (dssdev->phy.dsi.data1_lane != 0)
2328 in_use[dssdev->phy.dsi.data1_lane - 1] = true;
2329 if (dssdev->phy.dsi.data2_lane != 0)
2330 in_use[dssdev->phy.dsi.data2_lane - 1] = true;
2338 l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
2341 for (i = 0; i < 3; ++i) {
2342 if (!in_use[i] || (l & (1 << bits[i])))
2350 for (i = 0; i < 3; ++i) {
2351 if (!in_use[i] || (l & (1 << bits[i])))
2354 DSSERR("CIO TXCLKESC%d domain not coming "
2355 "out of reset\n", i);
2364 static unsigned dsi_get_lane_mask(struct omap_dss_device *dssdev)
2368 if (dssdev->phy.dsi.clk_lane != 0)
2369 lanes |= 1 << (dssdev->phy.dsi.clk_lane - 1);
2370 if (dssdev->phy.dsi.data1_lane != 0)
2371 lanes |= 1 << (dssdev->phy.dsi.data1_lane - 1);
2372 if (dssdev->phy.dsi.data2_lane != 0)
2373 lanes |= 1 << (dssdev->phy.dsi.data2_lane - 1);
2374 if (dssdev->phy.dsi.data3_lane != 0)
2375 lanes |= 1 << (dssdev->phy.dsi.data3_lane - 1);
2376 if (dssdev->phy.dsi.data4_lane != 0)
2377 lanes |= 1 << (dssdev->phy.dsi.data4_lane - 1);
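/* For example, with the common wiring clk_lane = 1, data1_lane = 2 and
 * data2_lane = 3 (no data3/data4 lanes), the mask built above is 0x7, i.e.
 * bits 0..2 set. The numbers are illustrative only; the real lane
 * assignment comes from the board data in dssdev->phy.dsi.
 */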
2382 static int dsi_cio_init(struct omap_dss_device *dssdev)
2384 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2385 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2387 int num_data_lanes_dssdev = dsi_get_num_data_lanes_dssdev(dssdev);
2392 r = dsi->enable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
2396 dsi_enable_scp_clk(dsidev);
2398 /* A dummy read using the SCP interface to any DSIPHY register is
2399 * required after DSIPHY reset to complete the reset of the DSI complex
2401 dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
2403 if (wait_for_bit_change(dsidev, DSI_DSIPHY_CFG5, 30, 1) != 1) {
2404 DSSERR("CIO SCP Clock domain not coming out of reset.\n");
2406 goto err_scp_clk_dom;
2409 dsi_set_lane_config(dssdev);
2411 /* set TX STOP MODE timer to maximum for this operation */
2412 l = dsi_read_reg(dsidev, DSI_TIMING1);
2413 l = FLD_MOD(l, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2414 l = FLD_MOD(l, 1, 14, 14); /* STOP_STATE_X16_IO */
2415 l = FLD_MOD(l, 1, 13, 13); /* STOP_STATE_X4_IO */
2416 l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */
2417 dsi_write_reg(dsidev, DSI_TIMING1, l);
2419 if (dsi->ulps_enabled) {
2420 u32 lane_mask = DSI_CLK_P | DSI_DATA1_P | DSI_DATA2_P;
2422 DSSDBG("manual ulps exit\n");
2424 /* ULPS is exited by Mark-1 state for 1ms, followed by
2425 * stop state. DSS HW cannot do this via the normal
2426 * ULPS exit sequence, as after reset the DSS HW thinks
2427 * that we are not in ULPS mode, and refuses to send the
2428 * sequence. So we need to send the ULPS exit sequence
2432 if (num_data_lanes_dssdev > 2)
2433 lane_mask |= DSI_DATA3_P;
2435 if (num_data_lanes_dssdev > 3)
2436 lane_mask |= DSI_DATA4_P;
2438 dsi_cio_enable_lane_override(dssdev, lane_mask);
2441 r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON);
2445 if (wait_for_bit_change(dsidev, DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
2446 DSSERR("CIO PWR clock domain not coming out of reset.\n");
2448 goto err_cio_pwr_dom;
2451 dsi_if_enable(dsidev, true);
2452 dsi_if_enable(dsidev, false);
2453 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
2455 r = dsi_cio_wait_tx_clk_esc_reset(dssdev);
2457 goto err_tx_clk_esc_rst;
2459 if (dsi->ulps_enabled) {
2460 /* Keep Mark-1 state for 1ms (as per DSI spec) */
2461 ktime_t wait = ns_to_ktime(1000 * 1000);
2462 set_current_state(TASK_UNINTERRUPTIBLE);
2463 schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
2465 /* Disable the override. The lanes should now be left in the
2466 * LP-11 (stop) state by the HW */
2467 dsi_cio_disable_lane_override(dsidev);
2470 /* FORCE_TX_STOP_MODE_IO */
2471 REG_FLD_MOD(dsidev, DSI_TIMING1, 0, 15, 15);
2473 dsi_cio_timings(dsidev);
2475 dsi->ulps_enabled = false;
2477 DSSDBG("CIO init done\n");
2482 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
2484 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2486 if (dsi->ulps_enabled)
2487 dsi_cio_disable_lane_override(dsidev);
2489 dsi_disable_scp_clk(dsidev);
2490 dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
2494 static void dsi_cio_uninit(struct omap_dss_device *dssdev)
2496 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2497 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2499 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2500 dsi_disable_scp_clk(dsidev);
2501 dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
2504 static void dsi_config_tx_fifo(struct platform_device *dsidev,
2505 enum fifo_size size1, enum fifo_size size2,
2506 enum fifo_size size3, enum fifo_size size4)
2508 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2513 dsi->vc[0].fifo_size = size1;
2514 dsi->vc[1].fifo_size = size2;
2515 dsi->vc[2].fifo_size = size3;
2516 dsi->vc[3].fifo_size = size4;
2518 for (i = 0; i < 4; i++) {
2520 int size = dsi->vc[i].fifo_size;
2522 if (add + size > 4) {
2523 DSSERR("Illegal FIFO configuration\n");
2527 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
2529 /*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */
2533 dsi_write_reg(dsidev, DSI_TX_FIFO_VC_SIZE, r);
2536 static void dsi_config_rx_fifo(struct platform_device *dsidev,
2537 enum fifo_size size1, enum fifo_size size2,
2538 enum fifo_size size3, enum fifo_size size4)
2540 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2545 dsi->vc[0].fifo_size = size1;
2546 dsi->vc[1].fifo_size = size2;
2547 dsi->vc[2].fifo_size = size3;
2548 dsi->vc[3].fifo_size = size4;
2550 for (i = 0; i < 4; i++) {
2552 int size = dsi->vc[i].fifo_size;
2554 if (add + size > 4) {
2555 DSSERR("Illegal FIFO configuration\n");
2559 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
2561 /*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */
2565 dsi_write_reg(dsidev, DSI_RX_FIFO_VC_SIZE, r);
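/* The four virtual channels carve up one shared TX FIFO and one shared RX
 * FIFO, which is why the per-VC sizes above may not add up to more than 4
 * units (the "add + size > 4" check). dsi_proto_config() picks the split
 * used at init time.
 */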
2568 static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev)
2572 r = dsi_read_reg(dsidev, DSI_TIMING1);
2573 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2574 dsi_write_reg(dsidev, DSI_TIMING1, r);
2576 if (wait_for_bit_change(dsidev, DSI_TIMING1, 15, 0) != 0) {
2577 DSSERR("TX_STOP bit not going down\n");
2584 static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel)
2586 return REG_GET(dsidev, DSI_VC_CTRL(channel), 0, 0);
2589 static void dsi_packet_sent_handler_vp(void *data, u32 mask)
2591 struct dsi_packet_sent_handler_data *vp_data =
2592 (struct dsi_packet_sent_handler_data *) data;
2593 struct dsi_data *dsi = dsi_get_dsidrv_data(vp_data->dsidev);
2594 const int channel = dsi->update_channel;
2595 u8 bit = dsi->te_enabled ? 30 : 31;
2597 if (REG_GET(vp_data->dsidev, DSI_VC_TE(channel), bit, bit) == 0)
2598 complete(vp_data->completion);
2601 static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
2603 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2604 DECLARE_COMPLETION_ONSTACK(completion);
2605 struct dsi_packet_sent_handler_data vp_data = { dsidev, &completion };
2609 bit = dsi->te_enabled ? 30 : 31;
2611 r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2612 &vp_data, DSI_VC_IRQ_PACKET_SENT);
2616 /* Wait for completion only if TE_EN/TE_START is still set */
2617 if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) {
2618 if (wait_for_completion_timeout(&completion,
2619 msecs_to_jiffies(10)) == 0) {
2620 DSSERR("Failed to complete previous frame transfer\n");
2626 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2627 &vp_data, DSI_VC_IRQ_PACKET_SENT);
2631 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2632 &vp_data, DSI_VC_IRQ_PACKET_SENT);
2637 static void dsi_packet_sent_handler_l4(void *data, u32 mask)
2639 struct dsi_packet_sent_handler_data *l4_data =
2640 (struct dsi_packet_sent_handler_data *) data;
2641 struct dsi_data *dsi = dsi_get_dsidrv_data(l4_data->dsidev);
2642 const int channel = dsi->update_channel;
2644 if (REG_GET(l4_data->dsidev, DSI_VC_CTRL(channel), 5, 5) == 0)
2645 complete(l4_data->completion);
2648 static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel)
2650 DECLARE_COMPLETION_ONSTACK(completion);
2651 struct dsi_packet_sent_handler_data l4_data = { dsidev, &completion };
2654 r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2655 &l4_data, DSI_VC_IRQ_PACKET_SENT);
2659 /* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
2660 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5)) {
2661 if (wait_for_completion_timeout(&completion,
2662 msecs_to_jiffies(10)) == 0) {
2663 DSSERR("Failed to complete previous l4 transfer\n");
2669 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2670 &l4_data, DSI_VC_IRQ_PACKET_SENT);
2674 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2675 &l4_data, DSI_VC_IRQ_PACKET_SENT);
2680 static int dsi_sync_vc(struct platform_device *dsidev, int channel)
2682 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2684 WARN_ON(!dsi_bus_is_locked(dsidev));
2686 WARN_ON(in_interrupt());
2688 if (!dsi_vc_is_enabled(dsidev, channel))
2691 switch (dsi->vc[channel].source) {
2692 case DSI_VC_SOURCE_VP:
2693 return dsi_sync_vc_vp(dsidev, channel);
2694 case DSI_VC_SOURCE_L4:
2695 return dsi_sync_vc_l4(dsidev, channel);
2701 static int dsi_vc_enable(struct platform_device *dsidev, int channel,
2704 DSSDBG("dsi_vc_enable channel %d, enable %d\n",
2707 enable = enable ? 1 : 0;
2709 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 0, 0);
2711 if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel),
2712 0, enable) != enable) {
2713 DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
2720 static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
2724 DSSDBGF("%d", channel);
2726 r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
2728 if (FLD_GET(r, 15, 15)) /* VC_BUSY */
2729 DSSERR("VC(%d) busy when trying to configure it!\n",
2732 r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
2733 r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */
2734 r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
2735 r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
2736 r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
2737 r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
2738 r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
2739 if (dss_has_feature(FEAT_DSI_VC_OCP_WIDTH))
2740 r = FLD_MOD(r, 3, 11, 10); /* OCP_WIDTH = 32 bit */
2742 r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
2743 r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
2745 dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r);
2748 static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
2749 enum dsi_vc_source source)
2751 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2753 if (dsi->vc[channel].source == source)
2756 DSSDBGF("%d", channel);
2758 dsi_sync_vc(dsidev, channel);
2760 dsi_vc_enable(dsidev, channel, 0);
2763 if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
2764 DSSERR("vc(%d) busy when trying to config for VP\n", channel);
2768 /* SOURCE, 0 = L4, 1 = video port */
2769 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), source, 1, 1);
2771 /* DCS_CMD_ENABLE */
2772 if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
2773 bool enable = source == DSI_VC_SOURCE_VP;
2774 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 30, 30);
2777 dsi_vc_enable(dsidev, channel, 1);
2779 dsi->vc[channel].source = source;
2784 void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
2787 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2789 DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
2791 WARN_ON(!dsi_bus_is_locked(dsidev));
2793 dsi_vc_enable(dsidev, channel, 0);
2794 dsi_if_enable(dsidev, 0);
2796 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 9, 9);
2798 dsi_vc_enable(dsidev, channel, 1);
2799 dsi_if_enable(dsidev, 1);
2801 dsi_force_tx_stop_mode_io(dsidev);
2803 EXPORT_SYMBOL(omapdss_dsi_vc_enable_hs);
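/*
 * A command-mode panel driver typically switches its virtual channel to
 * high speed once the low-power initialization sequence is done. A minimal
 * sketch, where channel is the VC obtained from omap_dsi_request_vc():
 *
 *	omapdss_dsi_vc_enable_hs(dssdev, channel, true);
 */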
2805 static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel)
2807 while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2809 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2810 DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
2814 (val >> 24) & 0xff);
2818 static void dsi_show_rx_ack_with_err(u16 err)
2820 DSSERR("\tACK with ERROR (%#x):\n", err);
2822 DSSERR("\t\tSoT Error\n");
2824 DSSERR("\t\tSoT Sync Error\n");
2826 DSSERR("\t\tEoT Sync Error\n");
2828 DSSERR("\t\tEscape Mode Entry Command Error\n");
2830 DSSERR("\t\tLP Transmit Sync Error\n");
2832 DSSERR("\t\tHS Receive Timeout Error\n");
2834 DSSERR("\t\tFalse Control Error\n");
2836 DSSERR("\t\t(reserved7)\n");
2838 DSSERR("\t\tECC Error, single-bit (corrected)\n");
2840 DSSERR("\t\tECC Error, multi-bit (not corrected)\n");
2841 if (err & (1 << 10))
2842 DSSERR("\t\tChecksum Error\n");
2843 if (err & (1 << 11))
2844 DSSERR("\t\tData type not recognized\n");
2845 if (err & (1 << 12))
2846 DSSERR("\t\tInvalid VC ID\n");
2847 if (err & (1 << 13))
2848 DSSERR("\t\tInvalid Transmission Length\n");
2849 if (err & (1 << 14))
2850 DSSERR("\t\t(reserved14)\n");
2851 if (err & (1 << 15))
2852 DSSERR("\t\tDSI Protocol Violation\n");
2855 static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
2858 /* RX_FIFO_NOT_EMPTY */
2859 while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2862 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2863 DSSERR("\trawval %#08x\n", val);
2864 dt = FLD_GET(val, 5, 0);
2865 if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
2866 u16 err = FLD_GET(val, 23, 8);
2867 dsi_show_rx_ack_with_err(err);
2868 } else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE) {
2869 DSSERR("\tDCS short response, 1 byte: %#x\n",
2870 FLD_GET(val, 23, 8));
2871 } else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE) {
2872 DSSERR("\tDCS short response, 2 byte: %#x\n",
2873 FLD_GET(val, 23, 8));
2874 } else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) {
2875 DSSERR("\tDCS long response, len %d\n",
2876 FLD_GET(val, 23, 8));
2877 dsi_vc_flush_long_data(dsidev, channel);
2879 DSSERR("\tunknown datatype 0x%02x\n", dt);
2885 static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
2887 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2889 if (dsi->debug_write || dsi->debug_read)
2890 DSSDBG("dsi_vc_send_bta %d\n", channel);
2892 WARN_ON(!dsi_bus_is_locked(dsidev));
2894 /* RX_FIFO_NOT_EMPTY */
2895 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2896 DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
2897 dsi_vc_flush_receive_data(dsidev, channel);
2900 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
2905 int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
2907 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2908 DECLARE_COMPLETION_ONSTACK(completion);
2912 r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler,
2913 &completion, DSI_VC_IRQ_BTA);
2917 r = dsi_register_isr(dsidev, dsi_completion_handler, &completion,
2918 DSI_IRQ_ERROR_MASK);
2922 r = dsi_vc_send_bta(dsidev, channel);
2926 if (wait_for_completion_timeout(&completion,
2927 msecs_to_jiffies(500)) == 0) {
2928 DSSERR("Failed to receive BTA\n");
2933 err = dsi_get_errors(dsidev);
2935 DSSERR("Error while sending BTA: %x\n", err);
2940 dsi_unregister_isr(dsidev, dsi_completion_handler, &completion,
2941 DSI_IRQ_ERROR_MASK);
2943 dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler,
2944 &completion, DSI_VC_IRQ_BTA);
2948 EXPORT_SYMBOL(dsi_vc_send_bta_sync);
2950 static inline void dsi_vc_write_long_header(struct platform_device *dsidev,
2951 int channel, u8 data_type, u16 len, u8 ecc)
2953 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2957 WARN_ON(!dsi_bus_is_locked(dsidev));
2959 data_id = data_type | dsi->vc[channel].vc_id << 6;
2961 val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
2962 FLD_VAL(ecc, 31, 24);
2964 dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(channel), val);
2967 static inline void dsi_vc_write_long_payload(struct platform_device *dsidev,
2968 int channel, u8 b1, u8 b2, u8 b3, u8 b4)
2972 val = b4 << 24 | b3 << 16 | b2 << 8 | b1 << 0;
2974 /* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
2975 b1, b2, b3, b4, val); */
2977 dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
2980 static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
2981 u8 data_type, u8 *data, u16 len, u8 ecc)
2984 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2990 if (dsi->debug_write)
2991 DSSDBG("dsi_vc_send_long, %d bytes\n", len);
2994 if (dsi->vc[channel].fifo_size * 32 * 4 < len + 4) {
2995 DSSERR("unable to send long packet: packet too long.\n");
2999 dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);
3001 dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc);
3004 for (i = 0; i < len >> 2; i++) {
3005 if (dsi->debug_write)
3006 DSSDBG("\tsending full packet %d\n", i);
3013 dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, b4);
3018 b1 = 0; b2 = 0; b3 = 0;
3020 if (dsi->debug_write)
3021 DSSDBG("\tsending remainder bytes %d\n", i);
3038 dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, 0);
3044 static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
3045 u8 data_type, u16 data, u8 ecc)
3047 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3051 WARN_ON(!dsi_bus_is_locked(dsidev));
3053 if (dsi->debug_write)
3054 DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
3056 data_type, data & 0xff, (data >> 8) & 0xff);
3058 dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);
3060 if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) {
3061 DSSERR("ERROR FIFO FULL, aborting transfer\n");
3065 data_id = data_type | dsi->vc[channel].vc_id << 6;
3067 r = (data_id << 0) | (data << 8) | (ecc << 24);
3069 dsi_write_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel), r);
3074 int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel)
3076 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3077 u8 nullpkg[] = {0, 0, 0, 0};
3079 return dsi_vc_send_long(dsidev, channel, MIPI_DSI_NULL_PACKET, nullpkg,
3082 EXPORT_SYMBOL(dsi_vc_send_null);
3084 static int dsi_vc_write_nosync_common(struct omap_dss_device *dssdev,
3085 int channel, u8 *data, int len, enum dss_dsi_content_type type)
3087 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3091 BUG_ON(type == DSS_DSI_CONTENT_DCS);
3092 r = dsi_vc_send_short(dsidev, channel,
3093 MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM, 0, 0);
3094 } else if (len == 1) {
3095 r = dsi_vc_send_short(dsidev, channel,
3096 type == DSS_DSI_CONTENT_GENERIC ?
3097 MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
3098 MIPI_DSI_DCS_SHORT_WRITE, data[0], 0);
3099 } else if (len == 2) {
3100 r = dsi_vc_send_short(dsidev, channel,
3101 type == DSS_DSI_CONTENT_GENERIC ?
3102 MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
3103 MIPI_DSI_DCS_SHORT_WRITE_PARAM,
3104 data[0] | (data[1] << 8), 0);
3106 r = dsi_vc_send_long(dsidev, channel,
3107 type == DSS_DSI_CONTENT_GENERIC ?
3108 MIPI_DSI_GENERIC_LONG_WRITE :
3109 MIPI_DSI_DCS_LONG_WRITE, data, len, 0);
3115 int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
3118 return dsi_vc_write_nosync_common(dssdev, channel, data, len,
3119 DSS_DSI_CONTENT_DCS);
3121 EXPORT_SYMBOL(dsi_vc_dcs_write_nosync);
3123 int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel,
3126 return dsi_vc_write_nosync_common(dssdev, channel, data, len,
3127 DSS_DSI_CONTENT_GENERIC);
3129 EXPORT_SYMBOL(dsi_vc_generic_write_nosync);
3131 static int dsi_vc_write_common(struct omap_dss_device *dssdev, int channel,
3132 u8 *data, int len, enum dss_dsi_content_type type)
3134 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3137 r = dsi_vc_write_nosync_common(dssdev, channel, data, len, type);
3141 r = dsi_vc_send_bta_sync(dssdev, channel);
3145 /* RX_FIFO_NOT_EMPTY */
3146 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
3147 DSSERR("rx fifo not empty after write, dumping data:\n");
3148 dsi_vc_flush_receive_data(dsidev, channel);
3155 DSSERR("dsi_vc_write_common(ch %d, cmd 0x%02x, len %d) failed\n",
3156 channel, data[0], len);
3160 int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
3163 return dsi_vc_write_common(dssdev, channel, data, len,
3164 DSS_DSI_CONTENT_DCS);
3166 EXPORT_SYMBOL(dsi_vc_dcs_write);
3168 int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8 *data,
3171 return dsi_vc_write_common(dssdev, channel, data, len,
3172 DSS_DSI_CONTENT_GENERIC);
3174 EXPORT_SYMBOL(dsi_vc_generic_write);
3176 int dsi_vc_dcs_write_0(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd)
3178 return dsi_vc_dcs_write(dssdev, channel, &dcs_cmd, 1);
3180 EXPORT_SYMBOL(dsi_vc_dcs_write_0);
3182 int dsi_vc_generic_write_0(struct omap_dss_device *dssdev, int channel)
3184 return dsi_vc_generic_write(dssdev, channel, NULL, 0);
3186 EXPORT_SYMBOL(dsi_vc_generic_write_0);
3188 int dsi_vc_dcs_write_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3194 return dsi_vc_dcs_write(dssdev, channel, buf, 2);
3196 EXPORT_SYMBOL(dsi_vc_dcs_write_1);
3198 int dsi_vc_generic_write_1(struct omap_dss_device *dssdev, int channel,
3201 return dsi_vc_generic_write(dssdev, channel, &param, 1);
3203 EXPORT_SYMBOL(dsi_vc_generic_write_1);
3205 int dsi_vc_generic_write_2(struct omap_dss_device *dssdev, int channel,
3206 u8 param1, u8 param2)
3211 return dsi_vc_generic_write(dssdev, channel, buf, 2);
3213 EXPORT_SYMBOL(dsi_vc_generic_write_2);
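/*
 * Typical use of the short-write helpers from a command-mode panel driver.
 * This is only a sketch: the DCS opcodes come from <video/mipi_display.h>
 * and the pixel format value is panel-specific.
 *
 *	r = dsi_vc_dcs_write_0(dssdev, channel, MIPI_DCS_EXIT_SLEEP_MODE);
 *	if (!r)
 *		r = dsi_vc_dcs_write_1(dssdev, channel,
 *				MIPI_DCS_SET_PIXEL_FORMAT, 0x7);
 */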
3215 static int dsi_vc_dcs_send_read_request(struct omap_dss_device *dssdev,
3216 int channel, u8 dcs_cmd)
3218 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3219 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3222 if (dsi->debug_read)
3223 DSSDBG("dsi_vc_dcs_send_read_request(ch%d, dcs_cmd %x)\n",
3226 r = dsi_vc_send_short(dsidev, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0);
3228 DSSERR("dsi_vc_dcs_send_read_request(ch %d, cmd 0x%02x)"
3229 " failed\n", channel, dcs_cmd);
3236 static int dsi_vc_generic_send_read_request(struct omap_dss_device *dssdev,
3237 int channel, u8 *reqdata, int reqlen)
3239 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3240 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3245 if (dsi->debug_read)
3246 DSSDBG("dsi_vc_generic_send_read_request(ch %d, reqlen %d)\n",
3250 data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
3252 } else if (reqlen == 1) {
3253 data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
3255 } else if (reqlen == 2) {
3256 data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
3257 data = reqdata[0] | (reqdata[1] << 8);
3262 r = dsi_vc_send_short(dsidev, channel, data_type, data, 0);
3264 DSSERR("dsi_vc_generic_send_read_request(ch %d, reqlen %d)"
3265 " failed\n", channel, reqlen);
3272 static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
3273 u8 *buf, int buflen, enum dss_dsi_content_type type)
3275 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3280 /* RX_FIFO_NOT_EMPTY */
3281 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20) == 0) {
3282 DSSERR("RX fifo empty when trying to read.\n");
3287 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
3288 if (dsi->debug_read)
3289 DSSDBG("\theader: %08x\n", val);
3290 dt = FLD_GET(val, 5, 0);
3291 if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
3292 u16 err = FLD_GET(val, 23, 8);
3293 dsi_show_rx_ack_with_err(err);
3297 } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
3298 MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE :
3299 MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE)) {
3300 u8 data = FLD_GET(val, 15, 8);
3301 if (dsi->debug_read)
3302 DSSDBG("\t%s short response, 1 byte: %02x\n",
3303 type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
3314 } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
3315 MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE :
3316 MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE)) {
3317 u16 data = FLD_GET(val, 23, 8);
3318 if (dsi->debug_read)
3319 DSSDBG("\t%s short response, 2 byte: %04x\n",
3320 type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
3328 buf[0] = data & 0xff;
3329 buf[1] = (data >> 8) & 0xff;
3332 } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
3333 MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE :
3334 MIPI_DSI_RX_DCS_LONG_READ_RESPONSE)) {
3336 int len = FLD_GET(val, 23, 8);
3337 if (dsi->debug_read)
3338 DSSDBG("\t%s long response, len %d\n",
3339 type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
3347 /* two byte checksum ends the packet, not included in len */
3348 for (w = 0; w < len + 2;) {
3350 val = dsi_read_reg(dsidev,
3351 DSI_VC_SHORT_PACKET_HEADER(channel));
3352 if (dsi->debug_read)
3353 DSSDBG("\t\t%02x %02x %02x %02x\n",
3357 (val >> 24) & 0xff);
3359 for (b = 0; b < 4; ++b) {
3361 buf[w] = (val >> (b * 8)) & 0xff;
3362 /* we discard the 2 byte checksum */
3369 DSSERR("\tunknown datatype 0x%02x\n", dt);
3376 DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel,
3377 type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");
3382 int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3383 u8 *buf, int buflen)
3385 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3388 r = dsi_vc_dcs_send_read_request(dssdev, channel, dcs_cmd);
3392 r = dsi_vc_send_bta_sync(dssdev, channel);
3396 r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
3397 DSS_DSI_CONTENT_DCS);
3408 DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n", channel, dcs_cmd);
3411 EXPORT_SYMBOL(dsi_vc_dcs_read);
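/*
 * Example (sketch): reading the one-byte DCS power mode register from the
 * panel. MIPI_DCS_GET_POWER_MODE comes from <video/mipi_display.h>; a
 * negative return value indicates an error.
 *
 *	u8 mode;
 *
 *	r = dsi_vc_dcs_read(dssdev, channel, MIPI_DCS_GET_POWER_MODE,
 *			&mode, 1);
 */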
3413 static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
3414 u8 *reqdata, int reqlen, u8 *buf, int buflen)
3416 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3419 r = dsi_vc_generic_send_read_request(dssdev, channel, reqdata, reqlen);
3423 r = dsi_vc_send_bta_sync(dssdev, channel);
3427 r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
3428 DSS_DSI_CONTENT_GENERIC);
3440 int dsi_vc_generic_read_0(struct omap_dss_device *dssdev, int channel, u8 *buf,
3445 r = dsi_vc_generic_read(dssdev, channel, NULL, 0, buf, buflen);
3447 DSSERR("dsi_vc_generic_read_0(ch %d) failed\n", channel);
3453 EXPORT_SYMBOL(dsi_vc_generic_read_0);
3455 int dsi_vc_generic_read_1(struct omap_dss_device *dssdev, int channel, u8 param,
3456 u8 *buf, int buflen)
3460 r = dsi_vc_generic_read(dssdev, channel, &param, 1, buf, buflen);
3462 DSSERR("dsi_vc_generic_read_1(ch %d) failed\n", channel);
3468 EXPORT_SYMBOL(dsi_vc_generic_read_1);
3470 int dsi_vc_generic_read_2(struct omap_dss_device *dssdev, int channel,
3471 u8 param1, u8 param2, u8 *buf, int buflen)
3476 reqdata[0] = param1;
3477 reqdata[1] = param2;
3479 r = dsi_vc_generic_read(dssdev, channel, reqdata, 2, buf, buflen);
3481 DSSERR("dsi_vc_generic_read_2(ch %d) failed\n", channel);
3487 EXPORT_SYMBOL(dsi_vc_generic_read_2);
3489 int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
3492 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3494 return dsi_vc_send_short(dsidev, channel,
3495 MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, len, 0);
3497 EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size);
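/*
 * A peripheral only returns as many bytes as its current maximum return
 * packet size allows (typically a single byte after reset), so longer
 * reads are bracketed like this (sketch; 0xa1 stands in for a
 * panel-specific register):
 *
 *	dsi_vc_set_max_rx_packet_size(dssdev, channel, sizeof(buf));
 *	r = dsi_vc_dcs_read(dssdev, channel, 0xa1, buf, sizeof(buf));
 *	dsi_vc_set_max_rx_packet_size(dssdev, channel, 1);
 */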
3499 static int dsi_enter_ulps(struct platform_device *dsidev)
3501 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3502 DECLARE_COMPLETION_ONSTACK(completion);
3507 WARN_ON(!dsi_bus_is_locked(dsidev));
3509 WARN_ON(dsi->ulps_enabled);
3511 if (dsi->ulps_enabled)
3514 if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) {
3515 DSSERR("DDR_CLK_ALWAYS_ON enabled when entering ULPS\n");
3519 dsi_sync_vc(dsidev, 0);
3520 dsi_sync_vc(dsidev, 1);
3521 dsi_sync_vc(dsidev, 2);
3522 dsi_sync_vc(dsidev, 3);
3524 dsi_force_tx_stop_mode_io(dsidev);
3526 dsi_vc_enable(dsidev, 0, false);
3527 dsi_vc_enable(dsidev, 1, false);
3528 dsi_vc_enable(dsidev, 2, false);
3529 dsi_vc_enable(dsidev, 3, false);
3531 if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */
3532 DSSERR("HS busy when enabling ULPS\n");
3536 if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */
3537 DSSERR("LP busy when enabling ULPS\n");
3541 r = dsi_register_isr_cio(dsidev, dsi_completion_handler, &completion,
3542 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3546 /* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
3547 /* LANEx_ULPS_SIG2 */
3548 REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (1 << 0) | (1 << 1) | (1 << 2),
3551 if (wait_for_completion_timeout(&completion,
3552 msecs_to_jiffies(1000)) == 0) {
3553 DSSERR("ULPS enable timeout\n");
3558 dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
3559 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3561 /* Reset LANEx_ULPS_SIG2 */
3562 REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (0 << 0) | (0 << 1) | (0 << 2),
3565 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);
3567 dsi_if_enable(dsidev, false);
3569 dsi->ulps_enabled = true;
3574 dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
3575 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3579 static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
3580 unsigned ticks, bool x4, bool x16)
3583 unsigned long total_ticks;
3586 BUG_ON(ticks > 0x1fff);
3588 /* ticks in DSI_FCK */
3589 fck = dsi_fclk_rate(dsidev);
3591 r = dsi_read_reg(dsidev, DSI_TIMING2);
3592 r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */
3593 r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */
3594 r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */
3595 r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */
3596 dsi_write_reg(dsidev, DSI_TIMING2, r);
3598 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
3600 DSSDBG("LP_RX_TO %lu ticks (%#x%s%s) = %lu ns\n",
3602 ticks, x4 ? " x4" : "", x16 ? " x16" : "",
3603 (total_ticks * 1000) / (fck / 1000 / 1000));
3606 static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks,
3610 unsigned long total_ticks;
3613 BUG_ON(ticks > 0x1fff);
3615 /* ticks in DSI_FCK */
3616 fck = dsi_fclk_rate(dsidev);
3618 r = dsi_read_reg(dsidev, DSI_TIMING1);
3619 r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
3620 r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */
3621 r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */
3622 r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */
3623 dsi_write_reg(dsidev, DSI_TIMING1, r);
3625 total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1);
3627 DSSDBG("TA_TO %lu ticks (%#x%s%s) = %lu ns\n",
3629 ticks, x8 ? " x8" : "", x16 ? " x16" : "",
3630 (total_ticks * 1000) / (fck / 1000 / 1000));
3633 static void dsi_set_stop_state_counter(struct platform_device *dsidev,
3634 unsigned ticks, bool x4, bool x16)
3637 unsigned long total_ticks;
3640 BUG_ON(ticks > 0x1fff);
3642 /* ticks in DSI_FCK */
3643 fck = dsi_fclk_rate(dsidev);
3645 r = dsi_read_reg(dsidev, DSI_TIMING1);
3646 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
3647 r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */
3648 r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */
3649 r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */
3650 dsi_write_reg(dsidev, DSI_TIMING1, r);
3652 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
3654 DSSDBG("STOP_STATE_COUNTER %lu ticks (%#x%s%s) = %lu ns\n",
3656 ticks, x4 ? " x4" : "", x16 ? " x16" : "",
3657 (total_ticks * 1000) / (fck / 1000 / 1000));
3660 static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
3661 unsigned ticks, bool x4, bool x16)
3664 unsigned long total_ticks;
3667 BUG_ON(ticks > 0x1fff);
3669 /* ticks in TxByteClkHS */
3670 fck = dsi_get_txbyteclkhs(dsidev);
3672 r = dsi_read_reg(dsidev, DSI_TIMING2);
3673 r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */
3674 r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */
3675 r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (multiplies by 4, not 8) */
3676 r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */
3677 dsi_write_reg(dsidev, DSI_TIMING2, r);
3679 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
3681 DSSDBG("HS_TX_TO %lu ticks (%#x%s%s) = %lu ns\n",
3683 ticks, x4 ? " x4" : "", x16 ? " x16" : "",
3684 (total_ticks * 1000) / (fck / 1000 / 1000));
3686 static int dsi_proto_config(struct omap_dss_device *dssdev)
3688 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3692 dsi_config_tx_fifo(dsidev, DSI_FIFO_SIZE_32,
3697 dsi_config_rx_fifo(dsidev, DSI_FIFO_SIZE_32,
3702 /* XXX what values for the timeouts? */
3703 dsi_set_stop_state_counter(dsidev, 0x1000, false, false);
3704 dsi_set_ta_timeout(dsidev, 0x1fff, true, true);
3705 dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true);
3706 dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true);
3708 switch (dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt)) {
3722 r = dsi_read_reg(dsidev, DSI_CTRL);
3723 r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */
3724 r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */
3725 r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */
3726 r = FLD_MOD(r, 1, 4, 4); /* VP_CLK_RATIO, always 1, see errata */
3727 r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
3728 r = FLD_MOD(r, 0, 8, 8); /* VP_CLK_POL */
3729 r = FLD_MOD(r, 2, 13, 12); /* LINE_BUFFER, 2 lines */
3730 r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */
3731 r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */
3732 if (!dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
3733 r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
3734 /* DCS_CMD_CODE, 1=start, 0=continue */
3735 r = FLD_MOD(r, 0, 25, 25);
3738 dsi_write_reg(dsidev, DSI_CTRL, r);
3740 dsi_vc_initial_config(dsidev, 0);
3741 dsi_vc_initial_config(dsidev, 1);
3742 dsi_vc_initial_config(dsidev, 2);
3743 dsi_vc_initial_config(dsidev, 3);
3748 static void dsi_proto_timings(struct omap_dss_device *dssdev)
3750 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3751 unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
3752 unsigned tclk_pre, tclk_post;
3753 unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
3754 unsigned ths_trail, ths_exit;
3755 unsigned ddr_clk_pre, ddr_clk_post;
3756 unsigned enter_hs_mode_lat, exit_hs_mode_lat;
3760 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
3761 ths_prepare = FLD_GET(r, 31, 24);
3762 ths_prepare_ths_zero = FLD_GET(r, 23, 16);
3763 ths_zero = ths_prepare_ths_zero - ths_prepare;
3764 ths_trail = FLD_GET(r, 15, 8);
3765 ths_exit = FLD_GET(r, 7, 0);
3767 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
3768 tlpx = FLD_GET(r, 22, 16) * 2;
3769 tclk_trail = FLD_GET(r, 15, 8);
3770 tclk_zero = FLD_GET(r, 7, 0);
3772 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
3773 tclk_prepare = FLD_GET(r, 7, 0);
3777 /* min 60ns + 52*UI */
3778 tclk_post = ns2ddr(dsidev, 60) + 26;
3780 ths_eot = DIV_ROUND_UP(4, dsi_get_num_data_lanes_dssdev(dssdev));
3782 ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
3784 ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot;
3786 BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
3787 BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);
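/* For example, with hypothetical PHY values tclk_post = 34, ths_trail = 18
 * and two data lanes (ths_eot = DIV_ROUND_UP(4, 2) = 2):
 * ddr_clk_post = DIV_ROUND_UP(34 + 18, 4) + 2 = 13 + 2 = 15, which is the
 * value programmed into DSI_CLK_TIMING below.
 */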
3789 r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
3790 r = FLD_MOD(r, ddr_clk_pre, 15, 8);
3791 r = FLD_MOD(r, ddr_clk_post, 7, 0);
3792 dsi_write_reg(dsidev, DSI_CLK_TIMING, r);
3794 DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
3798 enter_hs_mode_lat = 1 + DIV_ROUND_UP(tlpx, 4) +
3799 DIV_ROUND_UP(ths_prepare, 4) +
3800 DIV_ROUND_UP(ths_zero + 3, 4);
3802 exit_hs_mode_lat = DIV_ROUND_UP(ths_trail + ths_exit, 4) + 1 + ths_eot;
3804 r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
3805 FLD_VAL(exit_hs_mode_lat, 15, 0);
3806 dsi_write_reg(dsidev, DSI_VM_TIMING7, r);
3808 DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
3809 enter_hs_mode_lat, exit_hs_mode_lat);
3812 static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
3813 u16 x, u16 y, u16 w, u16 h)
3815 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3816 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3821 unsigned packet_payload;
3822 unsigned packet_len;
3825 const unsigned channel = dsi->update_channel;
3826 const unsigned line_buf_size = dsi_get_line_buf_size(dsidev);
3828 DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
3831 dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_VP);
3833 bytespp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt) / 8;
3834 bytespl = w * bytespp;
3835 bytespf = bytespl * h;
3837 /* NOTE: packet_payload has to be equal to N * bytespl, where N is
3838 * number of lines in a packet. See errata about VP_CLK_RATIO */
3840 if (bytespf < line_buf_size)
3841 packet_payload = bytespf;
3843 packet_payload = (line_buf_size) / bytespl * bytespl;
3845 packet_len = packet_payload + 1; /* 1 byte for DCS cmd */
3846 total_len = (bytespf / packet_payload) * packet_len;
3848 if (bytespf % packet_payload)
3849 total_len += (bytespf % packet_payload) + 1;
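/* For example (hypothetical numbers): a 24 bpp panel with w = 480 and
 * h = 800 gives bytespl = 1440 and bytespf = 1152000. With a 4096-byte
 * line buffer, packet_payload = 4096 / 1440 * 1440 = 2880 (two full
 * lines), packet_len = 2881, and since 1152000 is an exact multiple of
 * 2880, total_len = 400 * 2881 = 1152400 bytes.
 */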
3851 l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
3852 dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
3854 dsi_vc_write_long_header(dsidev, channel, MIPI_DSI_DCS_LONG_WRITE,
3857 if (dsi->te_enabled)
3858 l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
3860 l = FLD_MOD(l, 1, 31, 31); /* TE_START */
3861 dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
3863 /* We put SIDLEMODE to no-idle for the duration of the transfer,
3864 * because DSS interrupts are not capable of waking up the CPU and the
3865 * framedone interrupt could be delayed for quite a long time. I think
3866 * the same goes for any DSS interrupts, but for some reason I have not
3867 * seen the problem anywhere else than here.
3869 dispc_disable_sidle();
3871 dsi_perf_mark_start(dsidev);
3873 r = schedule_delayed_work(&dsi->framedone_timeout_work,
3874 msecs_to_jiffies(250));
3877 dss_start_update(dssdev);
3879 if (dsi->te_enabled) {
3880 /* disable LP_RX_TO, so that we can receive TE. Time to wait
3881 * for TE is longer than the timer allows */
3882 REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
3884 dsi_vc_send_bta(dsidev, channel);
3886 #ifdef DSI_CATCH_MISSING_TE
3887 mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
3892 #ifdef DSI_CATCH_MISSING_TE
3893 static void dsi_te_timeout(unsigned long arg)
3895 DSSERR("TE not received for 250ms!\n");
3899 static void dsi_handle_framedone(struct platform_device *dsidev, int error)
3901 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3903 /* SIDLEMODE back to smart-idle */
3904 dispc_enable_sidle();
3906 if (dsi->te_enabled) {
3907 /* enable LP_RX_TO again after the TE */
3908 REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
3911 dsi->framedone_callback(error, dsi->framedone_data);
3914 dsi_perf_show(dsidev, "DISPC");
3917 static void dsi_framedone_timeout_work_callback(struct work_struct *work)
3919 struct dsi_data *dsi = container_of(work, struct dsi_data,
3920 framedone_timeout_work.work);
3921 /* XXX While extremely unlikely, we could get FRAMEDONE interrupt after
3922 * 250ms which would conflict with this timeout work. What should be
3923 * done is first cancel the transfer on the HW, and then cancel the
3924 * possibly scheduled framedone work. However, cancelling the transfer
3925 * on the HW is buggy, and would probably require resetting the whole
3928 DSSERR("Framedone not received for 250ms!\n");
3930 dsi_handle_framedone(dsi->pdev, -ETIMEDOUT);
3933 static void dsi_framedone_irq_callback(void *data, u32 mask)
3935 struct omap_dss_device *dssdev = (struct omap_dss_device *) data;
3936 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3937 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3939 /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
3940 * turns itself off. However, DSI still has the pixels in its buffers,
3941 * and is sending the data.
3944 __cancel_delayed_work(&dsi->framedone_timeout_work);
3946 dsi_handle_framedone(dsidev, 0);
3948 #ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
3949 dispc_fake_vsync_irq();
3953 int omap_dsi_prepare_update(struct omap_dss_device *dssdev,
3954 u16 *x, u16 *y, u16 *w, u16 *h,
3955 bool enlarge_update_area)
3957 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3960 dssdev->driver->get_resolution(dssdev, &dw, &dh);
3962 if (*x > dw || *y > dh)
3974 if (*w == 0 || *h == 0)
3977 dsi_perf_mark_setup(dsidev);
3979 dss_setup_partial_planes(dssdev, x, y, w, h,
3980 enlarge_update_area);
3981 dispc_mgr_set_lcd_size(dssdev->manager->id, *w, *h);
3985 EXPORT_SYMBOL(omap_dsi_prepare_update);
3987 int omap_dsi_update(struct omap_dss_device *dssdev,
3989 u16 x, u16 y, u16 w, u16 h,
3990 void (*callback)(int, void *), void *data)
3992 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3993 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3995 dsi->update_channel = channel;
3997 /* OMAP DSS cannot send updates of odd widths.
3998 * omap_dsi_prepare_update() makes the widths even, but add a BUG_ON
3999 * here to make sure we catch erroneous updates. Otherwise we would only
4000 * see a rather obscure HW error as the DSS halts. */
4003 dsi->framedone_callback = callback;
4004 dsi->framedone_data = data;
4006 dsi->update_region.x = x;
4007 dsi->update_region.y = y;
4008 dsi->update_region.w = w;
4009 dsi->update_region.h = h;
4010 dsi->update_region.device = dssdev;
4012 dsi_update_screen_dispc(dssdev, x, y, w, h);
4016 EXPORT_SYMBOL(omap_dsi_update);
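/*
 * The manual-update flow as seen from a command-mode panel driver, as a
 * sketch (framedone_cb, panel_w and panel_h are hypothetical names in that
 * driver):
 *
 *	u16 x = 0, y = 0, w = panel_w, h = panel_h;
 *
 *	r = omap_dsi_prepare_update(dssdev, &x, &y, &w, &h, true);
 *	if (!r)
 *		r = omap_dsi_update(dssdev, channel, x, y, w, h,
 *				framedone_cb, dssdev);
 *
 * framedone_cb(error, data) runs once the transfer completes or the
 * framedone timeout work fires.
 */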
4020 static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
4025 irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
4026 DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
4028 r = omap_dispc_register_isr(dsi_framedone_irq_callback, (void *) dssdev,
4031 DSSERR("can't get FRAMEDONE irq\n");
4035 dispc_mgr_set_lcd_display_type(dssdev->manager->id,
4036 OMAP_DSS_LCD_DISPLAY_TFT);
4038 dispc_mgr_enable_stallmode(dssdev->manager->id, true);
4039 dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 1);
4041 dispc_mgr_set_tft_data_lines(dssdev->manager->id,
4042 dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt));
4045 struct omap_video_timings timings = {
4054 dispc_mgr_set_lcd_timings(dssdev->manager->id, &timings);
4060 static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
4064 irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
4065 DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
4067 omap_dispc_unregister_isr(dsi_framedone_irq_callback, (void *) dssdev,
4071 static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
4073 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4074 struct dsi_clock_info cinfo;
4077 /* we always use DSS_CLK_SYSCK as input clock */
4078 cinfo.use_sys_clk = true;
4079 cinfo.regn = dssdev->clocks.dsi.regn;
4080 cinfo.regm = dssdev->clocks.dsi.regm;
4081 cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc;
4082 cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi;
4083 r = dsi_calc_clock_rates(dssdev, &cinfo);
4085 DSSERR("Failed to calc dsi clocks\n");
4089 r = dsi_pll_set_clock_div(dsidev, &cinfo);
4091 DSSERR("Failed to set dsi clocks\n");
4098 static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
4100 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4101 struct dispc_clock_info dispc_cinfo;
4103 unsigned long long fck;
4105 fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);
4107 dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div;
4108 dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div;
4110 r = dispc_calc_clock_rates(fck, &dispc_cinfo);
4112 DSSERR("Failed to calc dispc clocks\n");
4116 r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo);
4118 DSSERR("Failed to set dispc clocks\n");
4125 static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
4127 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4128 int dsi_module = dsi_get_dsidev_id(dsidev);
4131 r = dsi_pll_init(dsidev, true, true);
4135 r = dsi_configure_dsi_clocks(dssdev);
4139 dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
4140 dss_select_dsi_clk_source(dsi_module, dssdev->clocks.dsi.dsi_fclk_src);
4141 dss_select_lcd_clk_source(dssdev->manager->id,
4142 dssdev->clocks.dispc.channel.lcd_clk_src);
4146 r = dsi_configure_dispc_clocks(dssdev);
4150 r = dsi_cio_init(dssdev);
4154 _dsi_print_reset_status(dsidev);
4156 dsi_proto_timings(dssdev);
4157 dsi_set_lp_clk_divisor(dssdev);
4160 _dsi_print_reset_status(dsidev);
4162 r = dsi_proto_config(dssdev);
4166 /* enable interface */
4167 dsi_vc_enable(dsidev, 0, 1);
4168 dsi_vc_enable(dsidev, 1, 1);
4169 dsi_vc_enable(dsidev, 2, 1);
4170 dsi_vc_enable(dsidev, 3, 1);
4171 dsi_if_enable(dsidev, 1);
4172 dsi_force_tx_stop_mode_io(dsidev);
4176 dsi_cio_uninit(dssdev);
4178 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
4179 dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
4180 dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK);
4183 dsi_pll_uninit(dsidev, true);
4188 static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
4189 bool disconnect_lanes, bool enter_ulps)
4191 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4192 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4193 int dsi_module = dsi_get_dsidev_id(dsidev);
4195 if (enter_ulps && !dsi->ulps_enabled)
4196 dsi_enter_ulps(dsidev);
4198 /* disable interface */
4199 dsi_if_enable(dsidev, 0);
4200 dsi_vc_enable(dsidev, 0, 0);
4201 dsi_vc_enable(dsidev, 1, 0);
4202 dsi_vc_enable(dsidev, 2, 0);
4203 dsi_vc_enable(dsidev, 3, 0);
4205 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
4206 dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
4207 dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK);
4208 dsi_cio_uninit(dssdev);
4209 dsi_pll_uninit(dsidev, disconnect_lanes);
4212 int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
4214 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4215 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4218 DSSDBG("dsi_display_enable\n");
4220 WARN_ON(!dsi_bus_is_locked(dsidev));
4222 mutex_lock(&dsi->lock);
4224 if (dssdev->manager == NULL) {
4225 DSSERR("failed to enable display: no manager\n");
4230 r = omap_dss_start_device(dssdev);
4232 DSSERR("failed to start device\n");
4236 r = dsi_runtime_get(dsidev);
4240 dsi_enable_pll_clock(dsidev, 1);
4242 _dsi_initialize_irq(dsidev);
4244 r = dsi_display_init_dispc(dssdev);
4246 goto err_init_dispc;
4248 r = dsi_display_init_dsi(dssdev);
4252 mutex_unlock(&dsi->lock);
4257 dsi_display_uninit_dispc(dssdev);
4259 dsi_enable_pll_clock(dsidev, 0);
4260 dsi_runtime_put(dsidev);
4262 omap_dss_stop_device(dssdev);
4264 mutex_unlock(&dsi->lock);
4265 DSSDBG("dsi_display_enable FAILED\n");
4268 EXPORT_SYMBOL(omapdss_dsi_display_enable);
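/*
 * Callers are expected to hold the DSI bus lock around enable/disable
 * (hence the WARN_ON above). From a panel driver this looks roughly like:
 *
 *	dsi_bus_lock(dssdev);
 *	r = omapdss_dsi_display_enable(dssdev);
 *	...
 *	dsi_bus_unlock(dssdev);
 */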
4270 void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
4271 bool disconnect_lanes, bool enter_ulps)
4273 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4274 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4276 DSSDBG("dsi_display_disable\n");
4278 WARN_ON(!dsi_bus_is_locked(dsidev));
4280 mutex_lock(&dsi->lock);
4282 dsi_sync_vc(dsidev, 0);
4283 dsi_sync_vc(dsidev, 1);
4284 dsi_sync_vc(dsidev, 2);
4285 dsi_sync_vc(dsidev, 3);
4287 dsi_display_uninit_dispc(dssdev);
4289 dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps);
4291 dsi_runtime_put(dsidev);
4292 dsi_enable_pll_clock(dsidev, 0);
4294 omap_dss_stop_device(dssdev);
4296 mutex_unlock(&dsi->lock);
4298 EXPORT_SYMBOL(omapdss_dsi_display_disable);
4300 int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
4302 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4303 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4305 dsi->te_enabled = enable;
4308 EXPORT_SYMBOL(omapdss_dsi_enable_te);
4310 void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
4311 u32 fifo_size, u32 burst_size,
4312 u32 *fifo_low, u32 *fifo_high)
4314 *fifo_high = fifo_size - burst_size;
4315 *fifo_low = fifo_size - burst_size * 2;
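/* For example, a (hypothetical) fifo_size of 1024 and burst_size of 256
 * give fifo_high = 768 and fifo_low = 512, in whatever units the caller
 * used for fifo_size and burst_size.
 */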
4318 int dsi_init_display(struct omap_dss_device *dssdev)
4320 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4321 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4322 int dsi_module = dsi_get_dsidev_id(dsidev);
4324 DSSDBG("DSI init\n");
4326 if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) {
4327 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
4328 OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
4331 if (dsi->vdds_dsi_reg == NULL) {
4332 struct regulator *vdds_dsi;
4334 vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
4336 if (IS_ERR(vdds_dsi)) {
4337 DSSERR("can't get VDDS_DSI regulator\n");
4338 return PTR_ERR(vdds_dsi);
4341 dsi->vdds_dsi_reg = vdds_dsi;
4344 if (dsi_get_num_data_lanes_dssdev(dssdev) > dsi->num_data_lanes) {
4345 DSSERR("DSI%d can't support more than %d data lanes\n",
4346 dsi_module + 1, dsi->num_data_lanes);
4353 int omap_dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
4355 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4356 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4359 for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
4360 if (!dsi->vc[i].dssdev) {
4361 dsi->vc[i].dssdev = dssdev;
4367 DSSERR("cannot get VC for display %s", dssdev->name);
4370 EXPORT_SYMBOL(omap_dsi_request_vc);
4372 int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
4374 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4375 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4377 if (vc_id < 0 || vc_id > 3) {
4378 DSSERR("VC ID out of range\n");
4382 if (channel < 0 || channel > 3) {
4383 DSSERR("Virtual Channel out of range\n");
4387 if (dsi->vc[channel].dssdev != dssdev) {
4388 DSSERR("Virtual Channel not allocated to display %s\n",
4393 dsi->vc[channel].vc_id = vc_id;
4397 EXPORT_SYMBOL(omap_dsi_set_vc_id);
4399 void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel)
4401 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4402 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4404 if ((channel >= 0 && channel <= 3) &&
4405 dsi->vc[channel].dssdev == dssdev) {
4406 dsi->vc[channel].dssdev = NULL;
4407 dsi->vc[channel].vc_id = 0;
4410 EXPORT_SYMBOL(omap_dsi_release_vc);
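/*
 * Virtual channel lifecycle from a panel driver's point of view (sketch):
 *
 *	r = omap_dsi_request_vc(dssdev, &channel);
 *	if (!r)
 *		r = omap_dsi_set_vc_id(dssdev, channel, 0);
 *	...
 *	omap_dsi_release_vc(dssdev, channel);
 *
 * with the release done when the panel is torn down.
 */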
4412 void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev)
4414 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 7, 1) != 1)
4415 DSSERR("%s (%s) not active\n",
4416 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
4417 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC));
4420 void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev)
4422 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 8, 1) != 1)
4423 DSSERR("%s (%s) not active\n",
4424 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
4425 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI));
4428 static void dsi_calc_clock_param_ranges(struct platform_device *dsidev)
4430 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4432 dsi->regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN);
4433 dsi->regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM);
4434 dsi->regm_dispc_max =
4435 dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DISPC);
4436 dsi->regm_dsi_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DSI);
4437 dsi->fint_min = dss_feat_get_param_min(FEAT_PARAM_DSIPLL_FINT);
4438 dsi->fint_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_FINT);
4439 dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
4442 static int dsi_get_clocks(struct platform_device *dsidev)
4444 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4447 clk = clk_get(&dsidev->dev, "fck");
4449 DSSERR("can't get fck\n");
4450 return PTR_ERR(clk);
4455 clk = clk_get(&dsidev->dev, "sys_clk");
4457 DSSERR("can't get sys_clk\n");
4458 clk_put(dsi->dss_clk);
4459 dsi->dss_clk = NULL;
4460 return PTR_ERR(clk);
4468 static void dsi_put_clocks(struct platform_device *dsidev)
4470 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4473 clk_put(dsi->dss_clk);
4475 clk_put(dsi->sys_clk);
4478 /* DSI1 HW IP initialisation */
4479 static int omap_dsihw_probe(struct platform_device *dsidev)
4481 struct omap_display_platform_data *dss_plat_data;
4482 struct omap_dss_board_info *board_info;
4484 int r, i, dsi_module = dsi_get_dsidev_id(dsidev);
4485 struct resource *dsi_mem;
4486 struct dsi_data *dsi;
4488 dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
4495 dsi_pdev_map[dsi_module] = dsidev;
4496 dev_set_drvdata(&dsidev->dev, dsi);
4498 dss_plat_data = dsidev->dev.platform_data;
4499 board_info = dss_plat_data->board_data;
4500 dsi->enable_pads = board_info->dsi_enable_pads;
4501 dsi->disable_pads = board_info->dsi_disable_pads;
4503 spin_lock_init(&dsi->irq_lock);
4504 spin_lock_init(&dsi->errors_lock);
4507 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
4508 spin_lock_init(&dsi->irq_stats_lock);
4509 dsi->irq_stats.last_reset = jiffies;
4512 mutex_init(&dsi->lock);
4513 sema_init(&dsi->bus_lock, 1);
4515 r = dsi_get_clocks(dsidev);
4519 pm_runtime_enable(&dsidev->dev);
4521 INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work,
4522 dsi_framedone_timeout_work_callback);
4524 #ifdef DSI_CATCH_MISSING_TE
4525 init_timer(&dsi->te_timer);
4526 dsi->te_timer.function = dsi_te_timeout;
4527 dsi->te_timer.data = 0;
4529 dsi_mem = platform_get_resource(dsi->pdev, IORESOURCE_MEM, 0);
4531 DSSERR("can't get IORESOURCE_MEM DSI\n");
4535 dsi->base = ioremap(dsi_mem->start, resource_size(dsi_mem));
4537 DSSERR("can't ioremap DSI\n");
4541 dsi->irq = platform_get_irq(dsi->pdev, 0);
4543 DSSERR("platform_get_irq failed\n");
4548 r = request_irq(dsi->irq, omap_dsi_irq_handler, IRQF_SHARED,
4549 dev_name(&dsidev->dev), dsi->pdev);
4551 DSSERR("request_irq failed\n");
4555 /* DSI VCs initialization */
4556 for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
4557 dsi->vc[i].source = DSI_VC_SOURCE_L4;
4558 dsi->vc[i].dssdev = NULL;
4559 dsi->vc[i].vc_id = 0;
4562 dsi_calc_clock_param_ranges(dsidev);
4564 r = dsi_runtime_get(dsidev);
4568 rev = dsi_read_reg(dsidev, DSI_REVISION);
4569 dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
4570 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
4572 dsi->num_data_lanes = dsi_get_num_data_lanes(dsidev);
4574 dsi_runtime_put(dsidev);
4579 free_irq(dsi->irq, dsi->pdev);
4583 pm_runtime_disable(&dsidev->dev);
4590 static int omap_dsihw_remove(struct platform_device *dsidev)
4592 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4594 WARN_ON(dsi->scp_clk_refcount > 0);
4596 pm_runtime_disable(&dsidev->dev);
4598 dsi_put_clocks(dsidev);
4600 if (dsi->vdds_dsi_reg != NULL) {
4601 if (dsi->vdds_dsi_enabled) {
4602 regulator_disable(dsi->vdds_dsi_reg);
4603 dsi->vdds_dsi_enabled = false;
4606 regulator_put(dsi->vdds_dsi_reg);
4607 dsi->vdds_dsi_reg = NULL;
4610 free_irq(dsi->irq, dsi->pdev);
4618 static int dsi_runtime_suspend(struct device *dev)
4620 dispc_runtime_put();
4626 static int dsi_runtime_resume(struct device *dev)
4630 r = dss_runtime_get();
4634 r = dispc_runtime_get();
4646 static const struct dev_pm_ops dsi_pm_ops = {
4647 .runtime_suspend = dsi_runtime_suspend,
4648 .runtime_resume = dsi_runtime_resume,
4651 static struct platform_driver omap_dsihw_driver = {
4652 .probe = omap_dsihw_probe,
4653 .remove = omap_dsihw_remove,
4655 .name = "omapdss_dsi",
4656 .owner = THIS_MODULE,
4661 int dsi_init_platform_driver(void)
4663 return platform_driver_register(&omap_dsihw_driver);
4666 void dsi_uninit_platform_driver(void)
4668 platform_driver_unregister(&omap_dsihw_driver);