serial: imx: set up aging timer interrupt as DMA trigger
drivers/tty/serial/imx.c
1 /*
2  * Driver for Motorola/Freescale IMX serial ports
3  *
4  * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
5  *
6  * Author: Sascha Hauer <sascha@saschahauer.de>
7  * Copyright (C) 2004 Pengutronix
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19
20 #if defined(CONFIG_SERIAL_IMX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
21 #define SUPPORT_SYSRQ
22 #endif
23
24 #include <linux/module.h>
25 #include <linux/ioport.h>
26 #include <linux/init.h>
27 #include <linux/console.h>
28 #include <linux/sysrq.h>
29 #include <linux/platform_device.h>
30 #include <linux/tty.h>
31 #include <linux/tty_flip.h>
32 #include <linux/serial_core.h>
33 #include <linux/serial.h>
34 #include <linux/clk.h>
35 #include <linux/delay.h>
36 #include <linux/rational.h>
37 #include <linux/slab.h>
38 #include <linux/of.h>
39 #include <linux/of_device.h>
40 #include <linux/io.h>
41 #include <linux/dma-mapping.h>
42
43 #include <asm/irq.h>
44 #include <linux/platform_data/serial-imx.h>
45 #include <linux/platform_data/dma-imx.h>
46
47 /* Register definitions */
48 #define URXD0 0x0  /* Receiver Register */
49 #define URTX0 0x40 /* Transmitter Register */
50 #define UCR1  0x80 /* Control Register 1 */
51 #define UCR2  0x84 /* Control Register 2 */
52 #define UCR3  0x88 /* Control Register 3 */
53 #define UCR4  0x8c /* Control Register 4 */
54 #define UFCR  0x90 /* FIFO Control Register */
55 #define USR1  0x94 /* Status Register 1 */
56 #define USR2  0x98 /* Status Register 2 */
57 #define UESC  0x9c /* Escape Character Register */
58 #define UTIM  0xa0 /* Escape Timer Register */
59 #define UBIR  0xa4 /* BRM Incremental Register */
60 #define UBMR  0xa8 /* BRM Modulator Register */
61 #define UBRC  0xac /* Baud Rate Count Register */
62 #define IMX21_ONEMS 0xb0 /* One Millisecond register */
63 #define IMX1_UTS 0xd0 /* UART Test Register on i.mx1 */
64 #define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx*/
65
66 /* UART Control Register Bit Fields.*/
67 #define URXD_DUMMY_READ (1<<16)
68 #define URXD_CHARRDY    (1<<15)
69 #define URXD_ERR        (1<<14)
70 #define URXD_OVRRUN     (1<<13)
71 #define URXD_FRMERR     (1<<12)
72 #define URXD_BRK        (1<<11)
73 #define URXD_PRERR      (1<<10)
74 #define URXD_RX_DATA    (0xFF<<0)
75 #define UCR1_ADEN       (1<<15) /* Auto detect interrupt */
76 #define UCR1_ADBR       (1<<14) /* Auto detect baud rate */
77 #define UCR1_TRDYEN     (1<<13) /* Transmitter ready interrupt enable */
78 #define UCR1_IDEN       (1<<12) /* Idle condition interrupt */
79 #define UCR1_ICD_REG(x) (((x) & 3) << 10) /* idle condition detect */
80 #define UCR1_RRDYEN     (1<<9)  /* Recv ready interrupt enable */
81 #define UCR1_RDMAEN     (1<<8)  /* Recv ready DMA enable */
82 #define UCR1_IREN       (1<<7)  /* Infrared interface enable */
83 #define UCR1_TXMPTYEN   (1<<6)  /* Transmitter empty interrupt enable */
84 #define UCR1_RTSDEN     (1<<5)  /* RTS delta interrupt enable */
85 #define UCR1_SNDBRK     (1<<4)  /* Send break */
86 #define UCR1_TDMAEN     (1<<3)  /* Transmitter ready DMA enable */
87 #define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */
88 #define UCR1_ATDMAEN    (1<<2)  /* Aging DMA Timer Enable */
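/*
 * Note: IMX1_UCR1_UARTCLKEN and UCR1_ATDMAEN share bit 2. The clock-enable
 * bit exists only on i.MX1, while i.MX21 and later SoCs use the same bit
 * position for the aging DMA timer enable, so the two defines never apply
 * to the same chip (per the i.MX reference manuals).
 */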
89 #define UCR1_DOZE       (1<<1)  /* Doze */
90 #define UCR1_UARTEN     (1<<0)  /* UART enabled */
91 #define UCR2_ESCI       (1<<15) /* Escape seq interrupt enable */
92 #define UCR2_IRTS       (1<<14) /* Ignore RTS pin */
93 #define UCR2_CTSC       (1<<13) /* CTS pin control */
94 #define UCR2_CTS        (1<<12) /* Clear to send */
95 #define UCR2_ESCEN      (1<<11) /* Escape enable */
96 #define UCR2_PREN       (1<<8)  /* Parity enable */
97 #define UCR2_PROE       (1<<7)  /* Parity odd/even */
98 #define UCR2_STPB       (1<<6)  /* Stop */
99 #define UCR2_WS         (1<<5)  /* Word size */
100 #define UCR2_RTSEN      (1<<4)  /* Request to send interrupt enable */
101 #define UCR2_ATEN       (1<<3)  /* Aging Timer Enable */
102 #define UCR2_TXEN       (1<<2)  /* Transmitter enabled */
103 #define UCR2_RXEN       (1<<1)  /* Receiver enabled */
104 #define UCR2_SRST       (1<<0)  /* SW reset */
105 #define UCR3_DTREN      (1<<13) /* DTR interrupt enable */
106 #define UCR3_PARERREN   (1<<12) /* Parity error interrupt enable */
107 #define UCR3_FRAERREN   (1<<11) /* Frame error interrupt enable */
108 #define UCR3_DSR        (1<<10) /* Data set ready */
109 #define UCR3_DCD        (1<<9)  /* Data carrier detect */
110 #define UCR3_RI         (1<<8)  /* Ring indicator */
111 #define UCR3_ADNIMP     (1<<7)  /* Autobaud Detection Not Improved */
112 #define UCR3_RXDSEN     (1<<6)  /* Receive status interrupt enable */
113 #define UCR3_AIRINTEN   (1<<5)  /* Async IR wake interrupt enable */
114 #define UCR3_AWAKEN     (1<<4)  /* Async wake interrupt enable */
115 #define IMX21_UCR3_RXDMUXSEL    (1<<2)  /* RXD Muxed Input Select */
116 #define UCR3_INVT       (1<<1)  /* Inverted Infrared transmission */
117 #define UCR3_BPEN       (1<<0)  /* Preset registers enable */
118 #define UCR4_CTSTL_SHF  10      /* CTS trigger level shift */
119 #define UCR4_CTSTL_MASK 0x3F    /* CTS trigger is 6 bits wide */
120 #define UCR4_INVR       (1<<9)  /* Inverted infrared reception */
121 #define UCR4_ENIRI      (1<<8)  /* Serial infrared interrupt enable */
122 #define UCR4_WKEN       (1<<7)  /* Wake interrupt enable */
123 #define UCR4_REF16      (1<<6)  /* Ref freq 16 MHz */
124 #define UCR4_IDDMAEN    (1<<6)  /* DMA IDLE Condition Detected */
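/*
 * As with UCR1 bit 2, UCR4 bit 6 is shared: REF16 exists only on i.MX1,
 * while later SoCs use the same bit as the IDLE-condition-detected DMA
 * interrupt enable (IDDMAEN).
 */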
125 #define UCR4_IRSC       (1<<5)  /* IR special case */
126 #define UCR4_TCEN       (1<<3)  /* Transmit complete interrupt enable */
127 #define UCR4_BKEN       (1<<2)  /* Break condition interrupt enable */
128 #define UCR4_OREN       (1<<1)  /* Receiver overrun interrupt enable */
129 #define UCR4_DREN       (1<<0)  /* Recv data ready interrupt enable */
130 #define UFCR_RXTL_SHF   0       /* Receiver trigger level shift */
131 #define UFCR_DCEDTE     (1<<6)  /* DCE/DTE mode select */
132 #define UFCR_RFDIV      (7<<7)  /* Reference freq divider mask */
133 #define UFCR_RFDIV_REG(x)       (((x) < 7 ? 6 - (x) : 6) << 7)
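/*
 * UFCR_RFDIV_REG() converts a divider value of 1..7 into the RFDIV field
 * encoding used by the hardware (0b101 = divide by 1, ..., 0b000 = divide
 * by 6, 0b110 = divide by 7, per the i.MX reference manuals). For example,
 * UFCR_RFDIV_REG(2) evaluates to 4 << 7, which selects "divide by 2".
 */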
134 #define UFCR_TXTL_SHF   10      /* Transmitter trigger level shift */
135 #define USR1_PARITYERR  (1<<15) /* Parity error interrupt flag */
136 #define USR1_RTSS       (1<<14) /* RTS pin status */
137 #define USR1_TRDY       (1<<13) /* Transmitter ready interrupt/dma flag */
138 #define USR1_RTSD       (1<<12) /* RTS delta */
139 #define USR1_ESCF       (1<<11) /* Escape seq interrupt flag */
140 #define USR1_FRAMERR    (1<<10) /* Frame error interrupt flag */
141 #define USR1_RRDY       (1<<9)   /* Receiver ready interrupt/dma flag */
142 #define USR1_AGTIM      (1<<8)   /* Ageing timer interrupt flag */
143 #define USR1_TIMEOUT    (1<<7)   /* Receive timeout interrupt status */
144 #define USR1_RXDS        (1<<6)  /* Receiver idle interrupt flag */
145 #define USR1_AIRINT      (1<<5)  /* Async IR wake interrupt flag */
146 #define USR1_AWAKE       (1<<4)  /* Async wake interrupt flag */
147 #define USR2_ADET        (1<<15) /* Auto baud rate detect complete */
148 #define USR2_TXFE        (1<<14) /* Transmit buffer FIFO empty */
149 #define USR2_DTRF        (1<<13) /* DTR edge interrupt flag */
150 #define USR2_IDLE        (1<<12) /* Idle condition */
151 #define USR2_IRINT       (1<<8)  /* Serial infrared interrupt flag */
152 #define USR2_WAKE        (1<<7)  /* Wake */
153 #define USR2_RTSF        (1<<4)  /* RTS edge interrupt flag */
154 #define USR2_TXDC        (1<<3)  /* Transmitter complete */
155 #define USR2_BRCD        (1<<2)  /* Break condition */
156 #define USR2_ORE        (1<<1)   /* Overrun error */
157 #define USR2_RDR        (1<<0)   /* Recv data ready */
158 #define UTS_FRCPERR     (1<<13) /* Force parity error */
159 #define UTS_LOOP        (1<<12)  /* Loop tx and rx */
160 #define UTS_TXEMPTY      (1<<6)  /* TxFIFO empty */
161 #define UTS_RXEMPTY      (1<<5)  /* RxFIFO empty */
162 #define UTS_TXFULL       (1<<4)  /* TxFIFO full */
163 #define UTS_RXFULL       (1<<3)  /* RxFIFO full */
164 #define UTS_SOFTRST      (1<<0)  /* Software reset */
165
166 /* We've been assigned a range on the "Low-density serial ports" major */
167 #define SERIAL_IMX_MAJOR        207
168 #define MINOR_START             16
169 #define DEV_NAME                "ttymxc"
170
171 /*
172  * This determines how often we check the modem status signals
173  * for any change.  They generally aren't connected to an IRQ
174  * so we have to poll them.  We also check immediately before
175  * filling the TX fifo in case CTS has been dropped.
176  */
177 #define MCTRL_TIMEOUT   (250*HZ/1000)
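/* 250 * HZ / 1000 jiffies, i.e. the modem lines are polled roughly every 250 ms */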
178
179 #define DRIVER_NAME "IMX-uart"
180
181 #define UART_NR 8
182
183 /* The i.MX21 type UART runs on all i.MX SoCs except i.MX1 and i.MX6q */
184 enum imx_uart_type {
185         IMX1_UART,
186         IMX21_UART,
187         IMX6Q_UART,
188 };
189
190 /* device type dependent stuff */
191 struct imx_uart_data {
192         unsigned uts_reg;
193         enum imx_uart_type devtype;
194 };
195
196 struct imx_port {
197         struct uart_port        port;
198         struct timer_list       timer;
199         unsigned int            old_status;
200         unsigned int            have_rtscts:1;
201         unsigned int            dte_mode:1;
202         unsigned int            irda_inv_rx:1;
203         unsigned int            irda_inv_tx:1;
204         unsigned short          trcv_delay; /* transceiver delay */
205         struct clk              *clk_ipg;
206         struct clk              *clk_per;
207         const struct imx_uart_data *devdata;
208
209         /* DMA fields */
210         unsigned int            dma_is_inited:1;
211         unsigned int            dma_is_enabled:1;
212         unsigned int            dma_is_rxing:1;
213         unsigned int            dma_is_txing:1;
214         struct dma_chan         *dma_chan_rx, *dma_chan_tx;
215         struct scatterlist      rx_sgl, tx_sgl[2];
216         void                    *rx_buf;
217         unsigned int            tx_bytes;
218         unsigned int            dma_tx_nents;
219         wait_queue_head_t       dma_wait;
220         unsigned int            saved_reg[10];
221         bool                    context_saved;
222 };
223
224 struct imx_port_ucrs {
225         unsigned int    ucr1;
226         unsigned int    ucr2;
227         unsigned int    ucr3;
228 };
229
230 static struct imx_uart_data imx_uart_devdata[] = {
231         [IMX1_UART] = {
232                 .uts_reg = IMX1_UTS,
233                 .devtype = IMX1_UART,
234         },
235         [IMX21_UART] = {
236                 .uts_reg = IMX21_UTS,
237                 .devtype = IMX21_UART,
238         },
239         [IMX6Q_UART] = {
240                 .uts_reg = IMX21_UTS,
241                 .devtype = IMX6Q_UART,
242         },
243 };
244
245 static const struct platform_device_id imx_uart_devtype[] = {
246         {
247                 .name = "imx1-uart",
248                 .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX1_UART],
249         }, {
250                 .name = "imx21-uart",
251                 .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX21_UART],
252         }, {
253                 .name = "imx6q-uart",
254                 .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX6Q_UART],
255         }, {
256                 /* sentinel */
257         }
258 };
259 MODULE_DEVICE_TABLE(platform, imx_uart_devtype);
260
261 static const struct of_device_id imx_uart_dt_ids[] = {
262         { .compatible = "fsl,imx6q-uart", .data = &imx_uart_devdata[IMX6Q_UART], },
263         { .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], },
264         { .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], },
265         { /* sentinel */ }
266 };
267 MODULE_DEVICE_TABLE(of, imx_uart_dt_ids);
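/*
 * A device tree node with e.g. compatible = "fsl,imx6q-uart" selects the
 * IMX6Q_UART devdata above; non-DT platforms match on the platform_device_id
 * names ("imx1-uart", "imx21-uart", "imx6q-uart") instead.
 */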
268
269 static inline unsigned uts_reg(struct imx_port *sport)
270 {
271         return sport->devdata->uts_reg;
272 }
273
274 static inline int is_imx1_uart(struct imx_port *sport)
275 {
276         return sport->devdata->devtype == IMX1_UART;
277 }
278
279 static inline int is_imx21_uart(struct imx_port *sport)
280 {
281         return sport->devdata->devtype == IMX21_UART;
282 }
283
284 static inline int is_imx6q_uart(struct imx_port *sport)
285 {
286         return sport->devdata->devtype == IMX6Q_UART;
287 }
288 /*
289  * Save and restore functions for UCR1, UCR2 and UCR3 registers
290  */
291 #if defined(CONFIG_SERIAL_IMX_CONSOLE)
292 static void imx_port_ucrs_save(struct uart_port *port,
293                                struct imx_port_ucrs *ucr)
294 {
295         /* save control registers */
296         ucr->ucr1 = readl(port->membase + UCR1);
297         ucr->ucr2 = readl(port->membase + UCR2);
298         ucr->ucr3 = readl(port->membase + UCR3);
299 }
300
301 static void imx_port_ucrs_restore(struct uart_port *port,
302                                   struct imx_port_ucrs *ucr)
303 {
304         /* restore control registers */
305         writel(ucr->ucr1, port->membase + UCR1);
306         writel(ucr->ucr2, port->membase + UCR2);
307         writel(ucr->ucr3, port->membase + UCR3);
308 }
309 #endif
310
311 /*
312  * Handle any change of modem status signal since we were last called.
313  */
314 static void imx_mctrl_check(struct imx_port *sport)
315 {
316         unsigned int status, changed;
317
318         status = sport->port.ops->get_mctrl(&sport->port);
319         changed = status ^ sport->old_status;
320
321         if (changed == 0)
322                 return;
323
324         sport->old_status = status;
325
326         if (changed & TIOCM_RI)
327                 sport->port.icount.rng++;
328         if (changed & TIOCM_DSR)
329                 sport->port.icount.dsr++;
330         if (changed & TIOCM_CAR)
331                 uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
332         if (changed & TIOCM_CTS)
333                 uart_handle_cts_change(&sport->port, status & TIOCM_CTS);
334
335         wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
336 }
337
338 /*
339  * This is our per-port timeout handler, for checking the
340  * modem status signals.
341  */
342 static void imx_timeout(unsigned long data)
343 {
344         struct imx_port *sport = (struct imx_port *)data;
345         unsigned long flags;
346
347         if (sport->port.state) {
348                 spin_lock_irqsave(&sport->port.lock, flags);
349                 imx_mctrl_check(sport);
350                 spin_unlock_irqrestore(&sport->port.lock, flags);
351
352                 mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
353         }
354 }
355
356 /*
357  * interrupts disabled on entry
358  */
359 static void imx_stop_tx(struct uart_port *port)
360 {
361         struct imx_port *sport = (struct imx_port *)port;
362         unsigned long temp;
363
364         /*
365          * We may be in an SMP context, so if the DMA TX thread is running
366          * on another CPU, we have to wait for it to finish.
367          */
368         if (sport->dma_is_enabled && sport->dma_is_txing)
369                 return;
370
371         temp = readl(port->membase + UCR1);
372         writel(temp & ~UCR1_TXMPTYEN, port->membase + UCR1);
373
374         /* in rs485 mode disable transmitter if shifter is empty */
375         if (port->rs485.flags & SER_RS485_ENABLED &&
376             readl(port->membase + USR2) & USR2_TXDC) {
377                 temp = readl(port->membase + UCR2);
378                 if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
379                         temp &= ~UCR2_CTS;
380                 else
381                         temp |= UCR2_CTS;
382                 writel(temp, port->membase + UCR2);
383
384                 temp = readl(port->membase + UCR4);
385                 temp &= ~UCR4_TCEN;
386                 writel(temp, port->membase + UCR4);
387         }
388 }
389
390 /*
391  * interrupts disabled on entry
392  */
393 static void imx_stop_rx(struct uart_port *port)
394 {
395         struct imx_port *sport = (struct imx_port *)port;
396         unsigned long temp;
397
398         if (sport->dma_is_enabled && sport->dma_is_rxing) {
399                 if (sport->port.suspended) {
400                         dmaengine_terminate_all(sport->dma_chan_rx);
401                         sport->dma_is_rxing = 0;
402                 } else {
403                         return;
404                 }
405         }
406
407         temp = readl(sport->port.membase + UCR2);
408         writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2);
409
410         /* disable the `Receiver Ready Interrupt` */
411         temp = readl(sport->port.membase + UCR1);
412         writel(temp & ~UCR1_RRDYEN, sport->port.membase + UCR1);
413 }
414
415 /*
416  * Set the modem control timer to fire immediately.
417  */
418 static void imx_enable_ms(struct uart_port *port)
419 {
420         struct imx_port *sport = (struct imx_port *)port;
421
422         mod_timer(&sport->timer, jiffies);
423 }
424
425 static void imx_dma_tx(struct imx_port *sport);
426 static inline void imx_transmit_buffer(struct imx_port *sport)
427 {
428         struct circ_buf *xmit = &sport->port.state->xmit;
429         unsigned long temp;
430
431         if (sport->port.x_char) {
432                 /* Send next char */
433                 writel(sport->port.x_char, sport->port.membase + URTX0);
434                 sport->port.icount.tx++;
435                 sport->port.x_char = 0;
436                 return;
437         }
438
439         if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
440                 imx_stop_tx(&sport->port);
441                 return;
442         }
443
444         if (sport->dma_is_enabled) {
445                 /*
446                  * We've just sent an X-char. Ensure the TX DMA is enabled
447                  * and the TX IRQ is disabled.
448                  */
449                 temp = readl(sport->port.membase + UCR1);
450                 temp &= ~UCR1_TXMPTYEN;
451                 if (sport->dma_is_txing) {
452                         temp |= UCR1_TDMAEN;
453                         writel(temp, sport->port.membase + UCR1);
454                 } else {
455                         writel(temp, sport->port.membase + UCR1);
456                         imx_dma_tx(sport);
457                 }
458         }
459
460         while (!uart_circ_empty(xmit) &&
461                !(readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)) {
462                 /* send xmit->buf[xmit->tail]
463                  * out the port here */
464                 writel(xmit->buf[xmit->tail], sport->port.membase + URTX0);
465                 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
466                 sport->port.icount.tx++;
467         }
468
469         if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
470                 uart_write_wakeup(&sport->port);
471
472         if (uart_circ_empty(xmit))
473                 imx_stop_tx(&sport->port);
474 }
475
476 static void dma_tx_callback(void *data)
477 {
478         struct imx_port *sport = data;
479         struct scatterlist *sgl = &sport->tx_sgl[0];
480         struct circ_buf *xmit = &sport->port.state->xmit;
481         unsigned long flags;
482         unsigned long temp;
483
484         spin_lock_irqsave(&sport->port.lock, flags);
485
486         dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
487
488         temp = readl(sport->port.membase + UCR1);
489         temp &= ~UCR1_TDMAEN;
490         writel(temp, sport->port.membase + UCR1);
491
492         /* update the stat */
493         xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1);
494         sport->port.icount.tx += sport->tx_bytes;
495
496         dev_dbg(sport->port.dev, "we finish the TX DMA.\n");
497
498         sport->dma_is_txing = 0;
499
500         spin_unlock_irqrestore(&sport->port.lock, flags);
501
502         if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
503                 uart_write_wakeup(&sport->port);
504
505         if (waitqueue_active(&sport->dma_wait)) {
506                 wake_up(&sport->dma_wait);
507                 dev_dbg(sport->port.dev, "exit in %s.\n", __func__);
508                 return;
509         }
510
511         spin_lock_irqsave(&sport->port.lock, flags);
512         if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port))
513                 imx_dma_tx(sport);
514         spin_unlock_irqrestore(&sport->port.lock, flags);
515 }
516
517 static void imx_dma_tx(struct imx_port *sport)
518 {
519         struct circ_buf *xmit = &sport->port.state->xmit;
520         struct scatterlist *sgl = sport->tx_sgl;
521         struct dma_async_tx_descriptor *desc;
522         struct dma_chan *chan = sport->dma_chan_tx;
523         struct device *dev = sport->port.dev;
524         unsigned long temp;
525         int ret;
526
527         if (sport->dma_is_txing)
528                 return;
529
530         sport->tx_bytes = uart_circ_chars_pending(xmit);
531
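        /*
         * The circular buffer may wrap: if the pending data is contiguous
         * (tail < head) one scatterlist entry is enough; otherwise map two
         * entries, tail..end-of-buffer and start-of-buffer..head.
         */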
532         if (xmit->tail < xmit->head) {
533                 sport->dma_tx_nents = 1;
534                 sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
535         } else {
536                 sport->dma_tx_nents = 2;
537                 sg_init_table(sgl, 2);
538                 sg_set_buf(sgl, xmit->buf + xmit->tail,
539                                 UART_XMIT_SIZE - xmit->tail);
540                 sg_set_buf(sgl + 1, xmit->buf, xmit->head);
541         }
542
543         ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
544         if (ret == 0) {
545                 dev_err(dev, "DMA mapping error for TX.\n");
546                 return;
547         }
548         desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents,
549                                         DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
550         if (!desc) {
551                 dma_unmap_sg(dev, sgl, sport->dma_tx_nents,
552                              DMA_TO_DEVICE);
553                 dev_err(dev, "We cannot prepare for the TX slave dma!\n");
554                 return;
555         }
556         desc->callback = dma_tx_callback;
557         desc->callback_param = sport;
558
559         dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
560                         uart_circ_chars_pending(xmit));
561
562         temp = readl(sport->port.membase + UCR1);
563         temp |= UCR1_TDMAEN;
564         writel(temp, sport->port.membase + UCR1);
565
566         /* fire it */
567         sport->dma_is_txing = 1;
568         dmaengine_submit(desc);
569         dma_async_issue_pending(chan);
570         return;
571 }
572
573 /*
574  * interrupts disabled on entry
575  */
576 static void imx_start_tx(struct uart_port *port)
577 {
578         struct imx_port *sport = (struct imx_port *)port;
579         unsigned long temp;
580
581         if (port->rs485.flags & SER_RS485_ENABLED) {
582                 /* enable transmitter and shifter empty irq */
583                 temp = readl(port->membase + UCR2);
584                 if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
585                         temp &= ~UCR2_CTS;
586                 else
587                         temp |= UCR2_CTS;
588                 writel(temp, port->membase + UCR2);
589
590                 temp = readl(port->membase + UCR4);
591                 temp |= UCR4_TCEN;
592                 writel(temp, port->membase + UCR4);
593         }
594
595         if (!sport->dma_is_enabled) {
596                 temp = readl(sport->port.membase + UCR1);
597                 writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1);
598         }
599
600         if (sport->dma_is_enabled) {
601                 if (sport->port.x_char) {
602                         /* We have an X-char to send, so enable the TX IRQ and
603                          * disable TX DMA to let the TX interrupt send the X-char */
604                         temp = readl(sport->port.membase + UCR1);
605                         temp &= ~UCR1_TDMAEN;
606                         temp |= UCR1_TXMPTYEN;
607                         writel(temp, sport->port.membase + UCR1);
608                         return;
609                 }
610
611                 if (!uart_circ_empty(&port->state->xmit) &&
612                     !uart_tx_stopped(port))
613                         imx_dma_tx(sport);
614                 return;
615         }
616 }
617
618 static irqreturn_t imx_rtsint(int irq, void *dev_id)
619 {
620         struct imx_port *sport = dev_id;
621         unsigned int val;
622         unsigned long flags;
623
624         spin_lock_irqsave(&sport->port.lock, flags);
625
626         writel(USR1_RTSD, sport->port.membase + USR1);
627         val = readl(sport->port.membase + USR1) & USR1_RTSS;
628         uart_handle_cts_change(&sport->port, !!val);
629         wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
630
631         spin_unlock_irqrestore(&sport->port.lock, flags);
632         return IRQ_HANDLED;
633 }
634
635 static irqreturn_t imx_txint(int irq, void *dev_id)
636 {
637         struct imx_port *sport = dev_id;
638         unsigned long flags;
639
640         spin_lock_irqsave(&sport->port.lock, flags);
641         imx_transmit_buffer(sport);
642         spin_unlock_irqrestore(&sport->port.lock, flags);
643         return IRQ_HANDLED;
644 }
645
646 static irqreturn_t imx_rxint(int irq, void *dev_id)
647 {
648         struct imx_port *sport = dev_id;
649         unsigned int rx, flg, ignored = 0;
650         struct tty_port *port = &sport->port.state->port;
651         unsigned long flags, temp;
652
653         spin_lock_irqsave(&sport->port.lock, flags);
654
655         while (readl(sport->port.membase + USR2) & USR2_RDR) {
656                 flg = TTY_NORMAL;
657                 sport->port.icount.rx++;
658
659                 rx = readl(sport->port.membase + URXD0);
660
661                 temp = readl(sport->port.membase + USR2);
662                 if (temp & USR2_BRCD) {
663                         writel(USR2_BRCD, sport->port.membase + USR2);
664                         if (uart_handle_break(&sport->port))
665                                 continue;
666                 }
667
668                 if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
669                         continue;
670
671                 if (unlikely(rx & URXD_ERR)) {
672                         if (rx & URXD_BRK)
673                                 sport->port.icount.brk++;
674                         else if (rx & URXD_PRERR)
675                                 sport->port.icount.parity++;
676                         else if (rx & URXD_FRMERR)
677                                 sport->port.icount.frame++;
678                         if (rx & URXD_OVRRUN)
679                                 sport->port.icount.overrun++;
680
681                         if (rx & sport->port.ignore_status_mask) {
682                                 if (++ignored > 100)
683                                         goto out;
684                                 continue;
685                         }
686
687                         rx &= (sport->port.read_status_mask | 0xFF);
688
689                         if (rx & URXD_BRK)
690                                 flg = TTY_BREAK;
691                         else if (rx & URXD_PRERR)
692                                 flg = TTY_PARITY;
693                         else if (rx & URXD_FRMERR)
694                                 flg = TTY_FRAME;
695                         if (rx & URXD_OVRRUN)
696                                 flg = TTY_OVERRUN;
697
698 #ifdef SUPPORT_SYSRQ
699                         sport->port.sysrq = 0;
700 #endif
701                 }
702
703                 if (sport->port.ignore_status_mask & URXD_DUMMY_READ)
704                         goto out;
705
706                 if (tty_insert_flip_char(port, rx, flg) == 0)
707                         sport->port.icount.buf_overrun++;
708         }
709
710 out:
711         spin_unlock_irqrestore(&sport->port.lock, flags);
712         tty_flip_buffer_push(port);
713         return IRQ_HANDLED;
714 }
715
716 static int start_rx_dma(struct imx_port *sport);
717 /*
718  * If the RXFIFO is filled with some data, then start a DMA
719  * operation to receive it.
720  */
721 static void imx_dma_rxint(struct imx_port *sport)
722 {
723         unsigned long temp;
724         unsigned long flags;
725
726         spin_lock_irqsave(&sport->port.lock, flags);
727
728         temp = readl(sport->port.membase + USR2);
729         if ((temp & USR2_RDR) && !sport->dma_is_rxing) {
730                 sport->dma_is_rxing = 1;
731
732                 /* disable the receiver ready and aging timer interrupts */
733                 temp = readl(sport->port.membase + UCR1);
734                 temp &= ~(UCR1_RRDYEN);
735                 writel(temp, sport->port.membase + UCR1);
736
737                 temp = readl(sport->port.membase + UCR2);
738                 temp &= ~(UCR2_ATEN);
739                 writel(temp, sport->port.membase + UCR2);
740
741                 /* tell the DMA to receive the data. */
742                 start_rx_dma(sport);
743         }
744
745         spin_unlock_irqrestore(&sport->port.lock, flags);
746 }
747
748 static irqreturn_t imx_int(int irq, void *dev_id)
749 {
750         struct imx_port *sport = dev_id;
751         unsigned int sts;
752         unsigned int sts2;
753
754         sts = readl(sport->port.membase + USR1);
755         sts2 = readl(sport->port.membase + USR2);
756
757         if (sts & (USR1_RRDY | USR1_AGTIM)) {
758                 if (sport->dma_is_enabled)
759                         imx_dma_rxint(sport);
760                 else
761                         imx_rxint(irq, dev_id);
762         }
763
764         if ((sts & USR1_TRDY &&
765              readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN) ||
766             (sts2 & USR2_TXDC &&
767              readl(sport->port.membase + UCR4) & UCR4_TCEN))
768                 imx_txint(irq, dev_id);
769
770         if (sts & USR1_RTSD)
771                 imx_rtsint(irq, dev_id);
772
773         if (sts & USR1_AWAKE)
774                 writel(USR1_AWAKE, sport->port.membase + USR1);
775
776         if (sts2 & USR2_ORE) {
777                 sport->port.icount.overrun++;
778                 writel(USR2_ORE, sport->port.membase + USR2);
779         }
780
781         return IRQ_HANDLED;
782 }
783
784 /*
785  * Return TIOCSER_TEMT when transmitter is not busy.
786  */
787 static unsigned int imx_tx_empty(struct uart_port *port)
788 {
789         struct imx_port *sport = (struct imx_port *)port;
790         unsigned int ret;
791
792         ret = (readl(sport->port.membase + USR2) & USR2_TXDC) ?  TIOCSER_TEMT : 0;
793
794         /* If the TX DMA is working, return 0. */
795         if (sport->dma_is_enabled && sport->dma_is_txing)
796                 ret = 0;
797
798         return ret;
799 }
800
801 /*
802  * We have a modem side uart, so the meanings of RTS and CTS are inverted.
803  */
804 static unsigned int imx_get_mctrl(struct uart_port *port)
805 {
806         struct imx_port *sport = (struct imx_port *)port;
807         unsigned int tmp = TIOCM_DSR | TIOCM_CAR;
808
809         if (readl(sport->port.membase + USR1) & USR1_RTSS)
810                 tmp |= TIOCM_CTS;
811
812         if (readl(sport->port.membase + UCR2) & UCR2_CTS)
813                 tmp |= TIOCM_RTS;
814
815         if (readl(sport->port.membase + uts_reg(sport)) & UTS_LOOP)
816                 tmp |= TIOCM_LOOP;
817
818         return tmp;
819 }
820
821 static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl)
822 {
823         struct imx_port *sport = (struct imx_port *)port;
824         unsigned long temp;
825
826         if (!(port->rs485.flags & SER_RS485_ENABLED)) {
827                 temp = readl(sport->port.membase + UCR2);
828                 temp &= ~(UCR2_CTS | UCR2_CTSC);
829                 if (mctrl & TIOCM_RTS)
830                         temp |= UCR2_CTS | UCR2_CTSC;
831                 writel(temp, sport->port.membase + UCR2);
832         }
833
834         temp = readl(sport->port.membase + uts_reg(sport)) & ~UTS_LOOP;
835         if (mctrl & TIOCM_LOOP)
836                 temp |= UTS_LOOP;
837         writel(temp, sport->port.membase + uts_reg(sport));
838 }
839
840 /*
841  * Interrupts always disabled.
842  */
843 static void imx_break_ctl(struct uart_port *port, int break_state)
844 {
845         struct imx_port *sport = (struct imx_port *)port;
846         unsigned long flags, temp;
847
848         spin_lock_irqsave(&sport->port.lock, flags);
849
850         temp = readl(sport->port.membase + UCR1) & ~UCR1_SNDBRK;
851
852         if (break_state != 0)
853                 temp |= UCR1_SNDBRK;
854
855         writel(temp, sport->port.membase + UCR1);
856
857         spin_unlock_irqrestore(&sport->port.lock, flags);
858 }
859
860 #define RX_BUF_SIZE     (PAGE_SIZE)
861 static void imx_rx_dma_done(struct imx_port *sport)
862 {
863         unsigned long temp;
864         unsigned long flags;
865
866         spin_lock_irqsave(&sport->port.lock, flags);
867
868         /* re-enable interrupts to get notified when new symbols are incoming */
869         temp = readl(sport->port.membase + UCR1);
870         temp |= UCR1_RRDYEN;
871         writel(temp, sport->port.membase + UCR1);
872
873         temp = readl(sport->port.membase + UCR2);
874         temp |= UCR2_ATEN;
875         writel(temp, sport->port.membase + UCR2);
876
877         sport->dma_is_rxing = 0;
878
879         /* Is the shutdown waiting for us? */
880         if (waitqueue_active(&sport->dma_wait))
881                 wake_up(&sport->dma_wait);
882
883         spin_unlock_irqrestore(&sport->port.lock, flags);
884 }
885
886 /*
887  * There are three kinds of RX DMA interrupts (such as on the MX6Q):
888  *   [1] the RX DMA buffer is full.
889  *   [2] the aging timer expires (waits for 8 characters worth of time)
890  *   [3] the Idle Condition Detect fires (enabled via UCR4_IDDMAEN).
891  *
892  * [2] triggers when a character has been sitting in the FIFO for a
893  * while, whereas [3] can wait for 32 frames when the RX line is
894  * idle and the RxFIFO is empty.
895  */
896 static void dma_rx_callback(void *data)
897 {
898         struct imx_port *sport = data;
899         struct dma_chan *chan = sport->dma_chan_rx;
900         struct scatterlist *sgl = &sport->rx_sgl;
901         struct tty_port *port = &sport->port.state->port;
902         struct dma_tx_state state;
903         enum dma_status status;
904         unsigned int count;
905
906         /* unmap it first */
907         dma_unmap_sg(sport->port.dev, sgl, 1, DMA_FROM_DEVICE);
908
909         status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
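        /*
         * The descriptor was prepared for RX_BUF_SIZE bytes; the residue
         * reported by the DMA engine is how much of that is still unfilled,
         * so RX_BUF_SIZE - residue is the number of bytes actually received.
         */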
910         count = RX_BUF_SIZE - state.residue;
911
912         if (readl(sport->port.membase + USR2) & USR2_IDLE) {
913                 /* In condition [3] the SDMA counted up too early */
914                 count--;
915
916                 writel(USR2_IDLE, sport->port.membase + USR2);
917         }
918
919         dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
920
921         if (count) {
922                 if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {
923                         int bytes = tty_insert_flip_string(port, sport->rx_buf,
924                                         count);
925
926                         if (bytes != count)
927                                 sport->port.icount.buf_overrun++;
928                 }
929                 tty_flip_buffer_push(port);
930
931                 start_rx_dma(sport);
932         } else if (readl(sport->port.membase + USR2) & USR2_RDR) {
933                 /*
934                  * Start rx_dma directly once there is data in the RXFIFO;
935                  * this is more efficient than the previous approach:
936                  *      1. call imx_rx_dma_done to stop DMA if no data was received
937                  *      2. wait for the next RDR interrupt to start the DMA transfer.
938                  */
939                 start_rx_dma(sport);
940         } else {
941                 /*
942                  * stop DMA to prevent too many IDLE events being triggered if
943                  * there is no data in the RXFIFO
944                  */
945                 imx_rx_dma_done(sport);
946         }
947 }
948
949 static int start_rx_dma(struct imx_port *sport)
950 {
951         struct scatterlist *sgl = &sport->rx_sgl;
952         struct dma_chan *chan = sport->dma_chan_rx;
953         struct device *dev = sport->port.dev;
954         struct dma_async_tx_descriptor *desc;
955         int ret;
956
957         sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE);
958         ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
959         if (ret == 0) {
960                 dev_err(dev, "DMA mapping error for RX.\n");
961                 return -EINVAL;
962         }
963         desc = dmaengine_prep_slave_sg(chan, sgl, 1, DMA_DEV_TO_MEM,
964                                         DMA_PREP_INTERRUPT);
965         if (!desc) {
966                 dma_unmap_sg(dev, sgl, 1, DMA_FROM_DEVICE);
967                 dev_err(dev, "We cannot prepare for the RX slave dma!\n");
968                 return -EINVAL;
969         }
970         desc->callback = dma_rx_callback;
971         desc->callback_param = sport;
972
973         dev_dbg(dev, "RX: prepare for the DMA.\n");
974         dmaengine_submit(desc);
975         dma_async_issue_pending(chan);
976         return 0;
977 }
978
979 #define TXTL_DEFAULT 2 /* reset default */
980 #define RXTL_DEFAULT 1 /* reset default */
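/*
 * FIFO trigger levels (reading aid, per the i.MX reference manuals): with
 * TXTL = 2 the TRDY interrupt/DMA request fires when fewer than 2 characters
 * remain in the TxFIFO, and with RXTL = 1 RRDY fires as soon as a single
 * character sits in the RxFIFO.
 */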
981
982 static void imx_setup_ufcr(struct imx_port *sport,
983                           unsigned char txwl, unsigned char rxwl)
984 {
985         unsigned int val;
986
987         /* set receiver / transmitter trigger level */
988         val = readl(sport->port.membase + UFCR) & (UFCR_RFDIV | UFCR_DCEDTE);
989         val |= txwl << UFCR_TXTL_SHF | rxwl;
990         writel(val, sport->port.membase + UFCR);
991 }
992
993 static void imx_uart_dma_exit(struct imx_port *sport)
994 {
995         if (sport->dma_chan_rx) {
996                 dma_release_channel(sport->dma_chan_rx);
997                 sport->dma_chan_rx = NULL;
998
999                 kfree(sport->rx_buf);
1000                 sport->rx_buf = NULL;
1001         }
1002
1003         if (sport->dma_chan_tx) {
1004                 dma_release_channel(sport->dma_chan_tx);
1005                 sport->dma_chan_tx = NULL;
1006         }
1007
1008         sport->dma_is_inited = 0;
1009 }
1010
1011 static int imx_uart_dma_init(struct imx_port *sport)
1012 {
1013         struct dma_slave_config slave_config = {};
1014         struct device *dev = sport->port.dev;
1015         int ret;
1016
1017         /* Prepare for RX : */
1018         sport->dma_chan_rx = dma_request_slave_channel(dev, "rx");
1019         if (!sport->dma_chan_rx) {
1020                 dev_dbg(dev, "cannot get the DMA channel.\n");
1021                 ret = -EINVAL;
1022                 goto err;
1023         }
1024
1025         slave_config.direction = DMA_DEV_TO_MEM;
1026         slave_config.src_addr = sport->port.mapbase + URXD0;
1027         slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1028         slave_config.src_maxburst = RXTL_DEFAULT;
1029         ret = dmaengine_slave_config(sport->dma_chan_rx, &slave_config);
1030         if (ret) {
1031                 dev_err(dev, "error in RX dma configuration.\n");
1032                 goto err;
1033         }
1034
1035         sport->rx_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1036         if (!sport->rx_buf) {
1037                 ret = -ENOMEM;
1038                 goto err;
1039         }
1040
1041         /* Prepare for TX : */
1042         sport->dma_chan_tx = dma_request_slave_channel(dev, "tx");
1043         if (!sport->dma_chan_tx) {
1044                 dev_err(dev, "cannot get the TX DMA channel!\n");
1045                 ret = -EINVAL;
1046                 goto err;
1047         }
1048
1049         slave_config.direction = DMA_MEM_TO_DEV;
1050         slave_config.dst_addr = sport->port.mapbase + URTX0;
1051         slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1052         slave_config.dst_maxburst = TXTL_DEFAULT;
1053         ret = dmaengine_slave_config(sport->dma_chan_tx, &slave_config);
1054         if (ret) {
1055                 dev_err(dev, "error in TX dma configuration.");
1056                 goto err;
1057         }
1058
1059         sport->dma_is_inited = 1;
1060
1061         return 0;
1062 err:
1063         imx_uart_dma_exit(sport);
1064         return ret;
1065 }
1066
1067 static void imx_enable_dma(struct imx_port *sport)
1068 {
1069         unsigned long temp;
1070
1071         init_waitqueue_head(&sport->dma_wait);
1072
1073         /* set UCR1 */
1074         temp = readl(sport->port.membase + UCR1);
1075         temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN |
1076                 /* wait for 32 idle frames for IDDMA interrupt */
1077                 UCR1_ICD_REG(3);
1078         writel(temp, sport->port.membase + UCR1);
1079
1080         temp = readl(sport->port.membase + UCR2);
1081         temp |= UCR2_ATEN;
1082         writel(temp, sport->port.membase + UCR2);
1083
1084         /* set UCR4 */
1085         temp = readl(sport->port.membase + UCR4);
1086         temp |= UCR4_IDDMAEN;
1087         writel(temp, sport->port.membase + UCR4);
1088
1089         sport->dma_is_enabled = 1;
1090 }
1091
1092 static void imx_disable_dma(struct imx_port *sport)
1093 {
1094         unsigned long temp;
1095
1096         /* clear UCR1 */
1097         temp = readl(sport->port.membase + UCR1);
1098         temp &= ~(UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN);
1099         writel(temp, sport->port.membase + UCR1);
1100
1101         /* clear UCR2 */
1102         temp = readl(sport->port.membase + UCR2);
1103         temp &= ~(UCR2_CTSC | UCR2_CTS | UCR2_ATEN);
1104         writel(temp, sport->port.membase + UCR2);
1105
1106         /* clear UCR4 */
1107         temp = readl(sport->port.membase + UCR4);
1108         temp &= ~UCR4_IDDMAEN;
1109         writel(temp, sport->port.membase + UCR4);
1110
1111         sport->dma_is_enabled = 0;
1112 }
1113
1114 /* half the RX buffer size */
1115 #define CTSTL 16
1116
1117 static int imx_startup(struct uart_port *port)
1118 {
1119         struct imx_port *sport = (struct imx_port *)port;
1120         int retval, i;
1121         unsigned long flags, temp;
1122
1123         retval = clk_prepare_enable(sport->clk_per);
1124         if (retval)
1125                 return retval;
1126         retval = clk_prepare_enable(sport->clk_ipg);
1127         if (retval) {
1128                 clk_disable_unprepare(sport->clk_per);
1129                 return retval;
1130         }
1131
1132         imx_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
1133
1134         /* disable the DREN bit (Data Ready interrupt enable) before
1135          * requesting IRQs
1136          */
1137         temp = readl(sport->port.membase + UCR4);
1138
1139         /* set the trigger level for CTS */
1140         temp &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
1141         temp |= CTSTL << UCR4_CTSTL_SHF;
1142
1143         writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
1144
1145         spin_lock_irqsave(&sport->port.lock, flags);
1146         /* Reset FIFOs and state machines */
1147         i = 100;
1148
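        /*
         * Writing 0 to UCR2_SRST requests a software reset of the FIFOs and
         * state machines; the bit reads back as 1 again once the reset has
         * completed, which is what the loop below waits for (bounded by i).
         */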
1149         temp = readl(sport->port.membase + UCR2);
1150         temp &= ~UCR2_SRST;
1151         writel(temp, sport->port.membase + UCR2);
1152
1153         while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) && (--i > 0))
1154                 udelay(1);
1155
1156         /*
1157          * Finally, clear and enable interrupts
1158          */
1159         writel(USR1_RTSD, sport->port.membase + USR1);
1160         writel(USR2_ORE, sport->port.membase + USR2);
1161
1162         temp = readl(sport->port.membase + UCR1);
1163         temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
1164
1165         writel(temp, sport->port.membase + UCR1);
1166
1167         temp = readl(sport->port.membase + UCR4);
1168         temp |= UCR4_OREN;
1169         writel(temp, sport->port.membase + UCR4);
1170
1171         temp = readl(sport->port.membase + UCR2);
1172         temp |= (UCR2_RXEN | UCR2_TXEN);
1173         if (!sport->have_rtscts)
1174                 temp |= UCR2_IRTS;
1175         writel(temp, sport->port.membase + UCR2);
1176
1177         if (!is_imx1_uart(sport)) {
1178                 temp = readl(sport->port.membase + UCR3);
1179                 temp |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP;
1180                 writel(temp, sport->port.membase + UCR3);
1181         }
1182
1183         /*
1184          * Enable modem status interrupts
1185          */
1186         imx_enable_ms(&sport->port);
1187         spin_unlock_irqrestore(&sport->port.lock, flags);
1188
1189         return 0;
1190 }
1191
1192 static void imx_shutdown(struct uart_port *port)
1193 {
1194         struct imx_port *sport = (struct imx_port *)port;
1195         unsigned long temp;
1196         unsigned long flags;
1197
1198         if (sport->dma_is_enabled) {
1199                 int ret;
1200
1201                 /* We have to wait for the DMA to finish. */
1202                 ret = wait_event_interruptible(sport->dma_wait,
1203                         !sport->dma_is_rxing && !sport->dma_is_txing);
1204                 if (ret != 0) {
1205                         sport->dma_is_rxing = 0;
1206                         sport->dma_is_txing = 0;
1207                         dmaengine_terminate_all(sport->dma_chan_tx);
1208                         dmaengine_terminate_all(sport->dma_chan_rx);
1209                 }
1210                 spin_lock_irqsave(&sport->port.lock, flags);
1211                 imx_stop_tx(port);
1212                 imx_stop_rx(port);
1213                 imx_disable_dma(sport);
1214                 spin_unlock_irqrestore(&sport->port.lock, flags);
1215                 imx_uart_dma_exit(sport);
1216         }
1217
1218         spin_lock_irqsave(&sport->port.lock, flags);
1219         temp = readl(sport->port.membase + UCR2);
1220         temp &= ~(UCR2_TXEN);
1221         writel(temp, sport->port.membase + UCR2);
1222         spin_unlock_irqrestore(&sport->port.lock, flags);
1223
1224         /*
1225          * Stop our timer.
1226          */
1227         del_timer_sync(&sport->timer);
1228
1229         /*
1230          * Disable all interrupts, port and break condition.
1231          */
1232
1233         spin_lock_irqsave(&sport->port.lock, flags);
1234         temp = readl(sport->port.membase + UCR1);
1235         temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
1236
1237         writel(temp, sport->port.membase + UCR1);
1238         spin_unlock_irqrestore(&sport->port.lock, flags);
1239
1240         clk_disable_unprepare(sport->clk_per);
1241         clk_disable_unprepare(sport->clk_ipg);
1242 }
1243
1244 static void imx_flush_buffer(struct uart_port *port)
1245 {
1246         struct imx_port *sport = (struct imx_port *)port;
1247         struct scatterlist *sgl = &sport->tx_sgl[0];
1248         unsigned long temp;
1249         int i = 100, ubir, ubmr, uts;
1250
1251         if (!sport->dma_chan_tx)
1252                 return;
1253
1254         sport->tx_bytes = 0;
1255         dmaengine_terminate_all(sport->dma_chan_tx);
1256         if (sport->dma_is_txing) {
1257                 dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents,
1258                              DMA_TO_DEVICE);
1259                 temp = readl(sport->port.membase + UCR1);
1260                 temp &= ~UCR1_TDMAEN;
1261                 writel(temp, sport->port.membase + UCR1);
1262                 sport->dma_is_txing = false;
1263         }
1264
1265         /*
1266          * According to the Reference Manual description of the UART SRST bit:
1267          * "Reset the transmit and receive state machines,
1268          * all FIFOs and register USR1, USR2, UBIR, UBMR, UBRC, URXD, UTXD
1269          * and UTS[6-3]". As we don't need to restore the old values from
1270          * USR1, USR2, URXD, UTXD, only save/restore UBIR, UBMR and UTS (UBRC is read-only)
1271          */
1272         ubir = readl(sport->port.membase + UBIR);
1273         ubmr = readl(sport->port.membase + UBMR);
1274         uts = readl(sport->port.membase + IMX21_UTS);
1275
1276         temp = readl(sport->port.membase + UCR2);
1277         temp &= ~UCR2_SRST;
1278         writel(temp, sport->port.membase + UCR2);
1279
1280         while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) && (--i > 0))
1281                 udelay(1);
1282
1283         /* Restore the registers */
1284         writel(ubir, sport->port.membase + UBIR);
1285         writel(ubmr, sport->port.membase + UBMR);
1286         writel(uts, sport->port.membase + IMX21_UTS);
1287 }
1288
1289 static void
1290 imx_set_termios(struct uart_port *port, struct ktermios *termios,
1291                    struct ktermios *old)
1292 {
1293         struct imx_port *sport = (struct imx_port *)port;
1294         unsigned long flags;
1295         unsigned int ucr2, old_ucr1, old_ucr2, baud, quot;
1296         unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
1297         unsigned int div, ufcr;
1298         unsigned long num, denom;
1299         uint64_t tdiv64;
1300
1301         /*
1302          * We only support CS7 and CS8.
1303          */
1304         while ((termios->c_cflag & CSIZE) != CS7 &&
1305                (termios->c_cflag & CSIZE) != CS8) {
1306                 termios->c_cflag &= ~CSIZE;
1307                 termios->c_cflag |= old_csize;
1308                 old_csize = CS8;
1309         }
1310
1311         if ((termios->c_cflag & CSIZE) == CS8)
1312                 ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS;
1313         else
1314                 ucr2 = UCR2_SRST | UCR2_IRTS;
1315
1316         if (termios->c_cflag & CRTSCTS) {
1317                 if (sport->have_rtscts) {
1318                         ucr2 &= ~UCR2_IRTS;
1319
1320                         if (port->rs485.flags & SER_RS485_ENABLED) {
1321                                 /*
1322                                  * RTS is mandatory for rs485 operation, so keep
1323                                  * it under manual control and keep transmitter
1324                                  * disabled.
1325                                  */
1326                                 if (!(port->rs485.flags &
1327                                       SER_RS485_RTS_AFTER_SEND))
1328                                         ucr2 |= UCR2_CTS;
1329                         } else {
1330                                 ucr2 |= UCR2_CTSC;
1331                         }
1332
1333                         /* Can we enable the DMA support? */
1334                         if (is_imx6q_uart(sport) && !uart_console(port)
1335                                 && !sport->dma_is_inited)
1336                                 imx_uart_dma_init(sport);
1337                 } else {
1338                         termios->c_cflag &= ~CRTSCTS;
1339                 }
1340         } else if (port->rs485.flags & SER_RS485_ENABLED)
1341                 /* disable transmitter */
1342                 if (!(port->rs485.flags & SER_RS485_RTS_AFTER_SEND))
1343                         ucr2 |= UCR2_CTS;
1344
1345         if (termios->c_cflag & CSTOPB)
1346                 ucr2 |= UCR2_STPB;
1347         if (termios->c_cflag & PARENB) {
1348                 ucr2 |= UCR2_PREN;
1349                 if (termios->c_cflag & PARODD)
1350                         ucr2 |= UCR2_PROE;
1351         }
1352
1353         del_timer_sync(&sport->timer);
1354
1355         /*
1356          * Ask the core to calculate the divisor for us.
1357          */
1358         baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
1359         quot = uart_get_divisor(port, baud);
1360
1361         spin_lock_irqsave(&sport->port.lock, flags);
1362
1363         sport->port.read_status_mask = 0;
1364         if (termios->c_iflag & INPCK)
1365                 sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR);
1366         if (termios->c_iflag & (BRKINT | PARMRK))
1367                 sport->port.read_status_mask |= URXD_BRK;
1368
1369         /*
1370          * Characters to ignore
1371          */
1372         sport->port.ignore_status_mask = 0;
1373         if (termios->c_iflag & IGNPAR)
1374                 sport->port.ignore_status_mask |= URXD_PRERR | URXD_FRMERR;
1375         if (termios->c_iflag & IGNBRK) {
1376                 sport->port.ignore_status_mask |= URXD_BRK;
1377                 /*
1378                  * If we're ignoring parity and break indicators,
1379                  * ignore overruns too (for real raw support).
1380                  */
1381                 if (termios->c_iflag & IGNPAR)
1382                         sport->port.ignore_status_mask |= URXD_OVRRUN;
1383         }
1384
1385         if ((termios->c_cflag & CREAD) == 0)
1386                 sport->port.ignore_status_mask |= URXD_DUMMY_READ;
1387
1388         /*
1389          * Update the per-port timeout.
1390          */
1391         uart_update_timeout(port, termios->c_cflag, baud);
1392
1393         /*
1394          * disable interrupts and drain transmitter
1395          */
1396         old_ucr1 = readl(sport->port.membase + UCR1);
1397         writel(old_ucr1 & ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN),
1398                         sport->port.membase + UCR1);
1399
1400         while (!(readl(sport->port.membase + USR2) & USR2_TXDC))
1401                 barrier();
1402
1403         /* then, disable everything */
1404         old_ucr2 = readl(sport->port.membase + UCR2);
1405         writel(old_ucr2 & ~(UCR2_TXEN | UCR2_RXEN),
1406                         sport->port.membase + UCR2);
1407         old_ucr2 &= (UCR2_TXEN | UCR2_RXEN | UCR2_ATEN);
1408
1409         /* custom-baudrate handling */
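        /*
         * Legacy spd_cust support: when userspace asks for 38400 baud but has
         * set a custom divisor (via setserial), quot differs from the standard
         * divisor, so the real baud rate is derived from quot instead.
         */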
1410         div = sport->port.uartclk / (baud * 16);
1411         if (baud == 38400 && quot != div)
1412                 baud = sport->port.uartclk / (quot * 16);
1413
1414         div = sport->port.uartclk / (baud * 16);
1415         if (div > 7)
1416                 div = 7;
1417         if (!div)
1418                 div = 1;
1419
1420         rational_best_approximation(16 * div * baud, sport->port.uartclk,
1421                 1 << 16, 1 << 16, &num, &denom);
1422
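        /*
         * The binary rate multiplier generates
         *   baud = (uartclk / div) * num / (16 * denom),
         * where num - 1 and denom - 1 are written to UBIR and UBMR below.
         * Illustrative example (assuming an 80 MHz uartclk, as on i.MX6):
         * for 115200 baud, div = 7 and num/denom = 504/3125, giving
         * 80000000 * 504 / (3125 * 16 * 7) = exactly 115200.
         */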
1423         tdiv64 = sport->port.uartclk;
1424         tdiv64 *= num;
1425         do_div(tdiv64, denom * 16 * div);
1426         tty_termios_encode_baud_rate(termios,
1427                                 (speed_t)tdiv64, (speed_t)tdiv64);
1428
1429         num -= 1;
1430         denom -= 1;
1431
1432         ufcr = readl(sport->port.membase + UFCR);
1433         ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
1434         if (sport->dte_mode)
1435                 ufcr |= UFCR_DCEDTE;
1436         writel(ufcr, sport->port.membase + UFCR);
1437
1438         writel(num, sport->port.membase + UBIR);
1439         writel(denom, sport->port.membase + UBMR);
1440
1441         if (!is_imx1_uart(sport))
1442                 writel(sport->port.uartclk / div / 1000,
1443                                 sport->port.membase + IMX21_ONEMS);
1444
1445         writel(old_ucr1, sport->port.membase + UCR1);
1446
1447         /* set the parity, stop bits and data size */
1448         writel(ucr2 | old_ucr2, sport->port.membase + UCR2);
1449
1450         if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
1451                 imx_enable_ms(&sport->port);
1452
1453         if (sport->dma_is_inited && !sport->dma_is_enabled)
1454                 imx_enable_dma(sport);
1455         spin_unlock_irqrestore(&sport->port.lock, flags);
1456 }
1457
1458 static const char *imx_type(struct uart_port *port)
1459 {
1460         struct imx_port *sport = (struct imx_port *)port;
1461
1462         return sport->port.type == PORT_IMX ? "IMX" : NULL;
1463 }
1464
1465 /*
1466  * Configure/autoconfigure the port.
1467  */
1468 static void imx_config_port(struct uart_port *port, int flags)
1469 {
1470         struct imx_port *sport = (struct imx_port *)port;
1471
1472         if (flags & UART_CONFIG_TYPE)
1473                 sport->port.type = PORT_IMX;
1474 }
1475
1476 /*
1477  * Verify the new serial_struct (for TIOCSSERIAL).
1478  * The only changes we allow are to the flags and type, and
1479  * even then only between PORT_IMX and PORT_UNKNOWN.
1480  */
1481 static int
1482 imx_verify_port(struct uart_port *port, struct serial_struct *ser)
1483 {
1484         struct imx_port *sport = (struct imx_port *)port;
1485         int ret = 0;
1486
1487         if (ser->type != PORT_UNKNOWN && ser->type != PORT_IMX)
1488                 ret = -EINVAL;
1489         if (sport->port.irq != ser->irq)
1490                 ret = -EINVAL;
1491         if (ser->io_type != UPIO_MEM)
1492                 ret = -EINVAL;
1493         if (sport->port.uartclk / 16 != ser->baud_base)
1494                 ret = -EINVAL;
1495         if (sport->port.mapbase != (unsigned long)ser->iomem_base)
1496                 ret = -EINVAL;
1497         if (sport->port.iobase != ser->port)
1498                 ret = -EINVAL;
1499         if (ser->hub6 != 0)
1500                 ret = -EINVAL;
1501         return ret;
1502 }
1503
1504 #if defined(CONFIG_CONSOLE_POLL)
1505
1506 static int imx_poll_init(struct uart_port *port)
1507 {
1508         struct imx_port *sport = (struct imx_port *)port;
1509         unsigned long flags;
1510         unsigned long temp;
1511         int retval;
1512
1513         retval = clk_prepare_enable(sport->clk_ipg);
1514         if (retval)
1515                 return retval;
1516         retval = clk_prepare_enable(sport->clk_per);
1517         if (retval)
1518                 clk_disable_unprepare(sport->clk_ipg);
1519
1520         imx_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
1521
1522         spin_lock_irqsave(&sport->port.lock, flags);
1523
1524         temp = readl(sport->port.membase + UCR1);
1525         if (is_imx1_uart(sport))
1526                 temp |= IMX1_UCR1_UARTCLKEN;
1527         temp |= UCR1_UARTEN | UCR1_RRDYEN;
1528         temp &= ~(UCR1_TXMPTYEN | UCR1_RTSDEN);
1529         writel(temp, sport->port.membase + UCR1);
1530
1531         temp = readl(sport->port.membase + UCR2);
1532         temp |= UCR2_RXEN;
1533         writel(temp, sport->port.membase + UCR2);
1534
1535         spin_unlock_irqrestore(&sport->port.lock, flags);
1536
1537         return 0;
1538 }
1539
1540 static int imx_poll_get_char(struct uart_port *port)
1541 {
1542         if (!(readl_relaxed(port->membase + USR2) & USR2_RDR))
1543                 return NO_POLL_CHAR;
1544
1545         return readl_relaxed(port->membase + URXD0) & URXD_RX_DATA;
1546 }
1547
1548 static void imx_poll_put_char(struct uart_port *port, unsigned char c)
1549 {
1550         unsigned int status;
1551
1552         /* drain */
1553         do {
1554                 status = readl_relaxed(port->membase + USR1);
1555         } while (~status & USR1_TRDY);
1556
1557         /* write */
1558         writel_relaxed(c, port->membase + URTX0);
1559
1560         /* flush */
1561         do {
1562                 status = readl_relaxed(port->membase + USR2);
1563         } while (~status & USR2_TXDC);
1564 }
1565 #endif
1566
1567 static int imx_rs485_config(struct uart_port *port,
1568                             struct serial_rs485 *rs485conf)
1569 {
1570         struct imx_port *sport = (struct imx_port *)port;
1571
1572         /* unimplemented */
1573         rs485conf->delay_rts_before_send = 0;
1574         rs485conf->delay_rts_after_send = 0;
1575         rs485conf->flags |= SER_RS485_RX_DURING_TX;
1576
1577         /* RTS is required to control the transmitter */
1578         if (!sport->have_rtscts)
1579                 rs485conf->flags &= ~SER_RS485_ENABLED;
1580
1581         if (rs485conf->flags & SER_RS485_ENABLED) {
1582                 unsigned long temp;
1583
1584                 /* disable transmitter */
1585                 temp = readl(sport->port.membase + UCR2);
1586                 temp &= ~UCR2_CTSC;
1587                 if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
1588                         temp &= ~UCR2_CTS;
1589                 else
1590                         temp |= UCR2_CTS;
1591                 writel(temp, sport->port.membase + UCR2);
1592         }
1593
1594         port->rs485 = *rs485conf;
1595
1596         return 0;
1597 }
1598
1599 static struct uart_ops imx_pops = {
1600         .tx_empty       = imx_tx_empty,
1601         .set_mctrl      = imx_set_mctrl,
1602         .get_mctrl      = imx_get_mctrl,
1603         .stop_tx        = imx_stop_tx,
1604         .start_tx       = imx_start_tx,
1605         .stop_rx        = imx_stop_rx,
1606         .enable_ms      = imx_enable_ms,
1607         .break_ctl      = imx_break_ctl,
1608         .startup        = imx_startup,
1609         .shutdown       = imx_shutdown,
1610         .flush_buffer   = imx_flush_buffer,
1611         .set_termios    = imx_set_termios,
1612         .type           = imx_type,
1613         .config_port    = imx_config_port,
1614         .verify_port    = imx_verify_port,
1615 #if defined(CONFIG_CONSOLE_POLL)
1616         .poll_init      = imx_poll_init,
1617         .poll_get_char  = imx_poll_get_char,
1618         .poll_put_char  = imx_poll_put_char,
1619 #endif
1620 };
1621
1622 static struct imx_port *imx_ports[UART_NR];
1623
1624 #ifdef CONFIG_SERIAL_IMX_CONSOLE
1625 static void imx_console_putchar(struct uart_port *port, int ch)
1626 {
1627         struct imx_port *sport = (struct imx_port *)port;
1628
1629         while (readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)
1630                 barrier();
1631
1632         writel(ch, sport->port.membase + URTX0);
1633 }
1634
1635 /*
1636  * Interrupts are disabled on entering
1637  */
1638 static void
1639 imx_console_write(struct console *co, const char *s, unsigned int count)
1640 {
1641         struct imx_port *sport = imx_ports[co->index];
1642         struct imx_port_ucrs old_ucr;
1643         unsigned int ucr1;
1644         unsigned long flags = 0;
1645         int locked = 1;
1646         int retval;
1647
1648         retval = clk_prepare_enable(sport->clk_per);
1649         if (retval)
1650                 return;
1651         retval = clk_prepare_enable(sport->clk_ipg);
1652         if (retval) {
1653                 clk_disable_unprepare(sport->clk_per);
1654                 return;
1655         }
1656
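        /*
         * Take the port lock unless it may already be held: sysrq output is
         * generated from the RX interrupt handler with the lock taken, and
         * during an oops the lock state is unknown, so only try-lock then.
         */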
1657         if (sport->port.sysrq)
1658                 locked = 0;
1659         else if (oops_in_progress)
1660                 locked = spin_trylock_irqsave(&sport->port.lock, flags);
1661         else
1662                 spin_lock_irqsave(&sport->port.lock, flags);
1663
1664         /*
1665          *      First, save UCR1/2/3 and then disable interrupts
1666          */
1667         imx_port_ucrs_save(&sport->port, &old_ucr);
1668         ucr1 = old_ucr.ucr1;
1669
1670         if (is_imx1_uart(sport))
1671                 ucr1 |= IMX1_UCR1_UARTCLKEN;
1672         ucr1 |= UCR1_UARTEN;
1673         ucr1 &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN);
1674
1675         writel(ucr1, sport->port.membase + UCR1);
1676
1677         writel(old_ucr.ucr2 | UCR2_TXEN, sport->port.membase + UCR2);
1678
1679         uart_console_write(&sport->port, s, count, imx_console_putchar);
1680
1681         /*
1682          *      Finally, wait for transmitter to become empty
1683          *      and restore UCR1/2/3
1684          */
1685         while (!(readl(sport->port.membase + USR2) & USR2_TXDC));
1686
1687         imx_port_ucrs_restore(&sport->port, &old_ucr);
1688
1689         if (locked)
1690                 spin_unlock_irqrestore(&sport->port.lock, flags);
1691
1692         clk_disable_unprepare(sport->clk_ipg);
1693         clk_disable_unprepare(sport->clk_per);
1694 }
1695
1696 /*
1697  * If the port was already initialised (e.g. by a boot loader),
1698  * try to determine the current setup.
1699  */
1700 static void __init
1701 imx_console_get_options(struct imx_port *sport, int *baud,
1702                            int *parity, int *bits)
1703 {
1704
1705         if (readl(sport->port.membase + UCR1) & UCR1_UARTEN) {
1706                 /* ok, the port was enabled */
1707                 unsigned int ucr2, ubir, ubmr, uartclk;
1708                 unsigned int baud_raw;
1709                 unsigned int ucfr_rfdiv;
1710
1711                 ucr2 = readl(sport->port.membase + UCR2);
1712
1713                 *parity = 'n';
1714                 if (ucr2 & UCR2_PREN) {
1715                         if (ucr2 & UCR2_PROE)
1716                                 *parity = 'o';
1717                         else
1718                                 *parity = 'e';
1719                 }
1720
1721                 if (ucr2 & UCR2_WS)
1722                         *bits = 8;
1723                 else
1724                         *bits = 7;
1725
1726                 ubir = readl(sport->port.membase + UBIR) & 0xffff;
1727                 ubmr = readl(sport->port.membase + UBMR) & 0xffff;
1728
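                /*
                 * Decode UFCR.RFDIV: field values 0..5 select a reference
                 * clock divider of (6 - value), while 6 selects divide-by-7.
                 */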
1729                 ucfr_rfdiv = (readl(sport->port.membase + UFCR) & UFCR_RFDIV) >> 7;
1730                 if (ucfr_rfdiv == 6)
1731                         ucfr_rfdiv = 7;
1732                 else
1733                         ucfr_rfdiv = 6 - ucfr_rfdiv;
1734
1735                 uartclk = clk_get_rate(sport->clk_per);
1736                 uartclk /= ucfr_rfdiv;
1737
1738                 {       /*
1739                          * The following code computes
1740                          *   baud_raw = round(((uartclk/16) * (ubir + 1)) / (ubmr + 1))
1741                          * exactly, without floating point or a long long division,
1742                          * which would otherwise be needed to avoid 32-bit overflow.
1743                          */
1744                         unsigned int mul = ubir + 1;
1745                         unsigned int div = 16 * (ubmr + 1);
1746                         unsigned int rem = uartclk % div;
1747
1748                         baud_raw = (uartclk / div) * mul;
1749                         baud_raw += (rem * mul + div / 2) / div;
1750                         *baud = (baud_raw + 50) / 100 * 100;
1751                 }
1752
1753                 if (*baud != baud_raw)
1754                         pr_info("Console IMX rounded baud rate from %d to %d\n",
1755                                 baud_raw, *baud);
1756         }
1757 }
1758
1759 static int __init
1760 imx_console_setup(struct console *co, char *options)
1761 {
1762         struct imx_port *sport;
1763         int baud = 9600;
1764         int bits = 8;
1765         int parity = 'n';
1766         int flow = 'n';
1767         int retval;
1768
1769         /*
1770          * Check whether an invalid uart number has been specified, and
1771          * if so, search for the first available port that does have
1772          * console support.
1773          */
1774         if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports))
1775                 co->index = 0;
1776         sport = imx_ports[co->index];
1777         if (sport == NULL)
1778                 return -ENODEV;
1779
1780         /* For setting the registers, we only need to enable the ipg clock. */
1781         retval = clk_prepare_enable(sport->clk_ipg);
1782         if (retval)
1783                 goto error_console;
1784
1785         if (options)
1786                 uart_parse_options(options, &baud, &parity, &bits, &flow);
1787         else
1788                 imx_console_get_options(sport, &baud, &parity, &bits);
1789
1790         imx_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
1791
1792         retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);
1793
1794         clk_disable_unprepare(sport->clk_ipg);
1795
1796 error_console:
1797         return retval;
1798 }
1799
1800 static struct uart_driver imx_reg;
1801 static struct console imx_console = {
1802         .name           = DEV_NAME,
1803         .write          = imx_console_write,
1804         .device         = uart_console_device,
1805         .setup          = imx_console_setup,
1806         .flags          = CON_PRINTBUFFER,
1807         .index          = -1,
1808         .data           = &imx_reg,
1809 };
1810
1811 #define IMX_CONSOLE     &imx_console
1812
1813 #ifdef CONFIG_OF
1814 static void imx_console_early_putchar(struct uart_port *port, int ch)
1815 {
1816         while (readl_relaxed(port->membase + IMX21_UTS) & UTS_TXFULL)
1817                 cpu_relax();
1818
1819         writel_relaxed(ch, port->membase + URTX0);
1820 }
1821
1822 static void imx_console_early_write(struct console *con, const char *s,
1823                                     unsigned count)
1824 {
1825         struct earlycon_device *dev = con->data;
1826
1827         uart_console_write(&dev->port, s, count, imx_console_early_putchar);
1828 }
1829
1830 static int __init
1831 imx_console_early_setup(struct earlycon_device *dev, const char *opt)
1832 {
1833         if (!dev->port.membase)
1834                 return -ENODEV;
1835
1836         dev->con->write = imx_console_early_write;
1837
1838         return 0;
1839 }
1840 OF_EARLYCON_DECLARE(ec_imx6q, "fsl,imx6q-uart", imx_console_early_setup);
1841 OF_EARLYCON_DECLARE(ec_imx21, "fsl,imx21-uart", imx_console_early_setup);
1842 #endif
1843
1844 #else
1845 #define IMX_CONSOLE     NULL
1846 #endif
1847
1848 static struct uart_driver imx_reg = {
1849         .owner          = THIS_MODULE,
1850         .driver_name    = DRIVER_NAME,
1851         .dev_name       = DEV_NAME,
1852         .major          = SERIAL_IMX_MAJOR,
1853         .minor          = MINOR_START,
1854         .nr             = ARRAY_SIZE(imx_ports),
1855         .cons           = IMX_CONSOLE,
1856 };
1857
1858 #ifdef CONFIG_OF
1859 /*
1860  * This function returns 1 if pdev isn't a device instantiated by dt, 0 if it
1861  * could successfully get all information from dt, or a negative errno.
1862  */
1863 static int serial_imx_probe_dt(struct imx_port *sport,
1864                 struct platform_device *pdev)
1865 {
1866         struct device_node *np = pdev->dev.of_node;
1867         const struct of_device_id *of_id =
1868                         of_match_device(imx_uart_dt_ids, &pdev->dev);
1869         int ret;
1870
1871         if (!np)
1872                 /* no device tree device */
1873                 return 1;
1874
1875         ret = of_alias_get_id(np, "serial");
1876         if (ret < 0) {
1877                 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
1878                 return ret;
1879         }
1880         sport->port.line = ret;
1881
1882         if (of_get_property(np, "fsl,uart-has-rtscts", NULL))
1883                 sport->have_rtscts = 1;
1884
1885         if (of_get_property(np, "fsl,dte-mode", NULL))
1886                 sport->dte_mode = 1;
1887
1888         sport->devdata = of_id->data;
1889
1890         return 0;
1891 }
1892 #else
1893 static inline int serial_imx_probe_dt(struct imx_port *sport,
1894                 struct platform_device *pdev)
1895 {
1896         return 1;
1897 }
1898 #endif
1899
1900 static void serial_imx_probe_pdata(struct imx_port *sport,
1901                 struct platform_device *pdev)
1902 {
1903         struct imxuart_platform_data *pdata = dev_get_platdata(&pdev->dev);
1904
1905         sport->port.line = pdev->id;
1906         sport->devdata = (struct imx_uart_data *)pdev->id_entry->driver_data;
1907
1908         if (!pdata)
1909                 return;
1910
1911         if (pdata->flags & IMXUART_HAVE_RTSCTS)
1912                 sport->have_rtscts = 1;
1913 }
1914
1915 static int serial_imx_probe(struct platform_device *pdev)
1916 {
1917         struct imx_port *sport;
1918         void __iomem *base;
1919         int ret = 0, reg;
1920         struct resource *res;
1921         int txirq, rxirq, rtsirq;
1922
1923         sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
1924         if (!sport)
1925                 return -ENOMEM;
1926
1927         ret = serial_imx_probe_dt(sport, pdev);
1928         if (ret > 0)
1929                 serial_imx_probe_pdata(sport, pdev);
1930         else if (ret < 0)
1931                 return ret;
1932
1933         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1934         base = devm_ioremap_resource(&pdev->dev, res);
1935         if (IS_ERR(base))
1936                 return PTR_ERR(base);
1937
1938         rxirq = platform_get_irq(pdev, 0);
1939         txirq = platform_get_irq(pdev, 1);
1940         rtsirq = platform_get_irq(pdev, 2);
1941
1942         sport->port.dev = &pdev->dev;
1943         sport->port.mapbase = res->start;
1944         sport->port.membase = base;
1945         sport->port.type = PORT_IMX;
1946         sport->port.iotype = UPIO_MEM;
1947         sport->port.irq = rxirq;
1948         sport->port.fifosize = 32;
1949         sport->port.ops = &imx_pops;
1950         sport->port.rs485_config = imx_rs485_config;
1951         sport->port.rs485.flags =
1952                 SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX;
1953         sport->port.flags = UPF_BOOT_AUTOCONF;
1954         init_timer(&sport->timer);
1955         sport->timer.function = imx_timeout;
1956         sport->timer.data     = (unsigned long)sport;
1957
1958         sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1959         if (IS_ERR(sport->clk_ipg)) {
1960                 ret = PTR_ERR(sport->clk_ipg);
1961                 dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
1962                 return ret;
1963         }
1964
1965         sport->clk_per = devm_clk_get(&pdev->dev, "per");
1966         if (IS_ERR(sport->clk_per)) {
1967                 ret = PTR_ERR(sport->clk_per);
1968                 dev_err(&pdev->dev, "failed to get per clk: %d\n", ret);
1969                 return ret;
1970         }
1971
1972         sport->port.uartclk = clk_get_rate(sport->clk_per);
1973
1974         /* For register access, we only need to enable the ipg clock. */
1975         ret = clk_prepare_enable(sport->clk_ipg);
1976         if (ret)
1977                 return ret;
1978
1979         /* Disable interrupts before requesting them */
1980         reg = readl_relaxed(sport->port.membase + UCR1);
1981         reg &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN |
1982                  UCR1_TXMPTYEN | UCR1_RTSDEN);
1983         writel_relaxed(reg, sport->port.membase + UCR1);
1984
1985         clk_disable_unprepare(sport->clk_ipg);
1986
1987         /*
1988          * Allocate the IRQ(s). The i.MX1 has three interrupts, whereas
1989          * later chips only have one interrupt.
1990          */
1991         if (txirq > 0) {
1992                 ret = devm_request_irq(&pdev->dev, rxirq, imx_rxint, 0,
1993                                        dev_name(&pdev->dev), sport);
1994                 if (ret)
1995                         return ret;
1996
1997                 ret = devm_request_irq(&pdev->dev, txirq, imx_txint, 0,
1998                                        dev_name(&pdev->dev), sport);
1999                 if (ret)
2000                         return ret;
2001         } else {
2002                 ret = devm_request_irq(&pdev->dev, rxirq, imx_int, 0,
2003                                        dev_name(&pdev->dev), sport);
2004                 if (ret)
2005                         return ret;
2006         }
2007
2008         imx_ports[sport->port.line] = sport;
2009
2010         platform_set_drvdata(pdev, sport);
2011
2012         return uart_add_one_port(&imx_reg, &sport->port);
2013 }
2014
2015 static int serial_imx_remove(struct platform_device *pdev)
2016 {
2017         struct imx_port *sport = platform_get_drvdata(pdev);
2018
2019         return uart_remove_one_port(&imx_reg, &sport->port);
2020 }
2021
2022 static void serial_imx_restore_context(struct imx_port *sport)
2023 {
2024         if (!sport->context_saved)
2025                 return;
2026
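        /*
         * Program the dividers, FIFO and escape settings first, then bring
         * the control registers back. UCR2_SRST is forced high because
         * writing it as 0 would trigger a software reset.
         */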
2027         writel(sport->saved_reg[4], sport->port.membase + UFCR);
2028         writel(sport->saved_reg[5], sport->port.membase + UESC);
2029         writel(sport->saved_reg[6], sport->port.membase + UTIM);
2030         writel(sport->saved_reg[7], sport->port.membase + UBIR);
2031         writel(sport->saved_reg[8], sport->port.membase + UBMR);
2032         writel(sport->saved_reg[9], sport->port.membase + IMX21_UTS);
2033         writel(sport->saved_reg[0], sport->port.membase + UCR1);
2034         writel(sport->saved_reg[1] | UCR2_SRST, sport->port.membase + UCR2);
2035         writel(sport->saved_reg[2], sport->port.membase + UCR3);
2036         writel(sport->saved_reg[3], sport->port.membase + UCR4);
2037         sport->context_saved = false;
2038 }
2039
2040 static void serial_imx_save_context(struct imx_port *sport)
2041 {
2042         /* Save necessary regs */
2043         sport->saved_reg[0] = readl(sport->port.membase + UCR1);
2044         sport->saved_reg[1] = readl(sport->port.membase + UCR2);
2045         sport->saved_reg[2] = readl(sport->port.membase + UCR3);
2046         sport->saved_reg[3] = readl(sport->port.membase + UCR4);
2047         sport->saved_reg[4] = readl(sport->port.membase + UFCR);
2048         sport->saved_reg[5] = readl(sport->port.membase + UESC);
2049         sport->saved_reg[6] = readl(sport->port.membase + UTIM);
2050         sport->saved_reg[7] = readl(sport->port.membase + UBIR);
2051         sport->saved_reg[8] = readl(sport->port.membase + UBMR);
2052         sport->saved_reg[9] = readl(sport->port.membase + IMX21_UTS);
2053         sport->context_saved = true;
2054 }
2055
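/*
 * The wakeup sources used here are the asynchronous WAKE interrupt
 * (UCR3_AWAKEN, triggered by RX line activity) and the RTS delta interrupt
 * (UCR1_RTSDEN).
 */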
2056 static void serial_imx_enable_wakeup(struct imx_port *sport, bool on)
2057 {
2058         unsigned int val;
2059
2060         val = readl(sport->port.membase + UCR3);
2061         if (on)
2062                 val |= UCR3_AWAKEN;
2063         else
2064                 val &= ~UCR3_AWAKEN;
2065         writel(val, sport->port.membase + UCR3);
2066
2067         val = readl(sport->port.membase + UCR1);
2068         if (on)
2069                 val |= UCR1_RTSDEN;
2070         else
2071                 val &= ~UCR1_RTSDEN;
2072         writel(val, sport->port.membase + UCR1);
2073 }
2074
2075 static int imx_serial_port_suspend_noirq(struct device *dev)
2076 {
2077         struct platform_device *pdev = to_platform_device(dev);
2078         struct imx_port *sport = platform_get_drvdata(pdev);
2079         int ret;
2080
2081         ret = clk_enable(sport->clk_ipg);
2082         if (ret)
2083                 return ret;
2084
2085         serial_imx_save_context(sport);
2086
2087         clk_disable(sport->clk_ipg);
2088
2089         return 0;
2090 }
2091
2092 static int imx_serial_port_resume_noirq(struct device *dev)
2093 {
2094         struct platform_device *pdev = to_platform_device(dev);
2095         struct imx_port *sport = platform_get_drvdata(pdev);
2096         int ret;
2097
2098         ret = clk_enable(sport->clk_ipg);
2099         if (ret)
2100                 return ret;
2101
2102         serial_imx_restore_context(sport);
2103
2104         clk_disable(sport->clk_ipg);
2105
2106         return 0;
2107 }
2108
2109 static int imx_serial_port_suspend(struct device *dev)
2110 {
2111         struct platform_device *pdev = to_platform_device(dev);
2112         struct imx_port *sport = platform_get_drvdata(pdev);
2113
2114         /* enable wakeup from i.MX UART */
2115         serial_imx_enable_wakeup(sport, true);
2116
2117         uart_suspend_port(&imx_reg, &sport->port);
2118
2119         return 0;
2120 }
2121
2122 static int imx_serial_port_resume(struct device *dev)
2123 {
2124         struct platform_device *pdev = to_platform_device(dev);
2125         struct imx_port *sport = platform_get_drvdata(pdev);
2126
2127         /* disable wakeup from i.MX UART */
2128         serial_imx_enable_wakeup(sport, false);
2129
2130         uart_resume_port(&imx_reg, &sport->port);
2131
2132         return 0;
2133 }
2134
2135 static const struct dev_pm_ops imx_serial_port_pm_ops = {
2136         .suspend_noirq = imx_serial_port_suspend_noirq,
2137         .resume_noirq = imx_serial_port_resume_noirq,
2138         .suspend = imx_serial_port_suspend,
2139         .resume = imx_serial_port_resume,
2140 };
2141
2142 static struct platform_driver serial_imx_driver = {
2143         .probe          = serial_imx_probe,
2144         .remove         = serial_imx_remove,
2145
2146         .id_table       = imx_uart_devtype,
2147         .driver         = {
2148                 .name   = "imx-uart",
2149                 .of_match_table = imx_uart_dt_ids,
2150                 .pm     = &imx_serial_port_pm_ops,
2151         },
2152 };
2153
2154 static int __init imx_serial_init(void)
2155 {
2156         int ret = uart_register_driver(&imx_reg);
2157
2158         if (ret)
2159                 return ret;
2160
2161         ret = platform_driver_register(&serial_imx_driver);
2162         if (ret != 0)
2163                 uart_unregister_driver(&imx_reg);
2164
2165         return ret;
2166 }
2167
2168 static void __exit imx_serial_exit(void)
2169 {
2170         platform_driver_unregister(&serial_imx_driver);
2171         uart_unregister_driver(&imx_reg);
2172 }
2173
2174 module_init(imx_serial_init);
2175 module_exit(imx_serial_exit);
2176
2177 MODULE_AUTHOR("Sascha Hauer");
2178 MODULE_DESCRIPTION("IMX generic serial port driver");
2179 MODULE_LICENSE("GPL");
2180 MODULE_ALIAS("platform:imx-uart");