serial: sh-sci: Do not resubmit DMA descriptors
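
DMA engine descriptors are single-use: a completed descriptor may be
recycled by the provider as soon as it has been acknowledged, so calling
->tx_submit() on it again is not portable across dmaengine drivers. The
old code cached its Tx and Rx descriptors and resubmitted them, relying
on behaviour specific to the shdma driver (note the sh_dmae_slave
members in struct sci_port).

Hence prepare and submit a fresh descriptor for every transfer. While at
it:
 - account for aborted Rx transfers using the generic residue from
   dmaengine_tx_status() instead of the shdma-specific "partial" field,
 - map the Tx circular buffer once with dma_map_single() instead of
   carrying a single-entry scatterlist around,
 - flag handled overrun interrupts as IRQ_HANDLED.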
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 59ce9484875b200181d6c492bbe1c187fa0e9da9..e318f002da399e70862a504575f6d1fdf74598a0 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -104,13 +104,11 @@ struct sci_port {
        struct dma_chan                 *chan_rx;
 
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
-       struct dma_async_tx_descriptor  *desc_tx;
-       struct dma_async_tx_descriptor  *desc_rx[2];
        dma_cookie_t                    cookie_tx;
        dma_cookie_t                    cookie_rx[2];
        dma_cookie_t                    active_rx;
-       struct scatterlist              sg_tx;
-       unsigned int                    sg_len_tx;
+       dma_addr_t                      tx_dma_addr;
+       unsigned int                    tx_dma_len;
        struct scatterlist              sg_rx[2];
        size_t                          buf_len_rx;
        struct sh_dmae_slave            param_tx;
@@ -1066,11 +1064,8 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
         * DR flags
         */
        if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
-           (scr_status & SCSCR_RIE)) {
-               if (port->type == PORT_SCIF || port->type == PORT_HSCIF)
-                       sci_handle_fifo_overrun(port);
+           (scr_status & SCSCR_RIE))
                ret = sci_rx_interrupt(irq, ptr);
-       }
 
        /* Error Interrupt */
        if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
@@ -1081,8 +1076,10 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
                ret = sci_br_interrupt(irq, ptr);
 
        /* Overrun Interrupt */
-       if (orer_status & s->overrun_mask)
+       if (orer_status & s->overrun_mask) {
                sci_handle_fifo_overrun(port);
+               ret = IRQ_HANDLED;
+       }
 
        return ret;
 }
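
Two fixes in the multiplexed interrupt handler: the (H)SCIF overrun check
is dropped from the Rx branch, since overruns are already handled by the
dedicated branch below, and that branch now sets ret = IRQ_HANDLED, so an
interrupt caused purely by an overrun is no longer reported as IRQ_NONE
(which the spurious-interrupt detector could otherwise use to disable the
IRQ line).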
@@ -1281,13 +1278,10 @@ static void sci_dma_tx_complete(void *arg)
 
        spin_lock_irqsave(&port->lock, flags);
 
-       xmit->tail += sg_dma_len(&s->sg_tx);
+       xmit->tail += s->tx_dma_len;
        xmit->tail &= UART_XMIT_SIZE - 1;
 
-       port->icount.tx += sg_dma_len(&s->sg_tx);
-
-       async_tx_ack(s->desc_tx);
-       s->desc_tx = NULL;
+       port->icount.tx += s->tx_dma_len;
 
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
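
With the Tx scatterlist gone, the completion handler advances the circular
buffer tail by the cached tx_dma_len. The advance-and-mask is a cheap
modulo because UART_XMIT_SIZE (one page) is a power of two. Dropping
async_tx_ack() is safe because the descriptor is now prepared with
DMA_CTRL_ACK set (see work_fn_tx() below), so the dmaengine driver may
recycle it on completion without further bookkeeping.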
@@ -1320,7 +1314,8 @@ static int sci_dma_rx_push(struct sci_port *s, size_t count)
        } else if (s->active_rx == s->cookie_rx[1]) {
                active = 1;
        } else {
-               dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
+               dev_err(port->dev, "%s: Rx cookie %d not found!\n", __func__,
+                       s->active_rx);
                return 0;
        }
 
@@ -1346,8 +1341,8 @@ static void sci_dma_rx_complete(void *arg)
        unsigned long flags;
        int count;
 
-       dev_dbg(port->dev, "%s(%d) active #%d\n",
-               __func__, port->line, s->active_rx);
+       dev_dbg(port->dev, "%s(%d) active cookie %d\n", __func__, port->line,
+               s->active_rx);
 
        spin_lock_irqsave(&port->lock, flags);
 
@@ -1370,10 +1365,9 @@ static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
 
        s->chan_rx = NULL;
        s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
+       dma_free_coherent(chan->device->dev, s->buf_len_rx * 2,
+                         sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
        dma_release_channel(chan);
-       if (sg_dma_address(&s->sg_rx[0]))
-               dma_free_coherent(port->dev, s->buf_len_rx * 2,
-                                 sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
        if (enable_pio)
                sci_start_rx(port);
 }
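
The Rx buffer is now freed unconditionally, before the channel is
released, and on chan->device->dev (the DMA controller's device) rather
than port->dev, matching the dma_alloc_coherent() call in
sci_request_dma() below. The old sg_dma_address() guard is no longer
needed: after this patch the allocation-failure path in sci_request_dma()
no longer calls this function, so the buffer always exists here. The
pairing, condensed:

	/* in sci_request_dma(): one coherent region, split into two Rx buffers */
	buf = dma_alloc_coherent(chan->device->dev, s->buf_len_rx * 2,
				 &dma_addr, GFP_KERNEL);

	/* in sci_rx_dma_release(): same device and size, then the channel */
	dma_free_coherent(chan->device->dev, s->buf_len_rx * 2,
			  sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
	dma_release_channel(chan);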
@@ -1385,6 +1379,8 @@ static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
 
        s->chan_tx = NULL;
        s->cookie_tx = -EINVAL;
+       dma_unmap_single(chan->device->dev, s->tx_dma_addr, UART_XMIT_SIZE,
+                        DMA_TO_DEVICE);
        dma_release_channel(chan);
        if (enable_pio)
                sci_start_tx(port);
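
Likewise for Tx: the streaming mapping created with dma_map_single() in
sci_request_dma() is undone here, on the same chan->device->dev, before
the channel is released.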
@@ -1400,36 +1396,34 @@ static void sci_submit_rx(struct sci_port *s)
                struct dma_async_tx_descriptor *desc;
 
                desc = dmaengine_prep_slave_sg(chan,
-                       sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+                       sg, 1, DMA_DEV_TO_MEM,
+                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               if (!desc)
+                       goto fail;
 
-               if (desc) {
-                       s->desc_rx[i] = desc;
-                       desc->callback = sci_dma_rx_complete;
-                       desc->callback_param = s;
-                       s->cookie_rx[i] = desc->tx_submit(desc);
-               }
+               desc->callback = sci_dma_rx_complete;
+               desc->callback_param = s;
+               s->cookie_rx[i] = dmaengine_submit(desc);
+               if (dma_submit_error(s->cookie_rx[i]))
+                       goto fail;
 
-               if (!desc || s->cookie_rx[i] < 0) {
-                       if (i) {
-                               async_tx_ack(s->desc_rx[0]);
-                               s->cookie_rx[0] = -EINVAL;
-                       }
-                       if (desc) {
-                               async_tx_ack(desc);
-                               s->cookie_rx[i] = -EINVAL;
-                       }
-                       dev_warn(s->port.dev,
-                                "failed to re-start DMA, using PIO\n");
-                       sci_rx_dma_release(s, true);
-                       return;
-               }
-               dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n",
-                       __func__, s->cookie_rx[i], i);
+               dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
+                       s->cookie_rx[i], i);
        }
 
        s->active_rx = s->cookie_rx[0];
 
        dma_async_issue_pending(chan);
+       return;
+
+fail:
+       if (i)
+               dmaengine_terminate_all(chan);
+       for (i = 0; i < 2; i++)
+               s->cookie_rx[i] = -EINVAL;
+       s->active_rx = -EINVAL;
+       dev_warn(s->port.dev, "Failed to re-start Rx DMA, using PIO\n");
+       sci_rx_dma_release(s, true);
 }
 
 static void work_fn_rx(struct work_struct *work)
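
The rewritten loop is the canonical dmaengine slave submission sequence;
failures at either the prepare or the submit step funnel into a single
fail path that tears down both buffers. A condensed sketch of the pattern
(generic names, not driver code):

	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(chan, sg, 1, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		goto fail;			/* no descriptor available */

	desc->callback = rx_complete;		/* runs on completion */
	desc->callback_param = ctx;

	cookie = dmaengine_submit(desc);	/* queue it; nothing starts yet */
	if (dma_submit_error(cookie))
		goto fail;

	dma_async_issue_pending(chan);		/* actually kick the hardware */

Preparing with DMA_CTRL_ACK up front tells the dmaengine driver it may
recycle the descriptor as soon as it completes, which is what makes the
old async_tx_ack() calls unnecessary.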
@@ -1437,6 +1431,8 @@ static void work_fn_rx(struct work_struct *work)
        struct sci_port *s = container_of(work, struct sci_port, work_rx);
        struct uart_port *port = &s->port;
        struct dma_async_tx_descriptor *desc;
+       struct dma_tx_state state;
+       enum dma_status status;
        int new;
 
        if (s->active_rx == s->cookie_rx[0]) {
@@ -1444,26 +1440,26 @@ static void work_fn_rx(struct work_struct *work)
        } else if (s->active_rx == s->cookie_rx[1]) {
                new = 1;
        } else {
-               dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
+               dev_err(port->dev, "%s: Rx cookie %d not found!\n", __func__,
+                       s->active_rx);
                return;
        }
-       desc = s->desc_rx[new];
 
-       if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
-           DMA_COMPLETE) {
+       status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
+       if (status != DMA_COMPLETE) {
                /* Handle incomplete DMA receive */
                struct dma_chan *chan = s->chan_rx;
-               struct shdma_desc *sh_desc = container_of(desc,
-                                       struct shdma_desc, async_tx);
                unsigned long flags;
+               unsigned int read;
                int count;
 
                dmaengine_terminate_all(chan);
-               dev_dbg(port->dev, "Read %zu bytes with cookie %d\n",
-                       sh_desc->partial, sh_desc->cookie);
+               read = sg_dma_len(&s->sg_rx[new]) - state.residue;
+               dev_dbg(port->dev, "Read %u bytes with cookie %d\n", read,
+                       s->active_rx);
 
                spin_lock_irqsave(&port->lock, flags);
-               count = sci_dma_rx_push(s, sh_desc->partial);
+               count = sci_dma_rx_push(s, read);
                spin_unlock_irqrestore(&port->lock, flags);
 
                if (count)
@@ -1474,17 +1470,27 @@ static void work_fn_rx(struct work_struct *work)
                return;
        }
 
-       s->cookie_rx[new] = desc->tx_submit(desc);
-       if (s->cookie_rx[new] < 0) {
-               dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
-               sci_rx_dma_release(s, true);
-               return;
-       }
+       desc = dmaengine_prep_slave_sg(s->chan_rx, &s->sg_rx[new], 1,
+                                      DMA_DEV_TO_MEM,
+                                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       if (!desc)
+               goto fail;
+
+       desc->callback = sci_dma_rx_complete;
+       desc->callback_param = s;
+       s->cookie_rx[new] = dmaengine_submit(desc);
+       if (dma_submit_error(s->cookie_rx[new]))
+               goto fail;
 
        s->active_rx = s->cookie_rx[!new];
 
-       dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n",
+       dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n",
                __func__, s->cookie_rx[new], new, s->active_rx);
+       return;
+
+fail:
+       dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
+       sci_rx_dma_release(s, true);
 }
 
 static void work_fn_tx(struct work_struct *work)
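
Instead of digging the transferred byte count out of the shdma-private
struct shdma_desc, the handler now asks the dmaengine core for the
residue, which every conforming driver reports. The generic pattern,
condensed:

	struct dma_tx_state state;
	enum dma_status status;
	unsigned int read;

	/* querying the status also fills in state.residue */
	status = dmaengine_tx_status(chan, cookie, &state);
	if (status != DMA_COMPLETE) {
		dmaengine_terminate_all(chan);
		/* bytes received = requested length minus what is left over */
		read = sg_dma_len(sg) - state.residue;
	}

The second half of the function then prepares and submits a brand-new
descriptor for the just-drained buffer, mirroring sci_submit_rx(), instead
of resubmitting the completed one.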
@@ -1494,7 +1500,7 @@ static void work_fn_tx(struct work_struct *work)
        struct dma_chan *chan = s->chan_tx;
        struct uart_port *port = &s->port;
        struct circ_buf *xmit = &port->state->xmit;
-       struct scatterlist *sg = &s->sg_tx;
+       dma_addr_t buf;
 
        /*
         * DMA is idle now.
@@ -1504,33 +1510,31 @@ static void work_fn_tx(struct work_struct *work)
         * consistent xmit buffer state.
         */
        spin_lock_irq(&port->lock);
-       sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
-       sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
-               sg->offset;
-       sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
+       buf = s->tx_dma_addr + (xmit->tail & (UART_XMIT_SIZE - 1));
+       s->tx_dma_len = min_t(unsigned int,
+               CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
                CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
        spin_unlock_irq(&port->lock);
 
-       BUG_ON(!sg_dma_len(sg));
-
-       desc = dmaengine_prep_slave_sg(chan,
-                       sg, s->sg_len_tx, DMA_MEM_TO_DEV,
-                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len,
+                                          DMA_MEM_TO_DEV,
+                                          DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
+               dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
                /* switch to PIO */
                sci_tx_dma_release(s, true);
                return;
        }
 
-       dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);
+       dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len,
+                                  DMA_TO_DEVICE);
 
        spin_lock_irq(&port->lock);
-       s->desc_tx = desc;
        desc->callback = sci_dma_tx_complete;
        desc->callback_param = s;
        spin_unlock_irq(&port->lock);
-       s->cookie_tx = desc->tx_submit(desc);
-       if (s->cookie_tx < 0) {
+       s->cookie_tx = dmaengine_submit(desc);
+       if (dma_submit_error(s->cookie_tx)) {
                dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
                /* switch to PIO */
                sci_tx_dma_release(s, true);
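
work_fn_tx() now operates on a long-lived streaming mapping of the whole
circular buffer: each chunk is just tx_dma_addr plus the tail offset,
prepared with dmaengine_prep_slave_single(). The length is clamped to
min(CIRC_CNT, CIRC_CNT_TO_END) so a single descriptor never wraps past the
end of the buffer; a wrapping transmit is simply split across two work
iterations. And because the CPU keeps writing into a streaming-mapped
buffer, ownership must be handed back to the device before every transfer:

	/* flush the freshly written bytes out to the device's view */
	dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len,
				   DMA_TO_DEVICE);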
@@ -1561,7 +1565,7 @@ static void sci_start_tx(struct uart_port *port)
        }
 
        if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
-           s->cookie_tx < 0) {
+           dma_submit_error(s->cookie_tx)) {
                s->cookie_tx = 0;
                schedule_work(&s->work_tx);
        }
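
dma_submit_error() only checks for a negative cookie, so it is equivalent
to the open-coded s->cookie_tx < 0 test, but documents the intent. The
helper is roughly (from include/linux/dmaengine.h at the time):

	static inline int dma_submit_error(dma_cookie_t cookie)
	{
		return cookie < 0 ? cookie : 0;
	}

Setting cookie_tx = 0 before scheduling the work therefore acts as a
"submission in flight" marker that keeps further sci_start_tx() calls from
queueing the work again.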
@@ -1678,7 +1682,6 @@ static void sci_request_dma(struct uart_port *port)
        struct sh_dmae_slave *param;
        struct dma_chan *chan;
        dma_cap_mask_t mask;
-       int nent;
 
        dev_dbg(port->dev, "%s: port %d\n", __func__, port->line);
 
@@ -1698,22 +1701,20 @@ static void sci_request_dma(struct uart_port *port)
        dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
        if (chan) {
                s->chan_tx = chan;
-               sg_init_table(&s->sg_tx, 1);
                /* UART circular tx buffer is an aligned page. */
-               BUG_ON((uintptr_t)port->state->xmit.buf & ~PAGE_MASK);
-               sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf),
-                           UART_XMIT_SIZE,
-                           (uintptr_t)port->state->xmit.buf & ~PAGE_MASK);
-               nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);
-               if (!nent)
-                       sci_tx_dma_release(s, false);
-               else
-                       dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n",
-                               __func__,
-                               sg_dma_len(&s->sg_tx), port->state->xmit.buf,
-                               &sg_dma_address(&s->sg_tx));
-
-               s->sg_len_tx = nent;
+               s->tx_dma_addr = dma_map_single(chan->device->dev,
+                                               port->state->xmit.buf,
+                                               UART_XMIT_SIZE,
+                                               DMA_TO_DEVICE);
+               if (dma_mapping_error(chan->device->dev, s->tx_dma_addr)) {
+                       dev_warn(port->dev, "Failed mapping Tx DMA buffer\n");
+                       dma_release_channel(chan);
+                       s->chan_tx = NULL;
+               } else {
+                       dev_dbg(port->dev, "%s: mapped %lu@%p to %pad\n",
+                               __func__, UART_XMIT_SIZE,
+                               port->state->xmit.buf, &s->tx_dma_addr);
+               }
 
                INIT_WORK(&s->work_tx, work_fn_tx);
        }
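
The Tx mapping now goes through the streaming DMA API, and on the DMA
controller's device (chan->device->dev) rather than the platform device
behind port->dev: the controller is what actually masters the bus, and the
two devices may have different DMA capabilities. A streaming mapping can
fail, so the result must be checked with dma_mapping_error() before use:

	dma_addr_t addr;

	addr = dma_map_single(chan->device->dev, port->state->xmit.buf,
			      UART_XMIT_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(chan->device->dev, addr)) {
		/* mapping failed: release the channel, fall back to PIO */
	}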
@@ -1732,14 +1733,17 @@ static void sci_request_dma(struct uart_port *port)
 
                s->chan_rx = chan;
 
-               s->buf_len_rx = 2 * max(16, (int)port->fifosize);
-               buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2,
-                                           &dma[0], GFP_KERNEL);
+               s->buf_len_rx = 2 * max_t(size_t, 16, port->fifosize);
+               buf[0] = dma_alloc_coherent(chan->device->dev,
+                                           s->buf_len_rx * 2, &dma[0],
+                                           GFP_KERNEL);
 
                if (!buf[0]) {
                        dev_warn(port->dev,
-                                "failed to allocate dma buffer, using PIO\n");
-                       sci_rx_dma_release(s, true);
+                                "Failed to allocate Rx DMA buffer, using PIO\n");
+                       dma_release_channel(chan);
+                       s->chan_rx = NULL;
+                       sci_start_rx(port);
                        return;
                }
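
On allocation failure the channel is now torn down inline instead of via
sci_rx_dma_release(): after this patch that helper unconditionally frees
the coherent buffer, which does not exist yet at this point. For
reference, a sketch of how the single allocation is typically split into
the two sg_rx[] halves further down in this function (assumed from the
surrounding code, which is not part of this hunk):

	for (i = 0; i < 2; i++) {
		struct scatterlist *sg = &s->sg_rx[i];

		sg_init_table(sg, 1);
		sg_set_buf(sg, buf[0] + i * s->buf_len_rx, s->buf_len_rx);
		sg_dma_address(sg) = dma[0] + i * s->buf_len_rx;
	}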
 
@@ -2011,13 +2015,13 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
        /*
         * Calculate delay for 2 DMA buffers (4 FIFO).
-        * See serial_core.c::uart_update_timeout(). With 10
-        * bits (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
-        * calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)."
-        * Then below we calculate 5 jiffies (20ms) for 2 DMA buffers (4 FIFO
-        * sizes), but when performing a faster transfer, value obtained by
-        * this formula is may not enough. Therefore, if value is smaller than
-        * 20msec, this sets 20msec as timeout of DMA.
+        * See serial_core.c::uart_update_timeout().
+        * With 10 bits (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above
+        * function calculates 1 jiffy for the data plus 5 jiffies for the
+        * "slop(e)." Then below we calculate 5 jiffies (20ms) for 2 DMA
+        * buffers (4 FIFO sizes), but when performing a faster transfer, the
+        * value obtained by this formula is too small. Therefore, if the value
+        * is smaller than 20ms, use 20ms as the timeout value for DMA.
         */
        if (s->chan_rx) {
                unsigned int bits;
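
Worked numbers for the example in the new comment: 4 FIFO sizes = 4 * 64 =
256 bytes; at 115200 baud with 10 bits per character that is
256 * 10 / 115200 ~= 22 ms, which the comment rounds to 5 jiffies (20 ms)
at HZ=250 (4 ms per jiffy). At higher baud rates the computed value
shrinks well below that, presumably under practical timer and workqueue
scheduling granularity, hence the 20 ms floor on the DMA timeout.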