serial: sh-sci: Do not resubmit DMA descriptors
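
In short: instead of caching struct dma_async_tx_descriptor pointers (desc_tx,
desc_rx[]) and acking/resubmitting them later, the driver now prepares a fresh
descriptor for every transfer with DMA_CTRL_ACK set, so the dmaengine core is
free to recycle descriptors on its own.  A minimal sketch of that prep/submit
pattern, with placeholder names (submit_one_rx, my_rx_complete and its param
are illustrative, not part of this driver):

    #include <linux/dmaengine.h>
    #include <linux/errno.h>
    #include <linux/scatterlist.h>

    /* Queue one Rx buffer: a new descriptor each time, with DMA_CTRL_ACK so
     * the dmaengine core may reuse the descriptor after it completes. */
    static int submit_one_rx(struct dma_chan *chan, struct scatterlist *sg,
                             dma_async_tx_callback my_rx_complete, void *param)
    {
            struct dma_async_tx_descriptor *desc;
            dma_cookie_t cookie;

            desc = dmaengine_prep_slave_sg(chan, sg, 1, DMA_DEV_TO_MEM,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
            if (!desc)
                    return -ENOMEM;

            desc->callback = my_rx_complete;
            desc->callback_param = param;

            cookie = dmaengine_submit(desc);
            if (dma_submit_error(cookie))
                    return -EIO;

            dma_async_issue_pending(chan);
            return 0;
    }
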
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 294b283f5c8535c3f227ac82fb01882fe3d7c24c..e318f002da399e70862a504575f6d1fdf74598a0 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -104,8 +104,6 @@ struct sci_port {
        struct dma_chan                 *chan_rx;
 
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
-       struct dma_async_tx_descriptor  *desc_tx;
-       struct dma_async_tx_descriptor  *desc_rx[2];
        dma_cookie_t                    cookie_tx;
        dma_cookie_t                    cookie_rx[2];
        dma_cookie_t                    active_rx;
@@ -1285,9 +1283,6 @@ static void sci_dma_tx_complete(void *arg)
 
        port->icount.tx += s->tx_dma_len;
 
-       async_tx_ack(s->desc_tx);
-       s->desc_tx = NULL;
-
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
 
@@ -1401,29 +1396,17 @@ static void sci_submit_rx(struct sci_port *s)
                struct dma_async_tx_descriptor *desc;
 
                desc = dmaengine_prep_slave_sg(chan,
-                       sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+                       sg, 1, DMA_DEV_TO_MEM,
+                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               if (!desc)
+                       goto fail;
 
-               if (desc) {
-                       s->desc_rx[i] = desc;
-                       desc->callback = sci_dma_rx_complete;
-                       desc->callback_param = s;
-                       s->cookie_rx[i] = dmaengine_submit(desc);
-               }
+               desc->callback = sci_dma_rx_complete;
+               desc->callback_param = s;
+               s->cookie_rx[i] = dmaengine_submit(desc);
+               if (dma_submit_error(s->cookie_rx[i]))
+                       goto fail;
 
-               if (!desc || dma_submit_error(s->cookie_rx[i])) {
-                       if (i) {
-                               async_tx_ack(s->desc_rx[0]);
-                               s->cookie_rx[0] = -EINVAL;
-                       }
-                       if (desc) {
-                               async_tx_ack(desc);
-                               s->cookie_rx[i] = -EINVAL;
-                       }
-                       dev_warn(s->port.dev,
-                                "Failed to re-start Rx DMA, using PIO\n");
-                       sci_rx_dma_release(s, true);
-                       return;
-               }
                dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
                        s->cookie_rx[i], i);
        }
@@ -1431,6 +1414,16 @@ static void sci_submit_rx(struct sci_port *s)
        s->active_rx = s->cookie_rx[0];
 
        dma_async_issue_pending(chan);
+       return;
+
+fail:
+       if (i)
+               dmaengine_terminate_all(chan);
+       for (i = 0; i < 2; i++)
+               s->cookie_rx[i] = -EINVAL;
+       s->active_rx = -EINVAL;
+       dev_warn(s->port.dev, "Failed to re-start Rx DMA, using PIO\n");
+       sci_rx_dma_release(s, true);
 }
 
 static void work_fn_rx(struct work_struct *work)
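
(Aside on the two hunks above: the new fail path is a fairly generic "queue N
buffers or bail out" shape.  A condensed, hedged sketch of just that pattern,
callbacks omitted for brevity and the cookie array a placeholder:)

    /* Queue n Rx buffers; on the first failure undo anything already queued
     * and report the error so the caller can fall back to PIO. */
    static int queue_rx_ring(struct dma_chan *chan, struct scatterlist *sg,
                             dma_cookie_t *cookie, unsigned int n)
    {
            unsigned int i;

            for (i = 0; i < n; i++) {
                    struct dma_async_tx_descriptor *desc;

                    desc = dmaengine_prep_slave_sg(chan, &sg[i], 1,
                                                   DMA_DEV_TO_MEM,
                                                   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                    if (!desc)
                            goto fail;

                    cookie[i] = dmaengine_submit(desc);
                    if (dma_submit_error(cookie[i]))
                            goto fail;
            }

            dma_async_issue_pending(chan);
            return 0;

    fail:
            if (i)                          /* something was already queued */
                    dmaengine_terminate_all(chan);
            for (i = 0; i < n; i++)
                    cookie[i] = -EINVAL;    /* mark every slot invalid */
            return -EIO;
    }
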
@@ -1438,6 +1431,8 @@ static void work_fn_rx(struct work_struct *work)
        struct sci_port *s = container_of(work, struct sci_port, work_rx);
        struct uart_port *port = &s->port;
        struct dma_async_tx_descriptor *desc;
+       struct dma_tx_state state;
+       enum dma_status status;
        int new;
 
        if (s->active_rx == s->cookie_rx[0]) {
@@ -1449,23 +1444,22 @@ static void work_fn_rx(struct work_struct *work)
                        s->active_rx);
                return;
        }
-       desc = s->desc_rx[new];
 
-       if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
-           DMA_COMPLETE) {
+       status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
+       if (status != DMA_COMPLETE) {
                /* Handle incomplete DMA receive */
                struct dma_chan *chan = s->chan_rx;
-               struct shdma_desc *sh_desc = container_of(desc,
-                                       struct shdma_desc, async_tx);
                unsigned long flags;
+               unsigned int read;
                int count;
 
                dmaengine_terminate_all(chan);
-               dev_dbg(port->dev, "Read %zu bytes with cookie %d\n",
-                       sh_desc->partial, sh_desc->cookie);
+               read = sg_dma_len(&s->sg_rx[new]) - state.residue;
+               dev_dbg(port->dev, "Read %u bytes with cookie %d\n", read,
+                       s->active_rx);
 
                spin_lock_irqsave(&port->lock, flags);
-               count = sci_dma_rx_push(s, sh_desc->partial);
+               count = sci_dma_rx_push(s, read);
                spin_unlock_irqrestore(&port->lock, flags);
 
                if (count)
@@ -1476,17 +1470,27 @@ static void work_fn_rx(struct work_struct *work)
                return;
        }
 
+       desc = dmaengine_prep_slave_sg(s->chan_rx, &s->sg_rx[new], 1,
+                                      DMA_DEV_TO_MEM,
+                                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       if (!desc)
+               goto fail;
+
+       desc->callback = sci_dma_rx_complete;
+       desc->callback_param = s;
        s->cookie_rx[new] = dmaengine_submit(desc);
-       if (dma_submit_error(s->cookie_rx[new])) {
-               dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
-               sci_rx_dma_release(s, true);
-               return;
-       }
+       if (dma_submit_error(s->cookie_rx[new]))
+               goto fail;
 
        s->active_rx = s->cookie_rx[!new];
 
        dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n",
                __func__, s->cookie_rx[new], new, s->active_rx);
+       return;
+
+fail:
+       dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
+       sci_rx_dma_release(s, true);
 }
 
 static void work_fn_tx(struct work_struct *work)
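
The key change in work_fn_rx() above is that the byte count of an incomplete
transfer now comes from the generic residue reporting (dmaengine_tx_status()
filling struct dma_tx_state) instead of poking into the shdma-specific
descriptor.  A self-contained sketch of that calculation, with placeholder
names (rx_bytes_received and buf_len are illustrative):

    /* How much of a buf_len-byte Rx buffer actually arrived for 'cookie'?
     * Query the state first, then stop the transfer; the residue captured
     * in 'state' is the part of the buffer still unwritten. */
    static unsigned int rx_bytes_received(struct dma_chan *chan,
                                          dma_cookie_t cookie,
                                          unsigned int buf_len)
    {
            struct dma_tx_state state;
            enum dma_status status;

            status = dmaengine_tx_status(chan, cookie, &state);
            if (status == DMA_COMPLETE)
                    return buf_len;         /* whole buffer was filled */

            dmaengine_terminate_all(chan);
            return buf_len - state.residue;
    }
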
@@ -1526,7 +1530,6 @@ static void work_fn_tx(struct work_struct *work)
                                   DMA_TO_DEVICE);
 
        spin_lock_irq(&port->lock);
-       s->desc_tx = desc;
        desc->callback = sci_dma_tx_complete;
        desc->callback_param = s;
        spin_unlock_irq(&port->lock);
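
The Tx side follows the same idea: the descriptor pointer is only needed
between prep and submit, so there is nothing left to cache in desc_tx, and
(assuming the Tx descriptor is likewise prepared with DMA_CTRL_ACK, which this
excerpt does not show) sci_dma_tx_complete() no longer has to async_tx_ack()
it.  A hedged sketch of that shape, with placeholder names:

    static int start_tx_dma(struct dma_chan *chan, dma_addr_t buf, size_t len,
                            dma_async_tx_callback done, void *param)
    {
            struct dma_async_tx_descriptor *desc;
            dma_cookie_t cookie;

            desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
                                               DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
            if (!desc)
                    return -ENOMEM;

            desc->callback = done;          /* e.g. sci_dma_tx_complete */
            desc->callback_param = param;

            cookie = dmaengine_submit(desc);        /* 'desc' not kept around */
            if (dma_submit_error(cookie))
                    return -EIO;

            dma_async_issue_pending(chan);
            return 0;
    }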