X-Git-Url: https://git.karo-electronics.de/?a=blobdiff_plain;f=drivers%2Ftty%2Fserial%2Fsh-sci.c;h=e318f002da399e70862a504575f6d1fdf74598a0;hb=47aceb927fffa77b0328b6fde846a6b2182dad01;hp=cfef543df25271f5b36fc82aabd852e295931234;hpb=f5835c1d0e30c7d0a48aa36e3a353c7d54ace470;p=karo-tx-linux.git

diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index cfef543df252..e318f002da39 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -104,13 +104,11 @@ struct sci_port {
 	struct dma_chan			*chan_rx;
 
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
-	struct dma_async_tx_descriptor	*desc_tx;
-	struct dma_async_tx_descriptor	*desc_rx[2];
 	dma_cookie_t			cookie_tx;
 	dma_cookie_t			cookie_rx[2];
 	dma_cookie_t			active_rx;
-	struct scatterlist		sg_tx;
-	unsigned int			sg_len_tx;
+	dma_addr_t			tx_dma_addr;
+	unsigned int			tx_dma_len;
 	struct scatterlist		sg_rx[2];
 	size_t				buf_len_rx;
 	struct sh_dmae_slave		param_tx;
@@ -1280,13 +1278,10 @@ static void sci_dma_tx_complete(void *arg)
 
 	spin_lock_irqsave(&port->lock, flags);
 
-	xmit->tail += sg_dma_len(&s->sg_tx);
+	xmit->tail += s->tx_dma_len;
 	xmit->tail &= UART_XMIT_SIZE - 1;
-	port->icount.tx += sg_dma_len(&s->sg_tx);
-
-	async_tx_ack(s->desc_tx);
-	s->desc_tx = NULL;
+	port->icount.tx += s->tx_dma_len;
 
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 		uart_write_wakeup(port);
 
@@ -1370,10 +1365,9 @@ static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
 
 	s->chan_rx = NULL;
 	s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
+	dma_free_coherent(chan->device->dev, s->buf_len_rx * 2,
+			  sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
 	dma_release_channel(chan);
-	if (sg_dma_address(&s->sg_rx[0]))
-		dma_free_coherent(port->dev, s->buf_len_rx * 2,
-				  sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
 	if (enable_pio)
 		sci_start_rx(port);
 }
@@ -1385,6 +1379,8 @@ static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
 
 	s->chan_tx = NULL;
 	s->cookie_tx = -EINVAL;
+	dma_unmap_single(chan->device->dev, s->tx_dma_addr, UART_XMIT_SIZE,
+			 DMA_TO_DEVICE);
 	dma_release_channel(chan);
 	if (enable_pio)
 		sci_start_tx(port);
@@ -1400,29 +1396,17 @@ static void sci_submit_rx(struct sci_port *s)
 		struct dma_async_tx_descriptor *desc;
 
 		desc = dmaengine_prep_slave_sg(chan,
-			sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+			sg, 1, DMA_DEV_TO_MEM,
+			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!desc)
+			goto fail;
 
-		if (desc) {
-			s->desc_rx[i] = desc;
-			desc->callback = sci_dma_rx_complete;
-			desc->callback_param = s;
-			s->cookie_rx[i] = desc->tx_submit(desc);
-		}
+		desc->callback = sci_dma_rx_complete;
+		desc->callback_param = s;
+		s->cookie_rx[i] = dmaengine_submit(desc);
+		if (dma_submit_error(s->cookie_rx[i]))
+			goto fail;
 
-		if (!desc || s->cookie_rx[i] < 0) {
-			if (i) {
-				async_tx_ack(s->desc_rx[0]);
-				s->cookie_rx[0] = -EINVAL;
-			}
-			if (desc) {
-				async_tx_ack(desc);
-				s->cookie_rx[i] = -EINVAL;
-			}
-			dev_warn(s->port.dev,
-				 "Failed to re-start Rx DMA, using PIO\n");
-			sci_rx_dma_release(s, true);
-			return;
-		}
 		dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n",
 			__func__, s->cookie_rx[i], i);
 	}
@@ -1430,6 +1414,16 @@ static void sci_submit_rx(struct sci_port *s)
 	s->active_rx = s->cookie_rx[0];
 
 	dma_async_issue_pending(chan);
+	return;
+
+fail:
+	if (i)
+		dmaengine_terminate_all(chan);
+	for (i = 0; i < 2; i++)
+		s->cookie_rx[i] = -EINVAL;
+	s->active_rx = -EINVAL;
+	dev_warn(s->port.dev, "Failed to re-start Rx DMA, using PIO\n");
+	sci_rx_dma_release(s, true);
 }
 
 static void work_fn_rx(struct work_struct *work)
@@ -1437,6 +1431,8 @@ static void work_fn_rx(struct work_struct *work)
 	struct sci_port *s = container_of(work, struct sci_port, work_rx);
 	struct uart_port *port = &s->port;
 	struct dma_async_tx_descriptor *desc;
+	struct dma_tx_state state;
+	enum dma_status status;
 	int new;
 
 	if (s->active_rx == s->cookie_rx[0]) {
@@ -1448,23 +1444,22 @@ static void work_fn_rx(struct work_struct *work)
 			s->active_rx);
 		return;
 	}
-	desc = s->desc_rx[new];
 
-	if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
-	    DMA_COMPLETE) {
+	status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
+	if (status != DMA_COMPLETE) {
 		/* Handle incomplete DMA receive */
 		struct dma_chan *chan = s->chan_rx;
-		struct shdma_desc *sh_desc = container_of(desc,
-				struct shdma_desc, async_tx);
 		unsigned long flags;
+		unsigned int read;
 		int count;
 
 		dmaengine_terminate_all(chan);
-		dev_dbg(port->dev, "Read %zu bytes with cookie %d\n",
-			sh_desc->partial, sh_desc->cookie);
+		read = sg_dma_len(&s->sg_rx[new]) - state.residue;
+		dev_dbg(port->dev, "Read %u bytes with cookie %d\n", read,
+			s->active_rx);
 
 		spin_lock_irqsave(&port->lock, flags);
-		count = sci_dma_rx_push(s, sh_desc->partial);
+		count = sci_dma_rx_push(s, read);
 		spin_unlock_irqrestore(&port->lock, flags);
 
 		if (count)
@@ -1475,17 +1470,27 @@ static void work_fn_rx(struct work_struct *work)
 		return;
 	}
 
-	s->cookie_rx[new] = desc->tx_submit(desc);
-	if (s->cookie_rx[new] < 0) {
-		dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
-		sci_rx_dma_release(s, true);
-		return;
-	}
+	desc = dmaengine_prep_slave_sg(s->chan_rx, &s->sg_rx[new], 1,
+				       DMA_DEV_TO_MEM,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc)
+		goto fail;
+
+	desc->callback = sci_dma_rx_complete;
+	desc->callback_param = s;
+	s->cookie_rx[new] = dmaengine_submit(desc);
+	if (dma_submit_error(s->cookie_rx[new]))
+		goto fail;
 
 	s->active_rx = s->cookie_rx[!new];
 
 	dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n",
 		__func__, s->cookie_rx[new], new, s->active_rx);
+	return;
+
+fail:
+	dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
+	sci_rx_dma_release(s, true);
 }
 
 static void work_fn_tx(struct work_struct *work)
@@ -1495,7 +1500,7 @@ static void work_fn_tx(struct work_struct *work)
 	struct dma_chan *chan = s->chan_tx;
 	struct uart_port *port = &s->port;
 	struct circ_buf *xmit = &port->state->xmit;
-	struct scatterlist *sg = &s->sg_tx;
+	dma_addr_t buf;
 
 	/*
 	 * DMA is idle now.
@@ -1505,18 +1510,15 @@ static void work_fn_tx(struct work_struct *work)
 	 * consistent xmit buffer state.
 	 */
 	spin_lock_irq(&port->lock);
-	sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
-	sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
-		sg->offset;
-	sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
+	buf = s->tx_dma_addr + (xmit->tail & (UART_XMIT_SIZE - 1));
+	s->tx_dma_len = min_t(unsigned int,
+			      CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
 		CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
 	spin_unlock_irq(&port->lock);
 
-	BUG_ON(!sg_dma_len(sg));
-
-	desc = dmaengine_prep_slave_sg(chan,
-			sg, s->sg_len_tx, DMA_MEM_TO_DEV,
-			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len,
+					   DMA_MEM_TO_DEV,
+					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
 		dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
 		/* switch to PIO */
@@ -1524,15 +1526,15 @@ static void work_fn_tx(struct work_struct *work)
 		return;
 	}
 
-	dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);
+	dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len,
+				   DMA_TO_DEVICE);
 
 	spin_lock_irq(&port->lock);
-	s->desc_tx = desc;
 	desc->callback = sci_dma_tx_complete;
 	desc->callback_param = s;
 	spin_unlock_irq(&port->lock);
-	s->cookie_tx = desc->tx_submit(desc);
-	if (s->cookie_tx < 0) {
+	s->cookie_tx = dmaengine_submit(desc);
+	if (dma_submit_error(s->cookie_tx)) {
 		dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
 		/* switch to PIO */
 		sci_tx_dma_release(s, true);
@@ -1563,7 +1565,7 @@ static void sci_start_tx(struct uart_port *port)
 	}
 
 	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
-	    s->cookie_tx < 0) {
+	    dma_submit_error(s->cookie_tx)) {
 		s->cookie_tx = 0;
 		schedule_work(&s->work_tx);
 	}
@@ -1680,7 +1682,6 @@ static void sci_request_dma(struct uart_port *port)
 	struct sh_dmae_slave *param;
 	struct dma_chan *chan;
 	dma_cap_mask_t mask;
-	int nent;
 
 	dev_dbg(port->dev, "%s: port %d\n", __func__, port->line);
 
@@ -1700,25 +1701,21 @@ static void sci_request_dma(struct uart_port *port)
 	dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
 	if (chan) {
 		s->chan_tx = chan;
-		sg_init_table(&s->sg_tx, 1);
 		/* UART circular tx buffer is an aligned page. */
-		BUG_ON((uintptr_t)port->state->xmit.buf & ~PAGE_MASK);
-		sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf),
-			    UART_XMIT_SIZE,
-			    (uintptr_t)port->state->xmit.buf & ~PAGE_MASK);
-		nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);
-		if (!nent) {
+		s->tx_dma_addr = dma_map_single(chan->device->dev,
+						port->state->xmit.buf,
+						UART_XMIT_SIZE,
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(chan->device->dev, s->tx_dma_addr)) {
 			dev_warn(port->dev, "Failed mapping Tx DMA descriptor\n");
-			sci_tx_dma_release(s, false);
+			dma_release_channel(chan);
+			s->chan_tx = NULL;
 		} else {
-			dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n",
-				__func__,
-				sg_dma_len(&s->sg_tx), port->state->xmit.buf,
-				&sg_dma_address(&s->sg_tx));
+			dev_dbg(port->dev, "%s: mapped %lu@%p to %pad\n",
+				__func__, UART_XMIT_SIZE,
+				port->state->xmit.buf, &s->tx_dma_addr);
 		}
-		s->sg_len_tx = nent;
-
 		INIT_WORK(&s->work_tx, work_fn_tx);
 	}
 
@@ -1736,14 +1733,17 @@ static void sci_request_dma(struct uart_port *port)
 
 		s->chan_rx = chan;
 
-		s->buf_len_rx = 2 * max(16, (int)port->fifosize);
-		buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2,
-					    &dma[0], GFP_KERNEL);
+		s->buf_len_rx = 2 * max_t(size_t, 16, port->fifosize);
+		buf[0] = dma_alloc_coherent(chan->device->dev,
+					    s->buf_len_rx * 2, &dma[0],
+					    GFP_KERNEL);
 
 		if (!buf[0]) {
 			dev_warn(port->dev,
 				 "Failed to allocate Rx dma buffer, using PIO\n");
-			sci_rx_dma_release(s, true);
+			dma_release_channel(chan);
+			s->chan_rx = NULL;
+			sci_start_rx(port);
 			return;
 		}
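
The patch above replaces the driver's open-coded descriptor handling with the canonical dmaengine slave-DMA sequence: map the buffer for the DMA controller's device, prepare a descriptor, attach a completion callback, submit it, check the returned cookie with dma_submit_error(), and finally call dma_async_issue_pending(). For readers less familiar with that API, here is a minimal stand-alone sketch of the same sequence. It is an illustration, not code from the patch; the my_* names are hypothetical placeholders, while the dmaengine/dma-mapping calls themselves are the real kernel API the patch converts to.

/*
 * Illustration only -- not part of the patch.  Hypothetical helper
 * showing the generic dmaengine Tx sequence sh-sci.c now follows:
 * map, prep, set callback, submit, check the cookie, issue.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static void my_tx_complete(void *arg)
{
	/* Runs when the transfer finishes; count bytes, wake writers, ... */
}

static int my_submit_tx(struct dma_chan *chan, void *cpu_buf, size_t len,
			void *cb_arg)
{
	struct device *dev = chan->device->dev;	/* map for the DMAC's device */
	struct dma_async_tx_descriptor *desc;
	dma_addr_t dma_buf;
	dma_cookie_t cookie;

	/* One-shot mapping; sh-sci instead maps the whole UART_XMIT_SIZE
	 * circular buffer once at request time and unmaps it on release. */
	dma_buf = dma_map_single(dev, cpu_buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_buf))
		return -EIO;

	/* A single contiguous buffer needs no scatterlist, hence
	 * dmaengine_prep_slave_single() instead of the old 1-entry sg. */
	desc = dmaengine_prep_slave_single(chan, dma_buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		goto unmap;

	desc->callback = my_tx_complete;
	desc->callback_param = cb_arg;

	/* dmaengine_submit()/dma_submit_error() replace the open-coded
	 * desc->tx_submit(desc) and the "cookie < 0" checks. */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		goto unmap;

	dma_async_issue_pending(chan);	/* transfers only start after this */
	return 0;

unmap:
	dma_unmap_single(dev, dma_buf, len, DMA_TO_DEVICE);
	return -EIO;
}

Note that the sketch, like the patch, maps, syncs, and frees against chan->device->dev (the device of the DMA controller that actually performs the transfers) rather than the UART's own port->dev, which is one of the corrections the conversion makes throughout.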