/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"
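/*
 * Transfers below this size are not worth the dmaengine mapping and
 * setup overhead; the driver forces PIO for them instead.
 */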
#define TMIO_MMC_MIN_DMA_LEN 8
void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	if (!host->chan_tx || !host->chan_rx)
		return;

	if (host->dma->enable)
		host->dma->enable(host, enable);
}
void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
	tmio_mmc_enable_dma(host, false);

	if (host->chan_rx)
		dmaengine_terminate_all(host->chan_rx);
	if (host->chan_tx)
		dmaengine_terminate_all(host->chan_tx);

	tmio_mmc_enable_dma(host, true);
}
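/*
 * The dmaengine callback can run before the controller raises DATAEND.
 * Waiting on dma_dataend (completed from the DATAEND interrupt path)
 * ensures the transfer has really finished before the scatterlist is
 * unmapped and the request is completed.
 */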
static void tmio_mmc_dma_callback(void *arg)
{
	struct tmio_mmc_host *host = arg;

	wait_for_completion(&host->dma_dataend);

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << host->pdata->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}
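	/*
	 * DMA is only attempted when every element respects the controller's
	 * alignment: a single unaligned element can still go through the
	 * bounce buffer, but multiple unaligned elements, oversized requests,
	 * or lengths that are not a multiple of the alignment fall back to PIO.
	 */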
	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);
	/* Only a single sg element may be unaligned; route it via the bounce buffer */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}
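	/*
	 * Standard dmaengine slave flow: map the scatterlist, prepare a
	 * slave descriptor, then submit it. The descriptor is only issued
	 * later, from the dma_issue tasklet, once the command has gone out.
	 */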
	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_CTRL_ACK);

	if (desc) {
		reinit_completion(&host->dma_dataend);
		desc->callback = tmio_mmc_dma_callback;
		desc->callback_param = host;

		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		tmio_mmc_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}
}
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << host->pdata->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}
	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);
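	/*
	 * Unlike a read, a write must stage its payload into the bounce
	 * buffer before the transfer starts; reads are copied back out of
	 * the bounce buffer after completion elsewhere in the driver.
	 */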
	/* Only a single sg element may be unaligned; route it via the bounce buffer */
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);

		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}
	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_CTRL_ACK);

	if (desc) {
		reinit_completion(&host->dma_dataend);
		desc->callback = tmio_mmc_dma_callback;
		desc->callback_param = host;

		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		tmio_mmc_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}
}
void tmio_mmc_start_dma(struct tmio_mmc_host *host,
			struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			tmio_mmc_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			tmio_mmc_start_dma_tx(host);
	}
}
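/*
 * This tasklet is scheduled from the command-completion interrupt, so
 * the prepared descriptor is only pushed to the dmaengine once the
 * command phase is over. DATAEND is unmasked before issuing, so the
 * end-of-transfer interrupt cannot be missed.
 */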
static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	if (host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (!host->dma || (!host->pdev->dev.of_node &&
	    (!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
		return;

	if (!host->chan_tx && !host->chan_rx) {
		struct resource *res = platform_get_resource(host->pdev,
							     IORESOURCE_MEM, 0);
		struct dma_slave_config cfg = {};
		dma_cap_mask_t mask;
		int ret;

		if (!res)
			return;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
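		/*
		 * dma_request_slave_channel_compat() tries the device-tree
		 * "tx"/"rx" channel names first and falls back to the
		 * platform filter function with chan_priv_* otherwise.
		 */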
		host->chan_tx = dma_request_slave_channel_compat(mask,
					host->dma->filter, pdata->chan_priv_tx,
					&host->pdev->dev, "tx");
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;
		cfg.direction = DMA_MEM_TO_DEV;
		cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift);
		cfg.dst_addr_width = host->dma->dma_buswidth;
		if (!cfg.dst_addr_width)
			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.src_addr = 0;
		ret = dmaengine_slave_config(host->chan_tx, &cfg);
		if (ret < 0)
			goto ecfgtx;
		host->chan_rx = dma_request_slave_channel_compat(mask,
					host->dma->filter, pdata->chan_priv_rx,
					&host->pdev->dev, "rx");
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx)
			goto ereqrx;
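		/*
		 * The Rx FIFO is reached through the same data port; some
		 * SoCs need an extra dma_rx_offset on top of the Tx address.
		 */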
		cfg.direction = DMA_DEV_TO_MEM;
		cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
		cfg.src_addr_width = host->dma->dma_buswidth;
		if (!cfg.src_addr_width)
			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.dst_addr = 0;
		ret = dmaengine_slave_config(host->chan_rx, &cfg);
		if (ret < 0)
			goto ecfgrx;
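		/*
		 * A single GFP_DMA page backs the bounce buffer used for the
		 * unaligned single-element case in the start_dma helpers.
		 */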
		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!host->bounce_buf)
			goto ebouncebuf;

		init_completion(&host->dma_dataend);
		tasklet_init(&host->dma_issue,
			     tmio_mmc_issue_tasklet_fn, (unsigned long)host);
	}

	tmio_mmc_enable_dma(host, true);

	return;
ebouncebuf:
ecfgrx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
ereqrx:
ecfgtx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}
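/*
 * The channel pointers are cleared before the channels are released,
 * presumably so that any concurrent path checking chan_tx/chan_rx sees
 * DMA as unavailable first.
 */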
void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;

		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;

		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
	if (host->bounce_buf) {
		free_pages((unsigned long)host->bounce_buf, 0);
		host->bounce_buf = NULL;
	}
}