/*
 * driver/dma/coh901318_lli.c
 *
 * Copyright (C) 2007-2009 ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 * Support functions for handling lli for dma
 * Author: Per Friden <per.friden@stericsson.com>
 */
10 #include <linux/dma-mapping.h>
11 #include <linux/spinlock.h>
12 #include <linux/dmapool.h>
13 #include <linux/memory.h>
14 #include <linux/gfp.h>
15 #include <mach/coh901318.h>
17 #include "coh901318_lli.h"
/*
 * Optional debugfs instrumentation: track the number of lli elements
 * currently allocated from a pool. Compiles to nothing when the U300
 * debug support is disabled, so callers may use these unconditionally.
 * Fix: the two configurations must be separated by #else/#endif, and
 * macro arguments are parenthesized to avoid expansion surprises.
 */
#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
#define DEBUGFS_POOL_COUNTER_RESET(pool) ((pool)->debugfs_pool_counter = 0)
#define DEBUGFS_POOL_COUNTER_ADD(pool, add) ((pool)->debugfs_pool_counter += (add))
#else
#define DEBUGFS_POOL_COUNTER_RESET(pool)
#define DEBUGFS_POOL_COUNTER_ADD(pool, add)
#endif
27 static struct coh901318_lli *
28 coh901318_lli_next(struct coh901318_lli *data)
30 if (data == NULL || data->link_addr == 0)
33 return (struct coh901318_lli *) data->virt_link_addr;
36 int coh901318_pool_create(struct coh901318_pool *pool,
38 size_t size, size_t align)
40 spin_lock_init(&pool->lock);
42 pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);
44 DEBUGFS_POOL_COUNTER_RESET(pool);
48 int coh901318_pool_destroy(struct coh901318_pool *pool)
51 dma_pool_destroy(pool->dmapool);
55 struct coh901318_lli *
56 coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
59 struct coh901318_lli *head;
60 struct coh901318_lli *lli;
61 struct coh901318_lli *lli_prev;
67 spin_lock(&pool->lock);
69 head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
74 DEBUGFS_POOL_COUNTER_ADD(pool, 1);
78 lli->link_addr = 0x00000000;
79 lli->virt_link_addr = 0x00000000U;
81 for (i = 1; i < len; i++) {
84 lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
89 DEBUGFS_POOL_COUNTER_ADD(pool, 1);
91 lli->link_addr = 0x00000000;
92 lli->virt_link_addr = 0x00000000U;
94 lli_prev->link_addr = phy;
95 lli_prev->virt_link_addr = lli;
98 spin_unlock(&pool->lock);
103 spin_unlock(&pool->lock);
107 lli_prev->link_addr = 0x00000000U;
108 spin_unlock(&pool->lock);
109 coh901318_lli_free(pool, &head);
113 void coh901318_lli_free(struct coh901318_pool *pool,
114 struct coh901318_lli **lli)
116 struct coh901318_lli *l;
117 struct coh901318_lli *next;
127 spin_lock(&pool->lock);
129 while (l->link_addr) {
130 next = l->virt_link_addr;
131 dma_pool_free(pool->dmapool, l, l->phy_this);
132 DEBUGFS_POOL_COUNTER_ADD(pool, -1);
135 dma_pool_free(pool->dmapool, l, l->phy_this);
136 DEBUGFS_POOL_COUNTER_ADD(pool, -1);
138 spin_unlock(&pool->lock);
143 coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
144 struct coh901318_lli *lli,
145 dma_addr_t source, unsigned int size,
146 dma_addr_t destination, u32 ctrl_chained,
150 dma_addr_t src = source;
151 dma_addr_t dst = destination;
156 while (lli->link_addr) {
157 lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
161 s -= MAX_DMA_PACKET_SIZE;
162 lli = coh901318_lli_next(lli);
164 src += MAX_DMA_PACKET_SIZE;
165 dst += MAX_DMA_PACKET_SIZE;
168 lli->control = ctrl_eom | s;
176 coh901318_lli_fill_single(struct coh901318_pool *pool,
177 struct coh901318_lli *lli,
178 dma_addr_t buf, unsigned int size,
179 dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
180 enum dma_data_direction dir)
187 if (dir == DMA_TO_DEVICE) {
191 } else if (dir == DMA_FROM_DEVICE) {
199 while (lli->link_addr) {
200 size_t block_size = MAX_DMA_PACKET_SIZE;
201 lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
203 /* If we are on the next-to-final block and there will
204 * be less than half a DMA packet left for the last
205 * block, then we want to make this block a little
206 * smaller to balance the sizes. This is meant to
207 * avoid too small transfers if the buffer size is
208 * (MAX_DMA_PACKET_SIZE*N + 1) */
209 if (s < (MAX_DMA_PACKET_SIZE + MAX_DMA_PACKET_SIZE/2))
210 block_size = MAX_DMA_PACKET_SIZE/2;
216 lli = coh901318_lli_next(lli);
218 if (dir == DMA_TO_DEVICE)
220 else if (dir == DMA_FROM_DEVICE)
224 lli->control = ctrl_eom | s;
232 coh901318_lli_fill_sg(struct coh901318_pool *pool,
233 struct coh901318_lli *lli,
234 struct scatterlist *sgl, unsigned int nents,
235 dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
237 enum dma_data_direction dir, u32 ctrl_irq_mask)
240 struct scatterlist *sg;
244 u32 bytes_to_transfer;
250 spin_lock(&pool->lock);
252 if (dir == DMA_TO_DEVICE)
254 else if (dir == DMA_FROM_DEVICE)
259 for_each_sg(sgl, sg, nents, i) {
260 if (sg_is_chain(sg)) {
261 /* sg continues to the next sg-element don't
262 * send ctrl_finish until the last
263 * sg-element in the chain
265 ctrl_sg = ctrl_chained;
266 } else if (i == nents - 1)
269 ctrl_sg = ctrl ? ctrl : ctrl_last;
272 if (dir == DMA_TO_DEVICE)
273 /* increment source address */
276 /* increment destination address */
279 bytes_to_transfer = sg_dma_len(sg);
281 while (bytes_to_transfer) {
284 if (bytes_to_transfer > MAX_DMA_PACKET_SIZE) {
285 elem_size = MAX_DMA_PACKET_SIZE;
288 elem_size = bytes_to_transfer;
292 lli->control = val | elem_size;
296 if (dir == DMA_FROM_DEVICE)
301 BUG_ON(lli->link_addr & 3);
303 bytes_to_transfer -= elem_size;
304 lli = coh901318_lli_next(lli);
308 spin_unlock(&pool->lock);
312 spin_unlock(&pool->lock);