/*
 * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pxp_dma.h>
#include <linux/atomic.h>
#include <linux/platform_data/dma-imx.h>
34 static atomic_t open_count = ATOMIC_INIT(0);
36 static DEFINE_SPINLOCK(pxp_mem_lock);
37 static DEFINE_SPINLOCK(pxp_chan_lock);
38 static LIST_HEAD(head);
39 static LIST_HEAD(list);
40 static struct pxp_irq_info irq_info[NR_PXP_VIRT_CHANNEL];
42 struct pxp_chan_handle {
47 /* To track the allocated memory buffer */
48 struct memalloc_record {
49 struct list_head list;
50 struct pxp_mem_desc mem;
53 struct pxp_chan_info {
54 struct dma_chan *dma_chan;
55 struct list_head list;
58 static int pxp_alloc_dma_buffer(struct pxp_mem_desc *mem)
60 mem->cpu_addr = (unsigned long)
61 dma_alloc_coherent(NULL, PAGE_ALIGN(mem->size),
62 (dma_addr_t *) (&mem->phys_addr),
63 GFP_DMA | GFP_KERNEL);
64 pr_debug("[ALLOC] mem alloc phys_addr = 0x%x\n", mem->phys_addr);
65 if ((void *)(mem->cpu_addr) == NULL) {
66 printk(KERN_ERR "Physical memory allocation error!\n");
72 static void pxp_free_dma_buffer(struct pxp_mem_desc *mem)
74 if (mem->cpu_addr != 0) {
75 dma_free_coherent(0, PAGE_ALIGN(mem->size),
76 (void *)mem->cpu_addr, mem->phys_addr);
80 static int pxp_free_buffers(void)
82 struct memalloc_record *rec, *n;
83 struct pxp_mem_desc mem;
85 list_for_each_entry_safe(rec, n, &head, list) {
87 if (mem.cpu_addr != 0) {
88 pxp_free_dma_buffer(&mem);
89 pr_debug("[FREE] freed paddr=0x%08X\n", mem.phys_addr);
90 /* delete from list */
99 /* Callback function triggered after PxP receives an EOF interrupt */
100 static void pxp_dma_done(void *arg)
102 struct pxp_tx_desc *tx_desc = to_tx_desc(arg);
103 struct dma_chan *chan = tx_desc->txd.chan;
104 struct pxp_channel *pxp_chan = to_pxp_channel(chan);
105 int chan_id = pxp_chan->dma_chan.chan_id;
108 pr_debug("DMA Done ISR, chan_id %d\n", chan_id);
110 spin_lock_irqsave(&(irq_info[chan_id].lock), flags);
111 irq_info[chan_id].irq_pending--;
112 irq_info[chan_id].hist_status = tx_desc->hist_status;
113 spin_unlock_irqrestore(&(irq_info[chan_id].lock), flags);
115 wake_up_interruptible(&(irq_info[chan_id].waitq));
118 static int pxp_ioc_config_chan(unsigned long arg)
120 struct scatterlist sg[3];
121 struct pxp_tx_desc *desc;
122 struct dma_async_tx_descriptor *txd;
123 struct pxp_chan_info *info;
124 struct pxp_config_data pxp_conf;
130 ret = copy_from_user(&pxp_conf,
131 (struct pxp_config_data *)arg,
132 sizeof(struct pxp_config_data));
136 chan_id = pxp_conf.chan_id;
137 if (chan_id < 0 || chan_id >= NR_PXP_VIRT_CHANNEL)
140 /* find the channel */
141 spin_lock(&pxp_chan_lock);
142 list_for_each_entry(info, &list, list) {
143 if (info->dma_chan->chan_id == chan_id)
146 spin_unlock(&pxp_chan_lock);
148 sg_init_table(sg, 3);
151 info->dma_chan->device->device_prep_slave_sg(info->dma_chan,
157 pr_err("Error preparing a DMA transaction descriptor.\n");
161 txd->callback_param = txd;
162 txd->callback = pxp_dma_done;
164 desc = to_tx_desc(txd);
167 for (i = 0; i < length; i++) {
168 if (i == 0) { /* S0 */
169 memcpy(&desc->proc_data,
171 sizeof(struct pxp_proc_data));
172 memcpy(&desc->layer_param.s0_param,
174 sizeof(struct pxp_layer_param));
175 } else if (i == 1) { /* Output */
176 memcpy(&desc->layer_param.out_param,
178 sizeof(struct pxp_layer_param));
181 memcpy(&desc->layer_param.ol_param,
183 sizeof(struct pxp_layer_param));
189 cookie = txd->tx_submit(txd);
191 pr_err("Error tx_submit\n");
195 spin_lock_irqsave(&(irq_info[chan_id].lock), flags);
196 irq_info[chan_id].irq_pending++;
197 spin_unlock_irqrestore(&(irq_info[chan_id].lock), flags);
202 static int pxp_device_open(struct inode *inode, struct file *filp)
204 atomic_inc(&open_count);
209 static int pxp_device_release(struct inode *inode, struct file *filp)
211 if (atomic_dec_and_test(&open_count))
217 static int pxp_device_mmap(struct file *file, struct vm_area_struct *vma)
219 struct memalloc_record *rec, *n;
220 int request_size, found;
222 request_size = vma->vm_end - vma->vm_start;
225 pr_debug("start=0x%x, pgoff=0x%x, size=0x%x\n",
226 (unsigned int)(vma->vm_start), (unsigned int)(vma->vm_pgoff),
229 spin_lock(&pxp_mem_lock);
230 list_for_each_entry_safe(rec, n, &head, list) {
231 if (rec->mem.phys_addr == (vma->vm_pgoff << PAGE_SHIFT) &&
232 (rec->mem.size <= request_size)) {
237 spin_unlock(&pxp_mem_lock);
242 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
244 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
245 request_size, vma->vm_page_prot) ? -EAGAIN : 0;
248 static bool chan_filter(struct dma_chan *chan, void *arg)
250 if (imx_dma_is_pxp(chan))
256 static long pxp_device_ioctl(struct file *filp,
257 unsigned int cmd, unsigned long arg)
262 case PXP_IOC_GET_CHAN:
264 struct pxp_chan_info *info;
267 pr_debug("drv: PXP_IOC_GET_CHAN Line %d\n", __LINE__);
268 info = kzalloc(sizeof(*info), GFP_KERNEL);
270 pr_err("%d: alloc err\n", __LINE__);
275 dma_cap_set(DMA_SLAVE, mask);
276 dma_cap_set(DMA_PRIVATE, mask);
278 dma_request_channel(mask, chan_filter, NULL);
279 if (!info->dma_chan) {
280 pr_err("Unsccessfully received channel!\n");
284 pr_debug("Successfully received channel."
285 "chan_id %d\n", info->dma_chan->chan_id);
287 spin_lock(&pxp_chan_lock);
288 list_add_tail(&info->list, &list);
289 spin_unlock(&pxp_chan_lock);
291 init_waitqueue_head(&(irq_info[info->dma_chan->chan_id].waitq));
293 (info->dma_chan->chan_id, (u32 __user *) arg))
298 case PXP_IOC_PUT_CHAN:
301 struct pxp_chan_info *info;
303 if (get_user(chan_id, (u32 __user *) arg))
306 if (chan_id < 0 || chan_id >= NR_PXP_VIRT_CHANNEL)
309 spin_lock(&pxp_chan_lock);
310 list_for_each_entry(info, &list, list) {
311 if (info->dma_chan->chan_id == chan_id)
314 spin_unlock(&pxp_chan_lock);
316 pr_debug("%d release chan_id %d\n", __LINE__,
317 info->dma_chan->chan_id);
319 dma_release_channel(info->dma_chan);
320 spin_lock(&pxp_chan_lock);
321 list_del_init(&info->list);
322 spin_unlock(&pxp_chan_lock);
327 case PXP_IOC_CONFIG_CHAN:
331 ret = pxp_ioc_config_chan(arg);
337 case PXP_IOC_START_CHAN:
339 struct pxp_chan_info *info;
342 if (get_user(chan_id, (u32 __user *) arg))
345 /* find the channel */
346 spin_lock(&pxp_chan_lock);
347 list_for_each_entry(info, &list, list) {
348 if (info->dma_chan->chan_id == chan_id)
351 spin_unlock(&pxp_chan_lock);
353 dma_async_issue_pending(info->dma_chan);
357 case PXP_IOC_GET_PHYMEM:
359 struct memalloc_record *rec;
361 rec = kzalloc(sizeof(*rec), GFP_KERNEL);
365 ret = copy_from_user(&(rec->mem),
366 (struct pxp_mem_desc *)arg,
367 sizeof(struct pxp_mem_desc));
373 pr_debug("[ALLOC] mem alloc size = 0x%x\n",
376 ret = pxp_alloc_dma_buffer(&(rec->mem));
380 "Physical memory allocation error!\n");
383 ret = copy_to_user((void __user *)arg, &(rec->mem),
384 sizeof(struct pxp_mem_desc));
391 spin_lock(&pxp_mem_lock);
392 list_add(&rec->list, &head);
393 spin_unlock(&pxp_mem_lock);
397 case PXP_IOC_PUT_PHYMEM:
399 struct memalloc_record *rec, *n;
400 struct pxp_mem_desc pxp_mem;
402 ret = copy_from_user(&pxp_mem,
403 (struct pxp_mem_desc *)arg,
404 sizeof(struct pxp_mem_desc));
408 pr_debug("[FREE] mem freed cpu_addr = 0x%x\n",
410 if ((void *)pxp_mem.cpu_addr != NULL)
411 pxp_free_dma_buffer(&pxp_mem);
413 spin_lock(&pxp_mem_lock);
414 list_for_each_entry_safe(rec, n, &head, list) {
415 if (rec->mem.cpu_addr == pxp_mem.cpu_addr) {
416 /* delete from list */
417 list_del(&rec->list);
422 spin_unlock(&pxp_mem_lock);
426 case PXP_IOC_WAIT4CMPLT:
428 struct pxp_chan_handle chan_handle;
431 ret = copy_from_user(&chan_handle,
432 (struct pxp_chan_handle *)arg,
433 sizeof(struct pxp_chan_handle));
437 chan_id = chan_handle.chan_id;
438 if (chan_id < 0 || chan_id >= NR_PXP_VIRT_CHANNEL)
441 ret = wait_event_interruptible
442 (irq_info[chan_id].waitq,
443 (irq_info[chan_id].irq_pending == 0));
446 "WAIT4CMPLT: signal received.\n");
450 chan_handle.hist_status = irq_info[chan_id].hist_status;
451 ret = copy_to_user((struct pxp_chan_handle *)arg,
453 sizeof(struct pxp_chan_handle));
465 static const struct file_operations pxp_device_fops = {
466 .open = pxp_device_open,
467 .release = pxp_device_release,
468 .unlocked_ioctl = pxp_device_ioctl,
469 .mmap = pxp_device_mmap,
472 static struct miscdevice pxp_device_miscdev = {
473 .minor = MISC_DYNAMIC_MINOR,
474 .name = "pxp_device",
475 .fops = &pxp_device_fops,
478 int register_pxp_device(void)
482 ret = misc_register(&pxp_device_miscdev);
486 for (i = 0; i < NR_PXP_VIRT_CHANNEL; i++)
487 spin_lock_init(&(irq_info[i].lock));
489 pr_debug("PxP_Device registered Successfully\n");
493 void unregister_pxp_device(void)
495 misc_deregister(&pxp_device_miscdev);