2 * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 #include <linux/interrupt.h>
20 #include <linux/miscdevice.h>
21 #include <linux/platform_device.h>
23 #include <linux/slab.h>
24 #include <linux/uaccess.h>
25 #include <linux/delay.h>
26 #include <linux/dmaengine.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/sched.h>
29 #include <linux/module.h>
30 #include <linux/pxp_dma.h>
31 #include <linux/atomic.h>
32 #include <linux/platform_data/dma-imx.h>
/* Count of userspace clients currently holding /dev/pxp_device open. */
34 static atomic_t open_count = ATOMIC_INIT(0);
/* Guards "head", the list of DMA buffers handed out via PXP_IOC_GET_PHYMEM. */
36 static DEFINE_SPINLOCK(pxp_mem_lock);
/* Guards "list", the list of DMA channels handed out via PXP_IOC_GET_CHAN. */
37 static DEFINE_SPINLOCK(pxp_chan_lock);
/* memalloc_record entries: one per outstanding physical-memory allocation. */
38 static LIST_HEAD(head);
/* pxp_chan_info entries: one per DMA channel currently owned by userspace. */
39 static LIST_HEAD(list);
/* Per-virtual-channel EOF bookkeeping: irq_pending count, hist_status, waitq
 * (fields referenced by pxp_dma_done() and PXP_IOC_WAIT4CMPLT below). */
40 static struct pxp_irq_info irq_info[NR_PXP_VIRT_CHANNEL];
/* Userspace handle exchanged over the ioctl interface; at minimum it carries
 * a chan_id and hist_status (see PXP_IOC_WAIT4CMPLT).  NOTE(review): member
 * list is elided in this excerpt — confirm full definition upstream. */
42 struct pxp_chan_handle {
47 /* To track the allocated memory buffer */
48 struct memalloc_record {
	/* Linkage into the global "head" list, under pxp_mem_lock. */
49 	struct list_head list;
	/* The allocation itself: size, phys_addr, cpu_addr (kernel virtual). */
50 	struct pxp_mem_desc mem;
/* One entry per DMA channel acquired by userspace via PXP_IOC_GET_CHAN. */
53 struct pxp_chan_info {
55 	struct dma_chan *dma_chan;
	/* Linkage into the global "list", under pxp_chan_lock. */
56 	struct list_head list;
/*
 * Allocate a physically contiguous, coherent DMA buffer of mem->size bytes
 * (page-aligned).  On success fills in mem->cpu_addr (kernel virtual address
 * stored as an integer) and mem->phys_addr (DMA/bus address).
 *
 * NOTE(review): passing a NULL struct device to dma_alloc_coherent() is a
 * deprecated legacy idiom — newer kernels require the real device so the
 * correct DMA mask/ops are used; verify against the target kernel version.
 * NOTE(review): the (dma_addr_t *) cast of &mem->phys_addr assumes the field
 * has the same width as dma_addr_t — confirm on LPAE/64-bit configs.
 */
59 static int pxp_alloc_dma_buffer(struct pxp_mem_desc *mem)
61 	mem->cpu_addr = (unsigned long)
62 	    dma_alloc_coherent(NULL, PAGE_ALIGN(mem->size),
63 			       (dma_addr_t *) (&mem->phys_addr),
64 			       GFP_DMA | GFP_KERNEL);
65 	pr_debug("[ALLOC] mem alloc phys_addr = 0x%x\n", mem->phys_addr);
	/* dma_alloc_coherent() returns NULL on failure. */
66 	if ((void *)(mem->cpu_addr) == NULL) {
67 		printk(KERN_ERR "Physical memory allocation error!\n");
/*
 * Release a buffer obtained from pxp_alloc_dma_buffer().  A zero cpu_addr
 * marks a descriptor that was never (or already) allocated, so it is skipped.
 * NOTE(review): the "0" device argument mirrors the NULL-dev allocation above.
 */
73 static void pxp_free_dma_buffer(struct pxp_mem_desc *mem)
75 	if (mem->cpu_addr != 0) {
76 		dma_free_coherent(0, PAGE_ALIGN(mem->size),
77 				  (void *)mem->cpu_addr, mem->phys_addr);
/*
 * Free every buffer still tracked on the global "head" list — cleanup path
 * used when the last client closes the device.  Uses the _safe iterator
 * because entries are deleted while walking.
 * NOTE(review): no pxp_mem_lock acquisition is visible in this excerpt;
 * confirm the caller serializes against the ioctl paths.
 */
81 static int pxp_free_buffers(void)
83 	struct memalloc_record *rec, *n;
84 	struct pxp_mem_desc mem;
86 	list_for_each_entry_safe(rec, n, &head, list) {
		/* "mem" is presumably copied from rec->mem in an elided line. */
88 		if (mem.cpu_addr != 0) {
89 			pxp_free_dma_buffer(&mem);
90 			pr_debug("[FREE] freed paddr=0x%08X\n", mem.phys_addr);
91 			/* delete from list */
100 /* Callback function triggered after PxP receives an EOF interrupt */
101 static void pxp_dma_done(void *arg)
	/* callback_param was set to the txd itself in pxp_ioc_config_chan(). */
103 	struct pxp_tx_desc *tx_desc = to_tx_desc(arg);
104 	struct dma_chan *chan = tx_desc->txd.chan;
105 	struct pxp_channel *pxp_chan = to_pxp_channel(chan);
106 	int chan_id = pxp_chan->dma_chan.chan_id;
108 	pr_debug("DMA Done ISR, chan_id %d\n", chan_id);
	/* Record completion for PXP_IOC_WAIT4CMPLT, then wake any waiter.
	 * NOTE(review): irq_pending is a plain increment in callback context
	 * racing with the decrement in the ioctl — confirm it is atomic_t or
	 * otherwise protected in the full source. */
110 	irq_info[chan_id].irq_pending++;
111 	irq_info[chan_id].hist_status = tx_desc->hist_status;
113 	wake_up_interruptible(&(irq_info[chan_id].waitq));
/*
 * PXP_IOC_CONFIG_CHAN backend: copy a pxp_config_data from userspace,
 * locate the matching owned channel, build a 3-entry scatterlist
 * (S0 / output / overlay), prepare a slave-sg descriptor, populate its
 * layer parameters, and submit it to the dmaengine.
 *
 * Returns 0 on success, negative errno on failure (error paths elided here).
 */
116 static int pxp_ioc_config_chan(unsigned long arg)
118 	struct scatterlist sg[3];
119 	struct pxp_tx_desc *desc;
120 	struct dma_async_tx_descriptor *txd;
121 	struct pxp_chan_info *info;
122 	struct pxp_config_data pxp_conf;
	/* Copy the whole config; all fields below come from userspace. */
127 	ret = copy_from_user(&pxp_conf,
128 			     (struct pxp_config_data *)arg,
129 			     sizeof(struct pxp_config_data));
	/* Bounds-check the user-supplied channel index before using it. */
133 	chan_id = pxp_conf.chan_id;
134 	if (chan_id < 0 || chan_id >= NR_PXP_VIRT_CHANNEL)
	/* NOTE(review): re-initializing the waitqueue on every CONFIG loses
	 * any task already sleeping in WAIT4CMPLT — confirm intended. */
137 	init_waitqueue_head(&(irq_info[chan_id].waitq));
139 	/* find the channel */
140 	spin_lock(&pxp_chan_lock);
141 	list_for_each_entry(info, &list, list) {
142 		if (info->dma_chan->chan_id == chan_id)
145 	spin_unlock(&pxp_chan_lock);
	/* NOTE(review): no visible "not found" check — if the loop fails,
	 * "info" is a dangling list cursor; verify the elided lines. */
147 	sg_init_table(sg, 3);
150 	    info->dma_chan->device->device_prep_slave_sg(info->dma_chan,
156 		pr_err("Error preparing a DMA transaction descriptor.\n");
	/* The descriptor itself is the callback argument (see pxp_dma_done). */
160 	txd->callback_param = txd;
161 	txd->callback = pxp_dma_done;
163 	desc = to_tx_desc(txd);
	/* Walk the descriptor chain: slot 0 = S0 (+proc_data), slot 1 = output,
	 * remaining slots = overlay layers. */
166 	for (i = 0; i < length; i++) {
167 		if (i == 0) {	/* S0 */
168 			memcpy(&desc->proc_data,
170 			       sizeof(struct pxp_proc_data));
171 			memcpy(&desc->layer_param.s0_param,
173 			       sizeof(struct pxp_layer_param));
174 		} else if (i == 1) {	/* Output */
175 			memcpy(&desc->layer_param.out_param,
177 			       sizeof(struct pxp_layer_param));
			/* else: overlay layer parameters */
180 			memcpy(&desc->layer_param.ol_param,
182 			       sizeof(struct pxp_layer_param));
	/* Queue the transaction; it runs on PXP_IOC_START_CHAN via
	 * dma_async_issue_pending(). */
188 	cookie = txd->tx_submit(txd);
190 		pr_err("Error tx_submit\n");
/* Device-node open: just reference-count the client. */
197 static int pxp_device_open(struct inode *inode, struct file *filp)
199 	atomic_inc(&open_count);
/* Device-node close: when the last client leaves, global cleanup runs
 * (the call, presumably pxp_free_buffers(), is elided in this excerpt). */
206 static int pxp_device_release(struct inode *inode, struct file *filp)
208 	if (atomic_dec_and_test(&open_count))
/*
 * mmap handler: userspace maps a buffer previously allocated through
 * PXP_IOC_GET_PHYMEM by passing its physical address as the mmap offset.
 * The request is validated against the allocation list, then mapped
 * non-cached (coherent DMA memory).
 */
212 static int pxp_device_mmap(struct file *file, struct vm_area_struct *vma)
214 	struct memalloc_record *rec, *n;
215 	int request_size, found;
217 	request_size = vma->vm_end - vma->vm_start;
220 	pr_debug("start=0x%x, pgoff=0x%x, size=0x%x\n",
221 		 (unsigned int)(vma->vm_start), (unsigned int)(vma->vm_pgoff),
224 	spin_lock(&pxp_mem_lock);
225 	list_for_each_entry_safe(rec, n, &head, list) {
		/* Match on physical address (pgoff is in pages).
		 * NOTE(review): "size <= request_size" accepts a mapping
		 * LARGER than the allocation, and remap below uses
		 * request_size — looks like it can expose adjacent physical
		 * memory; the comparison direction should be verified. */
226 		if (rec->mem.phys_addr == (vma->vm_pgoff << PAGE_SHIFT) &&
227 		    (rec->mem.size <= request_size)) {
232 	spin_unlock(&pxp_mem_lock);
	/* Coherent buffers must be mapped uncached. */
237 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
239 	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
240 			       request_size, vma->vm_page_prot) ? -EAGAIN : 0;
/* dma_request_channel() filter: accept only channels belonging to the
 * i.MX PxP dmaengine driver. */
243 static bool chan_filter(struct dma_chan *chan, void *arg)
245 	if (imx_dma_is_pxp(chan))
/*
 * ioctl dispatcher for /dev/pxp_device.  Commands:
 *   PXP_IOC_GET_CHAN    — acquire an exclusive PxP DMA channel, return its id
 *   PXP_IOC_PUT_CHAN    — release a previously acquired channel
 *   PXP_IOC_CONFIG_CHAN — program a transaction (see pxp_ioc_config_chan)
 *   PXP_IOC_START_CHAN  — kick the queued transaction
 *   PXP_IOC_GET_PHYMEM  — allocate a contiguous DMA buffer for mmap
 *   PXP_IOC_PUT_PHYMEM  — free such a buffer
 *   PXP_IOC_WAIT4CMPLT  — sleep until the channel signals EOF
 */
251 static long pxp_device_ioctl(struct file *filp,
252 			     unsigned int cmd, unsigned long arg)
257 	case PXP_IOC_GET_CHAN:
259 			struct pxp_chan_info *info;
262 			pr_debug("drv: PXP_IOC_GET_CHAN Line %d\n", __LINE__);
263 			info = kzalloc(sizeof(*info), GFP_KERNEL);
265 				pr_err("%d: alloc err\n", __LINE__);
			/* Request a private slave channel from the PxP engine
			 * (chan_filter restricts the match). */
270 			dma_cap_set(DMA_SLAVE, mask);
271 			dma_cap_set(DMA_PRIVATE, mask);
273 			    dma_request_channel(mask, chan_filter, NULL);
274 			if (!info->dma_chan) {
275 				pr_err("Unsccessfully received channel!\n");
279 			pr_debug("Successfully received channel."
280 				 "chan_id %d\n", info->dma_chan->chan_id);
			/* Track ownership so PUT/CONFIG/START can find it. */
282 			spin_lock(&pxp_chan_lock);
283 			list_add_tail(&info->list, &list);
284 			spin_unlock(&pxp_chan_lock);
			/* Hand the channel id back to userspace (put_user). */
287 			    (info->dma_chan->chan_id, (u32 __user *) arg))
292 	case PXP_IOC_PUT_CHAN:
295 			struct pxp_chan_info *info;
297 			if (get_user(chan_id, (u32 __user *) arg))
			/* Validate the user-supplied channel index. */
300 			if (chan_id < 0 || chan_id >= NR_PXP_VIRT_CHANNEL)
303 			spin_lock(&pxp_chan_lock);
304 			list_for_each_entry(info, &list, list) {
305 				if (info->dma_chan->chan_id == chan_id)
308 			spin_unlock(&pxp_chan_lock);
310 			pr_debug("%d release chan_id %d\n", __LINE__,
311 				 info->dma_chan->chan_id);
			/* Give the channel back to dmaengine, then untrack it. */
313 			dma_release_channel(info->dma_chan);
314 			spin_lock(&pxp_chan_lock);
315 			list_del_init(&info->list);
316 			spin_unlock(&pxp_chan_lock);
321 	case PXP_IOC_CONFIG_CHAN:
326 			ret = pxp_ioc_config_chan(arg);
332 	case PXP_IOC_START_CHAN:
334 			struct pxp_chan_info *info;
337 			if (get_user(chan_id, (u32 __user *) arg))
340 			/* find the channel */
341 			spin_lock(&pxp_chan_lock);
342 			list_for_each_entry(info, &list, list) {
343 				if (info->dma_chan->chan_id == chan_id)
346 			spin_unlock(&pxp_chan_lock);
			/* NOTE(review): no visible "not found" guard before
			 * dereferencing "info" — verify the elided lines. */
348 			dma_async_issue_pending(info->dma_chan);
352 	case PXP_IOC_GET_PHYMEM:
354 			struct memalloc_record *rec;
356 			rec = kzalloc(sizeof(*rec), GFP_KERNEL);
			/* The requested size arrives in rec->mem.size. */
360 			ret = copy_from_user(&(rec->mem),
361 					     (struct pxp_mem_desc *)arg,
362 					     sizeof(struct pxp_mem_desc));
368 			pr_debug("[ALLOC] mem alloc size = 0x%x\n",
371 			ret = pxp_alloc_dma_buffer(&(rec->mem));
375 				       "Physical memory allocation error!\n");
			/* Return phys_addr/cpu_addr to the caller, then track
			 * the allocation for mmap and later freeing. */
378 			ret = copy_to_user((void __user *)arg, &(rec->mem),
379 					   sizeof(struct pxp_mem_desc));
386 			spin_lock(&pxp_mem_lock);
387 			list_add(&rec->list, &head);
388 			spin_unlock(&pxp_mem_lock);
392 	case PXP_IOC_PUT_PHYMEM:
394 			struct memalloc_record *rec, *n;
395 			struct pxp_mem_desc pxp_mem;
397 			ret = copy_from_user(&pxp_mem,
398 					     (struct pxp_mem_desc *)arg,
399 					     sizeof(struct pxp_mem_desc));
403 			pr_debug("[FREE] mem freed cpu_addr = 0x%x\n",
			/* NOTE(review): this frees a fully user-controlled
			 * {cpu_addr, phys_addr, size} BEFORE checking it is on
			 * the allocation list below — an arbitrary-free from
			 * userspace.  Upstream later reworked this path;
			 * validation should precede dma_free_coherent(). */
405 			if ((void *)pxp_mem.cpu_addr != NULL)
406 				pxp_free_dma_buffer(&pxp_mem);
408 			spin_lock(&pxp_mem_lock);
409 			list_for_each_entry_safe(rec, n, &head, list) {
410 				if (rec->mem.cpu_addr == pxp_mem.cpu_addr) {
411 					/* delete from list */
412 					list_del(&rec->list);
417 			spin_unlock(&pxp_mem_lock);
421 	case PXP_IOC_WAIT4CMPLT:
423 			struct pxp_chan_handle chan_handle;
426 			ret = copy_from_user(&chan_handle,
427 					     (struct pxp_chan_handle *)arg,
428 					     sizeof(struct pxp_chan_handle));
432 			chan_id = chan_handle.chan_id;
433 			if (chan_id < 0 || chan_id >= NR_PXP_VIRT_CHANNEL)
			/* Sleep until pxp_dma_done() bumps irq_pending
			 * (interruptible: a signal aborts the wait). */
436 			ret = wait_event_interruptible
437 			    (irq_info[chan_id].waitq,
438 			     (irq_info[chan_id].irq_pending != 0));
441 			       "pxp interrupt received.\n");
			/* Consume one completion and report the histogram
			 * status captured by the callback. */
444 			irq_info[chan_id].irq_pending--;
446 			chan_handle.hist_status = irq_info[chan_id].hist_status;
447 			ret = copy_to_user((struct pxp_chan_handle *)arg,
449 					   sizeof(struct pxp_chan_handle));
/* File operations for the pxp_device misc node.  No .compat_ioctl is
 * visible in this excerpt — 32-bit userspace on a 64-bit kernel would need
 * one if the ioctl structs are not layout-identical. */
461 static const struct file_operations pxp_device_fops = {
462 	.open = pxp_device_open,
463 	.release = pxp_device_release,
464 	.unlocked_ioctl = pxp_device_ioctl,
465 	.mmap = pxp_device_mmap,
/* Misc character device: shows up as /dev/pxp_device with a dynamic minor. */
468 static struct miscdevice pxp_device_miscdev = {
469 	.minor = MISC_DYNAMIC_MINOR,
470 	.name = "pxp_device",
471 	.fops = &pxp_device_fops,
/* Register the misc device; returns misc_register()'s status (0 on success). */
474 int register_pxp_device(void)
478 	ret = misc_register(&pxp_device_miscdev);
482 	pr_debug("PxP_Device registered Successfully\n");
/* Tear down the misc device registered by register_pxp_device(). */
486 void unregister_pxp_device(void)
488 	misc_deregister(&pxp_device_miscdev);