/*
 * drivers/dma/pxp/pxp_device.c
 * Imported from karo-tx-linux, blob d05b06df0c9026d7da38810a2468c4a518711123
 */
1 /*
2  * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
17  *
18  */
19 #include <linux/interrupt.h>
20 #include <linux/miscdevice.h>
21 #include <linux/platform_device.h>
22 #include <linux/fs.h>
23 #include <linux/slab.h>
24 #include <linux/uaccess.h>
25 #include <linux/delay.h>
26 #include <linux/dmaengine.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/sched.h>
29 #include <linux/module.h>
30 #include <linux/pxp_dma.h>
31 #include <linux/atomic.h>
32 #include <linux/platform_data/dma-imx.h>
33
/* Number of times the device node is currently open; release() frees
 * leftover buffers when this drops to zero. */
static atomic_t open_count = ATOMIC_INIT(0);

/* Protects 'head', the list of DMA buffers handed out to user space. */
static DEFINE_SPINLOCK(pxp_mem_lock);
/* Protects 'list', the list of DMA channels handed out to user space. */
static DEFINE_SPINLOCK(pxp_chan_lock);
static LIST_HEAD(head);
static LIST_HEAD(list);
/* Per-virtual-channel completion state (wait queue, pending count,
 * histogram status), filled in by the DMA-done callback. */
static struct pxp_irq_info irq_info[NR_PXP_VIRT_CHANNEL];
41
/* User-space handle exchanged through PXP_IOC_WAIT4CMPLT: the caller
 * supplies chan_id, the driver fills in hist_status on completion. */
struct pxp_chan_handle {
	int chan_id;
	int hist_status;
};
46
/* To track the allocated memory buffer */
struct memalloc_record {
	struct list_head list;	/* linked into 'head', guarded by pxp_mem_lock */
	struct pxp_mem_desc mem;	/* size / cpu_addr / phys_addr of the buffer */
};
52
/* One DMA channel requested via PXP_IOC_GET_CHAN. */
struct pxp_chan_info {
	int chan_id;	/* NOTE(review): never written in this file; chan_id is read from dma_chan */
	struct dma_chan *dma_chan;
	struct list_head list;	/* linked into 'list', guarded by pxp_chan_lock */
};
59 static int pxp_alloc_dma_buffer(struct pxp_mem_desc *mem)
60 {
61         mem->cpu_addr = (unsigned long)
62             dma_alloc_coherent(NULL, PAGE_ALIGN(mem->size),
63                                (dma_addr_t *) (&mem->phys_addr),
64                                GFP_DMA | GFP_KERNEL);
65         pr_debug("[ALLOC] mem alloc phys_addr = 0x%x\n", mem->phys_addr);
66         if ((void *)(mem->cpu_addr) == NULL) {
67                 printk(KERN_ERR "Physical memory allocation error!\n");
68                 return -1;
69         }
70         return 0;
71 }
72
73 static void pxp_free_dma_buffer(struct pxp_mem_desc *mem)
74 {
75         if (mem->cpu_addr != 0) {
76                 dma_free_coherent(0, PAGE_ALIGN(mem->size),
77                                   (void *)mem->cpu_addr, mem->phys_addr);
78         }
79 }
80
81 static int pxp_free_buffers(void)
82 {
83         struct memalloc_record *rec, *n;
84         struct pxp_mem_desc mem;
85
86         list_for_each_entry_safe(rec, n, &head, list) {
87                 mem = rec->mem;
88                 if (mem.cpu_addr != 0) {
89                         pxp_free_dma_buffer(&mem);
90                         pr_debug("[FREE] freed paddr=0x%08X\n", mem.phys_addr);
91                         /* delete from list */
92                         list_del(&rec->list);
93                         kfree(rec);
94                 }
95         }
96
97         return 0;
98 }
99
100 /* Callback function triggered after PxP receives an EOF interrupt */
101 static void pxp_dma_done(void *arg)
102 {
103         struct pxp_tx_desc *tx_desc = to_tx_desc(arg);
104         struct dma_chan *chan = tx_desc->txd.chan;
105         struct pxp_channel *pxp_chan = to_pxp_channel(chan);
106         int chan_id = pxp_chan->dma_chan.chan_id;
107
108         pr_debug("DMA Done ISR, chan_id %d\n", chan_id);
109
110         irq_info[chan_id].irq_pending++;
111         irq_info[chan_id].hist_status = tx_desc->hist_status;
112
113         wake_up_interruptible(&(irq_info[chan_id].waitq));
114 }
115
116 static int pxp_ioc_config_chan(unsigned long arg)
117 {
118         struct scatterlist sg[3];
119         struct pxp_tx_desc *desc;
120         struct dma_async_tx_descriptor *txd;
121         struct pxp_chan_info *info;
122         struct pxp_config_data pxp_conf;
123         dma_cookie_t cookie;
124         int chan_id;
125         int i, length, ret;
126
127         ret = copy_from_user(&pxp_conf,
128                              (struct pxp_config_data *)arg,
129                              sizeof(struct pxp_config_data));
130         if (ret)
131                 return -EFAULT;
132
133         chan_id = pxp_conf.chan_id;
134         if (chan_id < 0 || chan_id >= NR_PXP_VIRT_CHANNEL)
135                 return -ENODEV;
136
137         init_waitqueue_head(&(irq_info[chan_id].waitq));
138
139         /* find the channel */
140         spin_lock(&pxp_chan_lock);
141         list_for_each_entry(info, &list, list) {
142                 if (info->dma_chan->chan_id == chan_id)
143                         break;
144         }
145         spin_unlock(&pxp_chan_lock);
146
147         sg_init_table(sg, 3);
148
149         txd =
150             info->dma_chan->device->device_prep_slave_sg(info->dma_chan,
151                                                          sg, 3,
152                                                          DMA_TO_DEVICE,
153                                                          DMA_PREP_INTERRUPT,
154                                                          NULL);
155         if (!txd) {
156                 pr_err("Error preparing a DMA transaction descriptor.\n");
157                 return -EIO;
158         }
159
160         txd->callback_param = txd;
161         txd->callback = pxp_dma_done;
162
163         desc = to_tx_desc(txd);
164
165         length = desc->len;
166         for (i = 0; i < length; i++) {
167                 if (i == 0) {   /* S0 */
168                         memcpy(&desc->proc_data,
169                                &pxp_conf.proc_data,
170                                sizeof(struct pxp_proc_data));
171                         memcpy(&desc->layer_param.s0_param,
172                                &pxp_conf.s0_param,
173                                sizeof(struct pxp_layer_param));
174                 } else if (i == 1) {    /* Output */
175                         memcpy(&desc->layer_param.out_param,
176                                &pxp_conf.out_param,
177                                sizeof(struct pxp_layer_param));
178                 } else {
179                         /* OverLay */
180                         memcpy(&desc->layer_param.ol_param,
181                                &pxp_conf.ol_param,
182                                sizeof(struct pxp_layer_param));
183                 }
184
185                 desc = desc->next;
186         }
187
188         cookie = txd->tx_submit(txd);
189         if (cookie < 0) {
190                 pr_err("Error tx_submit\n");
191                 return -EIO;
192         }
193
194         return 0;
195 }
196
197 static int pxp_device_open(struct inode *inode, struct file *filp)
198 {
199         atomic_inc(&open_count);
200
201         return 0;
202 }
203
204 static int pxp_device_release(struct inode *inode, struct file *filp)
205 {
206         if (atomic_dec_and_test(&open_count))
207                 pxp_free_buffers();
208
209         return 0;
210 }
211
212 static int pxp_device_mmap(struct file *file, struct vm_area_struct *vma)
213 {
214         struct memalloc_record *rec, *n;
215         int request_size, found;
216
217         request_size = vma->vm_end - vma->vm_start;
218         found = 0;
219
220         pr_debug("start=0x%x, pgoff=0x%x, size=0x%x\n",
221                  (unsigned int)(vma->vm_start), (unsigned int)(vma->vm_pgoff),
222                  request_size);
223
224         spin_lock(&pxp_mem_lock);
225         list_for_each_entry_safe(rec, n, &head, list) {
226                 if (rec->mem.phys_addr == (vma->vm_pgoff << PAGE_SHIFT) &&
227                         (rec->mem.size <= request_size)) {
228                         found = 1;
229                         break;
230                 }
231         }
232         spin_unlock(&pxp_mem_lock);
233
234         if (found == 0)
235                 return -ENOMEM;
236
237         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
238
239         return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
240                                request_size, vma->vm_page_prot) ? -EAGAIN : 0;
241 }
242
243 static bool chan_filter(struct dma_chan *chan, void *arg)
244 {
245         if (imx_dma_is_pxp(chan))
246                 return true;
247         else
248                 return false;
249 }
250
251 static long pxp_device_ioctl(struct file *filp,
252                             unsigned int cmd, unsigned long arg)
253 {
254         int ret = 0;
255
256         switch (cmd) {
257         case PXP_IOC_GET_CHAN:
258                 {
259                         struct pxp_chan_info *info;
260                         dma_cap_mask_t mask;
261
262                         pr_debug("drv: PXP_IOC_GET_CHAN Line %d\n", __LINE__);
263                         info = kzalloc(sizeof(*info), GFP_KERNEL);
264                         if (!info) {
265                                 pr_err("%d: alloc err\n", __LINE__);
266                                 return -ENOMEM;
267                         }
268
269                         dma_cap_zero(mask);
270                         dma_cap_set(DMA_SLAVE, mask);
271                         dma_cap_set(DMA_PRIVATE, mask);
272                         info->dma_chan =
273                                 dma_request_channel(mask, chan_filter, NULL);
274                         if (!info->dma_chan) {
275                                 pr_err("Unsccessfully received channel!\n");
276                                 kfree(info);
277                                 return -EBUSY;
278                         }
279                         pr_debug("Successfully received channel."
280                                  "chan_id %d\n", info->dma_chan->chan_id);
281
282                         spin_lock(&pxp_chan_lock);
283                         list_add_tail(&info->list, &list);
284                         spin_unlock(&pxp_chan_lock);
285
286                         if (put_user
287                             (info->dma_chan->chan_id, (u32 __user *) arg))
288                                 return -EFAULT;
289
290                         break;
291                 }
292         case PXP_IOC_PUT_CHAN:
293                 {
294                         int chan_id;
295                         struct pxp_chan_info *info;
296
297                         if (get_user(chan_id, (u32 __user *) arg))
298                                 return -EFAULT;
299
300                         if (chan_id < 0 || chan_id >= NR_PXP_VIRT_CHANNEL)
301                                 return -ENODEV;
302
303                         spin_lock(&pxp_chan_lock);
304                         list_for_each_entry(info, &list, list) {
305                                 if (info->dma_chan->chan_id == chan_id)
306                                         break;
307                         }
308                         spin_unlock(&pxp_chan_lock);
309
310                         pr_debug("%d release chan_id %d\n", __LINE__,
311                                  info->dma_chan->chan_id);
312                         /* REVISIT */
313                         dma_release_channel(info->dma_chan);
314                         spin_lock(&pxp_chan_lock);
315                         list_del_init(&info->list);
316                         spin_unlock(&pxp_chan_lock);
317                         kfree(info);
318
319                         break;
320                 }
321         case PXP_IOC_CONFIG_CHAN:
322                 {
323
324                         int ret;
325
326                         ret = pxp_ioc_config_chan(arg);
327                         if (ret)
328                                 return ret;
329
330                         break;
331                 }
332         case PXP_IOC_START_CHAN:
333                 {
334                         struct pxp_chan_info *info;
335                         int chan_id;
336
337                         if (get_user(chan_id, (u32 __user *) arg))
338                                 return -EFAULT;
339
340                         /* find the channel */
341                         spin_lock(&pxp_chan_lock);
342                         list_for_each_entry(info, &list, list) {
343                                 if (info->dma_chan->chan_id == chan_id)
344                                         break;
345                         }
346                         spin_unlock(&pxp_chan_lock);
347
348                         dma_async_issue_pending(info->dma_chan);
349
350                         break;
351                 }
352         case PXP_IOC_GET_PHYMEM:
353                 {
354                         struct memalloc_record *rec;
355
356                         rec = kzalloc(sizeof(*rec), GFP_KERNEL);
357                         if (!rec)
358                                 return -ENOMEM;
359
360                         ret = copy_from_user(&(rec->mem),
361                                              (struct pxp_mem_desc *)arg,
362                                              sizeof(struct pxp_mem_desc));
363                         if (ret) {
364                                 kfree(rec);
365                                 return -EFAULT;
366                         }
367
368                         pr_debug("[ALLOC] mem alloc size = 0x%x\n",
369                                  rec->mem.size);
370
371                         ret = pxp_alloc_dma_buffer(&(rec->mem));
372                         if (ret == -1) {
373                                 kfree(rec);
374                                 printk(KERN_ERR
375                                        "Physical memory allocation error!\n");
376                                 break;
377                         }
378                         ret = copy_to_user((void __user *)arg, &(rec->mem),
379                                            sizeof(struct pxp_mem_desc));
380                         if (ret) {
381                                 kfree(rec);
382                                 ret = -EFAULT;
383                                 break;
384                         }
385
386                         spin_lock(&pxp_mem_lock);
387                         list_add(&rec->list, &head);
388                         spin_unlock(&pxp_mem_lock);
389
390                         break;
391                 }
392         case PXP_IOC_PUT_PHYMEM:
393                 {
394                         struct memalloc_record *rec, *n;
395                         struct pxp_mem_desc pxp_mem;
396
397                         ret = copy_from_user(&pxp_mem,
398                                              (struct pxp_mem_desc *)arg,
399                                              sizeof(struct pxp_mem_desc));
400                         if (ret)
401                                 return -EACCES;
402
403                         pr_debug("[FREE] mem freed cpu_addr = 0x%x\n",
404                                  pxp_mem.cpu_addr);
405                         if ((void *)pxp_mem.cpu_addr != NULL)
406                                 pxp_free_dma_buffer(&pxp_mem);
407
408                         spin_lock(&pxp_mem_lock);
409                         list_for_each_entry_safe(rec, n, &head, list) {
410                                 if (rec->mem.cpu_addr == pxp_mem.cpu_addr) {
411                                         /* delete from list */
412                                         list_del(&rec->list);
413                                         kfree(rec);
414                                         break;
415                                 }
416                         }
417                         spin_unlock(&pxp_mem_lock);
418
419                         break;
420                 }
421         case PXP_IOC_WAIT4CMPLT:
422                 {
423                         struct pxp_chan_handle chan_handle;
424                         int ret, chan_id;
425
426                         ret = copy_from_user(&chan_handle,
427                                              (struct pxp_chan_handle *)arg,
428                                              sizeof(struct pxp_chan_handle));
429                         if (ret)
430                                 return -EFAULT;
431
432                         chan_id = chan_handle.chan_id;
433                         if (chan_id < 0 || chan_id >= NR_PXP_VIRT_CHANNEL)
434                                 return -ENODEV;
435
436                         ret = wait_event_interruptible
437                             (irq_info[chan_id].waitq,
438                              (irq_info[chan_id].irq_pending != 0));
439                         if (ret < 0) {
440                                 printk(KERN_WARNING
441                                        "pxp interrupt received.\n");
442                                 return -ERESTARTSYS;
443                         } else
444                                 irq_info[chan_id].irq_pending--;
445
446                         chan_handle.hist_status = irq_info[chan_id].hist_status;
447                         ret = copy_to_user((struct pxp_chan_handle *)arg,
448                                            &chan_handle,
449                                            sizeof(struct pxp_chan_handle));
450                         if (ret)
451                                 return -EFAULT;
452                         break;
453                 }
454         default:
455                 break;
456         }
457
458         return 0;
459 }
460
/* File operations backing /dev/pxp_device. */
static const struct file_operations pxp_device_fops = {
	.open = pxp_device_open,
	.release = pxp_device_release,
	.unlocked_ioctl = pxp_device_ioctl,
	.mmap = pxp_device_mmap,
};
467
/* Misc character device: creates /dev/pxp_device with a dynamic minor. */
static struct miscdevice pxp_device_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "pxp_device",
	.fops = &pxp_device_fops,
};
473
474 int register_pxp_device(void)
475 {
476         int ret;
477
478         ret = misc_register(&pxp_device_miscdev);
479         if (ret)
480                 return ret;
481
482         pr_debug("PxP_Device registered Successfully\n");
483         return 0;
484 }
485
/* Remove the /dev/pxp_device misc node registered above. */
void unregister_pxp_device(void)
{
	misc_deregister(&pxp_device_miscdev);
}