]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/dma/pxp/pxp_device.c
ENGR00291658 PXP: allow PXP device users to submit multiple tasks before start PXP
[karo-tx-linux.git] / drivers / dma / pxp / pxp_device.c
1 /*
2  * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
17  *
18  */
19 #include <linux/interrupt.h>
20 #include <linux/miscdevice.h>
21 #include <linux/platform_device.h>
22 #include <linux/fs.h>
23 #include <linux/slab.h>
24 #include <linux/uaccess.h>
25 #include <linux/delay.h>
26 #include <linux/dmaengine.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/sched.h>
29 #include <linux/module.h>
30 #include <linux/pxp_dma.h>
31 #include <linux/atomic.h>
32 #include <linux/platform_data/dma-imx.h>
33
/* Number of userspace openers of the device node. */
static atomic_t open_count = ATOMIC_INIT(0);

/* Protects "head", the list of userspace-allocated DMA buffers. */
static DEFINE_SPINLOCK(pxp_mem_lock);
/* Protects "list", the list of DMA channels handed out via ioctl. */
static DEFINE_SPINLOCK(pxp_chan_lock);
/* Allocation records (struct memalloc_record) for PXP_IOC_GET_PHYMEM. */
static LIST_HEAD(head);
/* Channel records (struct pxp_chan_info) for PXP_IOC_GET_CHAN. */
static LIST_HEAD(list);
/* Per-channel completion bookkeeping: pending count, waitq, hist status. */
static struct pxp_irq_info irq_info[NR_PXP_VIRT_CHANNEL];
41
/*
 * Handle exchanged with userspace for PXP_IOC_WAIT4CMPLT: the caller
 * fills in chan_id, the driver returns the histogram status latched by
 * the last EOF interrupt on that channel.
 */
struct pxp_chan_handle {
	int chan_id;		/* virtual PxP channel index */
	int hist_status;	/* histogram status from the completed task */
};
46
/* To track the allocated memory buffer */
struct memalloc_record {
	struct list_head list;		/* linkage on the global "head" list */
	struct pxp_mem_desc mem;	/* cpu/phys address and size of the buffer */
};
52
/* Tracks one DMA channel handed out to userspace via PXP_IOC_GET_CHAN. */
struct pxp_chan_info {
	struct dma_chan *dma_chan;	/* channel from dma_request_channel() */
	struct list_head list;		/* linkage on the global "list" */
};
57
58 static int pxp_alloc_dma_buffer(struct pxp_mem_desc *mem)
59 {
60         mem->cpu_addr = (unsigned long)
61             dma_alloc_coherent(NULL, PAGE_ALIGN(mem->size),
62                                (dma_addr_t *) (&mem->phys_addr),
63                                GFP_DMA | GFP_KERNEL);
64         pr_debug("[ALLOC] mem alloc phys_addr = 0x%x\n", mem->phys_addr);
65         if ((void *)(mem->cpu_addr) == NULL) {
66                 printk(KERN_ERR "Physical memory allocation error!\n");
67                 return -1;
68         }
69         return 0;
70 }
71
72 static void pxp_free_dma_buffer(struct pxp_mem_desc *mem)
73 {
74         if (mem->cpu_addr != 0) {
75                 dma_free_coherent(0, PAGE_ALIGN(mem->size),
76                                   (void *)mem->cpu_addr, mem->phys_addr);
77         }
78 }
79
80 static int pxp_free_buffers(void)
81 {
82         struct memalloc_record *rec, *n;
83         struct pxp_mem_desc mem;
84
85         list_for_each_entry_safe(rec, n, &head, list) {
86                 mem = rec->mem;
87                 if (mem.cpu_addr != 0) {
88                         pxp_free_dma_buffer(&mem);
89                         pr_debug("[FREE] freed paddr=0x%08X\n", mem.phys_addr);
90                         /* delete from list */
91                         list_del(&rec->list);
92                         kfree(rec);
93                 }
94         }
95
96         return 0;
97 }
98
99 /* Callback function triggered after PxP receives an EOF interrupt */
100 static void pxp_dma_done(void *arg)
101 {
102         struct pxp_tx_desc *tx_desc = to_tx_desc(arg);
103         struct dma_chan *chan = tx_desc->txd.chan;
104         struct pxp_channel *pxp_chan = to_pxp_channel(chan);
105         int chan_id = pxp_chan->dma_chan.chan_id;
106         unsigned long flags;
107
108         pr_debug("DMA Done ISR, chan_id %d\n", chan_id);
109
110         spin_lock_irqsave(&(irq_info[chan_id].lock), flags);
111         irq_info[chan_id].irq_pending--;
112         irq_info[chan_id].hist_status = tx_desc->hist_status;
113         spin_unlock_irqrestore(&(irq_info[chan_id].lock), flags);
114
115         wake_up_interruptible(&(irq_info[chan_id].waitq));
116 }
117
118 static int pxp_ioc_config_chan(unsigned long arg)
119 {
120         struct scatterlist sg[3];
121         struct pxp_tx_desc *desc;
122         struct dma_async_tx_descriptor *txd;
123         struct pxp_chan_info *info;
124         struct pxp_config_data pxp_conf;
125         dma_cookie_t cookie;
126         int chan_id;
127         int i, length, ret;
128         unsigned long flags;
129
130         ret = copy_from_user(&pxp_conf,
131                              (struct pxp_config_data *)arg,
132                              sizeof(struct pxp_config_data));
133         if (ret)
134                 return -EFAULT;
135
136         chan_id = pxp_conf.chan_id;
137         if (chan_id < 0 || chan_id >= NR_PXP_VIRT_CHANNEL)
138                 return -ENODEV;
139
140         /* find the channel */
141         spin_lock(&pxp_chan_lock);
142         list_for_each_entry(info, &list, list) {
143                 if (info->dma_chan->chan_id == chan_id)
144                         break;
145         }
146         spin_unlock(&pxp_chan_lock);
147
148         sg_init_table(sg, 3);
149
150         txd =
151             info->dma_chan->device->device_prep_slave_sg(info->dma_chan,
152                                                          sg, 3,
153                                                          DMA_TO_DEVICE,
154                                                          DMA_PREP_INTERRUPT,
155                                                          NULL);
156         if (!txd) {
157                 pr_err("Error preparing a DMA transaction descriptor.\n");
158                 return -EIO;
159         }
160
161         txd->callback_param = txd;
162         txd->callback = pxp_dma_done;
163
164         desc = to_tx_desc(txd);
165
166         length = desc->len;
167         for (i = 0; i < length; i++) {
168                 if (i == 0) {   /* S0 */
169                         memcpy(&desc->proc_data,
170                                &pxp_conf.proc_data,
171                                sizeof(struct pxp_proc_data));
172                         memcpy(&desc->layer_param.s0_param,
173                                &pxp_conf.s0_param,
174                                sizeof(struct pxp_layer_param));
175                 } else if (i == 1) {    /* Output */
176                         memcpy(&desc->layer_param.out_param,
177                                &pxp_conf.out_param,
178                                sizeof(struct pxp_layer_param));
179                 } else {
180                         /* OverLay */
181                         memcpy(&desc->layer_param.ol_param,
182                                &pxp_conf.ol_param,
183                                sizeof(struct pxp_layer_param));
184                 }
185
186                 desc = desc->next;
187         }
188
189         cookie = txd->tx_submit(txd);
190         if (cookie < 0) {
191                 pr_err("Error tx_submit\n");
192                 return -EIO;
193         }
194
195         spin_lock_irqsave(&(irq_info[chan_id].lock), flags);
196         irq_info[chan_id].irq_pending++;
197         spin_unlock_irqrestore(&(irq_info[chan_id].lock), flags);
198
199         return 0;
200 }
201
202 static int pxp_device_open(struct inode *inode, struct file *filp)
203 {
204         atomic_inc(&open_count);
205
206         return 0;
207 }
208
209 static int pxp_device_release(struct inode *inode, struct file *filp)
210 {
211         if (atomic_dec_and_test(&open_count))
212                 pxp_free_buffers();
213
214         return 0;
215 }
216
217 static int pxp_device_mmap(struct file *file, struct vm_area_struct *vma)
218 {
219         struct memalloc_record *rec, *n;
220         int request_size, found;
221
222         request_size = vma->vm_end - vma->vm_start;
223         found = 0;
224
225         pr_debug("start=0x%x, pgoff=0x%x, size=0x%x\n",
226                  (unsigned int)(vma->vm_start), (unsigned int)(vma->vm_pgoff),
227                  request_size);
228
229         spin_lock(&pxp_mem_lock);
230         list_for_each_entry_safe(rec, n, &head, list) {
231                 if (rec->mem.phys_addr == (vma->vm_pgoff << PAGE_SHIFT) &&
232                         (rec->mem.size <= request_size)) {
233                         found = 1;
234                         break;
235                 }
236         }
237         spin_unlock(&pxp_mem_lock);
238
239         if (found == 0)
240                 return -ENOMEM;
241
242         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
243
244         return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
245                                request_size, vma->vm_page_prot) ? -EAGAIN : 0;
246 }
247
248 static bool chan_filter(struct dma_chan *chan, void *arg)
249 {
250         if (imx_dma_is_pxp(chan))
251                 return true;
252         else
253                 return false;
254 }
255
256 static long pxp_device_ioctl(struct file *filp,
257                             unsigned int cmd, unsigned long arg)
258 {
259         int ret = 0;
260
261         switch (cmd) {
262         case PXP_IOC_GET_CHAN:
263                 {
264                         struct pxp_chan_info *info;
265                         dma_cap_mask_t mask;
266
267                         pr_debug("drv: PXP_IOC_GET_CHAN Line %d\n", __LINE__);
268                         info = kzalloc(sizeof(*info), GFP_KERNEL);
269                         if (!info) {
270                                 pr_err("%d: alloc err\n", __LINE__);
271                                 return -ENOMEM;
272                         }
273
274                         dma_cap_zero(mask);
275                         dma_cap_set(DMA_SLAVE, mask);
276                         dma_cap_set(DMA_PRIVATE, mask);
277                         info->dma_chan =
278                                 dma_request_channel(mask, chan_filter, NULL);
279                         if (!info->dma_chan) {
280                                 pr_err("Unsccessfully received channel!\n");
281                                 kfree(info);
282                                 return -EBUSY;
283                         }
284                         pr_debug("Successfully received channel."
285                                  "chan_id %d\n", info->dma_chan->chan_id);
286
287                         spin_lock(&pxp_chan_lock);
288                         list_add_tail(&info->list, &list);
289                         spin_unlock(&pxp_chan_lock);
290
291                         init_waitqueue_head(&(irq_info[info->dma_chan->chan_id].waitq));
292                         if (put_user
293                             (info->dma_chan->chan_id, (u32 __user *) arg))
294                                 return -EFAULT;
295
296                         break;
297                 }
298         case PXP_IOC_PUT_CHAN:
299                 {
300                         int chan_id;
301                         struct pxp_chan_info *info;
302
303                         if (get_user(chan_id, (u32 __user *) arg))
304                                 return -EFAULT;
305
306                         if (chan_id < 0 || chan_id >= NR_PXP_VIRT_CHANNEL)
307                                 return -ENODEV;
308
309                         spin_lock(&pxp_chan_lock);
310                         list_for_each_entry(info, &list, list) {
311                                 if (info->dma_chan->chan_id == chan_id)
312                                         break;
313                         }
314                         spin_unlock(&pxp_chan_lock);
315
316                         pr_debug("%d release chan_id %d\n", __LINE__,
317                                  info->dma_chan->chan_id);
318                         /* REVISIT */
319                         dma_release_channel(info->dma_chan);
320                         spin_lock(&pxp_chan_lock);
321                         list_del_init(&info->list);
322                         spin_unlock(&pxp_chan_lock);
323                         kfree(info);
324
325                         break;
326                 }
327         case PXP_IOC_CONFIG_CHAN:
328                 {
329                         int ret;
330
331                         ret = pxp_ioc_config_chan(arg);
332                         if (ret)
333                                 return ret;
334
335                         break;
336                 }
337         case PXP_IOC_START_CHAN:
338                 {
339                         struct pxp_chan_info *info;
340                         int chan_id;
341
342                         if (get_user(chan_id, (u32 __user *) arg))
343                                 return -EFAULT;
344
345                         /* find the channel */
346                         spin_lock(&pxp_chan_lock);
347                         list_for_each_entry(info, &list, list) {
348                                 if (info->dma_chan->chan_id == chan_id)
349                                         break;
350                         }
351                         spin_unlock(&pxp_chan_lock);
352
353                         dma_async_issue_pending(info->dma_chan);
354
355                         break;
356                 }
357         case PXP_IOC_GET_PHYMEM:
358                 {
359                         struct memalloc_record *rec;
360
361                         rec = kzalloc(sizeof(*rec), GFP_KERNEL);
362                         if (!rec)
363                                 return -ENOMEM;
364
365                         ret = copy_from_user(&(rec->mem),
366                                              (struct pxp_mem_desc *)arg,
367                                              sizeof(struct pxp_mem_desc));
368                         if (ret) {
369                                 kfree(rec);
370                                 return -EFAULT;
371                         }
372
373                         pr_debug("[ALLOC] mem alloc size = 0x%x\n",
374                                  rec->mem.size);
375
376                         ret = pxp_alloc_dma_buffer(&(rec->mem));
377                         if (ret == -1) {
378                                 kfree(rec);
379                                 printk(KERN_ERR
380                                        "Physical memory allocation error!\n");
381                                 return ret;
382                         }
383                         ret = copy_to_user((void __user *)arg, &(rec->mem),
384                                            sizeof(struct pxp_mem_desc));
385                         if (ret) {
386                                 kfree(rec);
387                                 ret = -EFAULT;
388                                 return ret;
389                         }
390
391                         spin_lock(&pxp_mem_lock);
392                         list_add(&rec->list, &head);
393                         spin_unlock(&pxp_mem_lock);
394
395                         break;
396                 }
397         case PXP_IOC_PUT_PHYMEM:
398                 {
399                         struct memalloc_record *rec, *n;
400                         struct pxp_mem_desc pxp_mem;
401
402                         ret = copy_from_user(&pxp_mem,
403                                              (struct pxp_mem_desc *)arg,
404                                              sizeof(struct pxp_mem_desc));
405                         if (ret)
406                                 return -EACCES;
407
408                         pr_debug("[FREE] mem freed cpu_addr = 0x%x\n",
409                                  pxp_mem.cpu_addr);
410                         if ((void *)pxp_mem.cpu_addr != NULL)
411                                 pxp_free_dma_buffer(&pxp_mem);
412
413                         spin_lock(&pxp_mem_lock);
414                         list_for_each_entry_safe(rec, n, &head, list) {
415                                 if (rec->mem.cpu_addr == pxp_mem.cpu_addr) {
416                                         /* delete from list */
417                                         list_del(&rec->list);
418                                         kfree(rec);
419                                         break;
420                                 }
421                         }
422                         spin_unlock(&pxp_mem_lock);
423
424                         break;
425                 }
426         case PXP_IOC_WAIT4CMPLT:
427                 {
428                         struct pxp_chan_handle chan_handle;
429                         int ret, chan_id;
430
431                         ret = copy_from_user(&chan_handle,
432                                              (struct pxp_chan_handle *)arg,
433                                              sizeof(struct pxp_chan_handle));
434                         if (ret)
435                                 return -EFAULT;
436
437                         chan_id = chan_handle.chan_id;
438                         if (chan_id < 0 || chan_id >= NR_PXP_VIRT_CHANNEL)
439                                 return -ENODEV;
440
441                         ret = wait_event_interruptible
442                             (irq_info[chan_id].waitq,
443                              (irq_info[chan_id].irq_pending == 0));
444                         if (ret < 0) {
445                                 printk(KERN_WARNING
446                                        "WAIT4CMPLT: signal received.\n");
447                                 return -ERESTARTSYS;
448                         }
449
450                         chan_handle.hist_status = irq_info[chan_id].hist_status;
451                         ret = copy_to_user((struct pxp_chan_handle *)arg,
452                                            &chan_handle,
453                                            sizeof(struct pxp_chan_handle));
454                         if (ret)
455                                 return -EFAULT;
456                         break;
457                 }
458         default:
459                 break;
460         }
461
462         return 0;
463 }
464
/* File operations exported through the misc device node. */
static const struct file_operations pxp_device_fops = {
	.open = pxp_device_open,
	.release = pxp_device_release,
	.unlocked_ioctl = pxp_device_ioctl,
	.mmap = pxp_device_mmap,
};
471
/* Misc character device: /dev/pxp_device, dynamically assigned minor. */
static struct miscdevice pxp_device_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "pxp_device",
	.fops = &pxp_device_fops,
};
477
478 int register_pxp_device(void)
479 {
480         int i, ret;
481
482         ret = misc_register(&pxp_device_miscdev);
483         if (ret)
484                 return ret;
485
486         for (i = 0; i < NR_PXP_VIRT_CHANNEL; i++)
487                 spin_lock_init(&(irq_info[i].lock));
488
489         pr_debug("PxP_Device registered Successfully\n");
490         return 0;
491 }
492
/* Remove the PxP misc device node. */
void unregister_pxp_device(void)
{
	misc_deregister(&pxp_device_miscdev);
}