2 * Copyright 2006-2013 Freescale Semiconductor, Inc. All Rights Reserved.
6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations:
10 * http://www.opensource.org/licenses/gpl-license.html
11 * http://www.gnu.org/copyleft/gpl.html
17 * @brief VPU system initialization and file operation implementation
22 #include <linux/kernel.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/stat.h>
27 #include <linux/platform_device.h>
28 #include <linux/kdev_t.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/wait.h>
31 #include <linux/list.h>
32 #include <linux/clk.h>
33 #include <linux/delay.h>
34 #include <linux/fsl_devices.h>
35 #include <linux/uaccess.h>
37 #include <linux/slab.h>
38 #include <linux/workqueue.h>
39 #include <linux/sched.h>
40 #include <linux/vmalloc.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/page-flags.h>
43 #include <linux/mm_types.h>
44 #include <linux/types.h>
45 #include <linux/memblock.h>
46 #include <linux/memory.h>
47 #include <linux/version.h>
48 #include <linux/module.h>
49 #include <linux/pm_runtime.h>
50 #include <linux/sizes.h>
51 #include <linux/genalloc.h>
53 #include <linux/of_device.h>
54 #include <linux/reset.h>
55 #include <linux/clk.h>
56 #include <linux/mxc_vpu.h>
/*
 * NOTE(review): ARM-specific page protection combining L_PTE_MT_UNCACHED
 * with L_PTE_XN (execute-never).  Used by vpu_map_hwregs() so userspace
 * mappings of the register window match the attributes of the ioremap()
 * done at probe time.
 */
58 /* Define one new pgprot which combined uncached and XN(never executable) */
59 #define pgprot_noncachedxn(prot) \
60 __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
/*
 * NOTE(review): the four fields below belong to struct vpu_priv; the
 * "struct vpu_priv {" opener and remaining fields (lock, clk_enabled, ...)
 * are not visible in this excerpt — confirm against the full file.
 */
63 struct fasync_struct *async_queue;	/* SIGIO delivery for VPU interrupts */
64 struct work_struct work;	/* deferred IRQ bottom half (vpu_worker_callback) */
65 struct workqueue_struct *workqueue;	/* dedicated queue created in probe */
67 const struct mxc_vpu_soc_data *soc_data;	/* per-SoC quirks/capabilities */
69 struct list_head users;	/* list of vpu_user_data (one per open fd) */
/* Per-open-file state; linked into vpu_priv::users. */
72 struct vpu_user_data {
73 struct vpu_priv *vpu_data;	/* back-pointer to the shared driver state */
74 struct list_head list;
78 /* To track the allocated memory buffer */
79 struct memalloc_record {
80 struct list_head list;	/* node in the global mem_list */
81 struct vpu_mem_desc mem;	/* DMA buffer described to/by userspace */
/* Per-SoC feature flags (bitfields; remainder of struct not visible here). */
89 struct mxc_vpu_soc_data {
90 unsigned vpu_pwr_mgmnt:1,
/* On-chip IRAM carve-out used by the codec (allocated in probe). */
100 static struct gen_pool *iram_pool;
101 static u32 iram_base;
/* Global list of DMA buffers handed out via VPU_IOC_PHYMEM_ALLOC. */
103 static LIST_HEAD(mem_list);
105 static int vpu_major;	/* char-device major (dynamic if initially 0) */
106 static struct class *vpu_class;
107 static struct vpu_priv vpu_data;	/* single shared driver instance */
108 static u8 open_count;	/* number of open fds; guarded by vpu_data.lock */
109 static struct clk *vpu_clk;
/* Firmware/parameter buffers shared between driver and userspace library. */
110 static struct vpu_mem_desc bitwork_mem;
111 static struct vpu_mem_desc pic_para_mem;
112 static struct vpu_mem_desc user_data_mem;
113 static struct vpu_mem_desc share_mem;	/* DMA-coherent shared memory */
114 static struct vpu_mem_desc vshare_mem;	/* vmalloc'ed shared memory */
116 static void __iomem *vpu_base;	/* ioremapped register window */
117 static int vpu_ipi_irq;
118 static u32 phy_vpu_base_addr;	/* physical base, re-exported via mmap */
120 static struct device *vpu_dev;
123 static struct iram_setting iram;	/* start/end reported to userspace */
125 /* implement the blocking ioctl */
126 static int irq_status;
127 static int codec_done;
128 static wait_queue_head_t vpu_queue;	/* waiters for VPU_IOC_WAIT4INT */
130 static int vpu_jpu_irq;	/* JPEG unit IRQ (only on SoCs with has_jpu) */
/* Register save area for suspend/resume (64 words from BIT_CODE_BUF_ADDR). */
133 static unsigned int regBk[64];
134 static unsigned int pc_before_suspend;
136 static struct regulator *vpu_regulator;	/* "pu" supply; may be ERR_PTR on MX5x */
/* Relaxed accessors for the VPU register window. */
138 #define READ_REG(x) readl_relaxed(vpu_base + (x))
139 #define WRITE_REG(val, x) writel_relaxed(val, vpu_base + (x))
/*
 * Reference-counted clock enable: only the 0 -> 1 transition actually
 * prepares/enables vpu_clk.  NOTE(review): presumably called with
 * vpu_data->lock held — confirm; clk_enabled is not atomic.
 */
141 static int vpu_clk_enable(struct vpu_priv *vpu_data)
145 if (vpu_data->clk_enabled++ == 0)
146 ret = clk_prepare_enable(vpu_clk);
/* Post-increment above means the count must now be >= 1. */
148 if (WARN_ON(vpu_data->clk_enabled <= 0))
/*
 * Reference-counted clock disable: mirror of vpu_clk_enable(); the gate is
 * only dropped on the 1 -> 0 transition.  WARNs on unbalanced calls.
 */
154 static int vpu_clk_disable(struct vpu_priv *vpu_data)
156 if (WARN_ON(vpu_data->clk_enabled == 0))
159 if (--vpu_data->clk_enabled == 0)
160 clk_disable_unprepare(vpu_clk);
/* Hardware reset via the reset-controller binding of the VPU device node. */
164 static inline int vpu_reset(void)
166 return device_reset(vpu_dev);
/*
 * Enable the "pu" regulator if one was obtained at probe.  IS_ERR() check
 * makes this a no-op on SoCs (MX5x) where the regulator is absent.
 */
169 static void vpu_power_up(void)
173 if (IS_ERR(vpu_regulator))
176 ret = regulator_enable(vpu_regulator);
178 dev_err(vpu_dev, "failed to power up vpu: %d\n", ret);
/* Counterpart of vpu_power_up(); silently skipped when no regulator exists. */
181 static void vpu_power_down(void)
185 if (IS_ERR(vpu_regulator))
188 ret = regulator_disable(vpu_regulator);
190 dev_err(vpu_dev, "failed to power down vpu: %d\n", ret);
194 * Private function to alloc dma buffer
195 * @return status 0 success.
/*
 * Fills mem->cpu_addr (and, presumably, mem->phy_addr on a line not visible
 * here) with a coherent DMA allocation of PAGE_ALIGN(mem->size) bytes.
 */
197 static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
199 mem->cpu_addr = dma_alloc_coherent(vpu_dev, PAGE_ALIGN(mem->size),
201 GFP_DMA | GFP_KERNEL);
202 dev_dbg(vpu_dev, "[ALLOC] mem alloc cpu_addr = %p\n", mem->cpu_addr);
203 if (mem->cpu_addr == NULL) {
204 dev_err(vpu_dev, "Physical memory allocation error!\n");
211 * Private function to free dma buffer
/* Safe to call on an unallocated descriptor (cpu_addr == NULL is skipped). */
213 static void vpu_free_dma_buffer(struct vpu_mem_desc *mem)
215 if (mem->cpu_addr != NULL)
216 dma_free_coherent(vpu_dev, PAGE_ALIGN(mem->size),
217 mem->cpu_addr, mem->phy_addr);
221 * Private function to free buffers
222 * @return status 0 success.
/*
 * Walks the global mem_list, releasing every DMA buffer still registered
 * and unlinking its record.  NOTE(review): locking of mem_list during this
 * walk is not visible in this excerpt — confirm callers hold the lock.
 */
224 static int vpu_free_buffers(void)
226 struct memalloc_record *rec, *n;
227 struct vpu_mem_desc mem;
229 list_for_each_entry_safe(rec, n, &mem_list, list) {
231 if (mem.cpu_addr != 0) {
232 vpu_free_dma_buffer(&mem);
233 dev_dbg(vpu_dev, "[FREE] freed paddr=0x%08X\n", mem.phy_addr);
234 /* delete from list */
235 list_del(&rec->list);
/*
 * Workqueue bottom half for both IRQ handlers: notifies async readers via
 * SIGIO and wakes anyone blocked in VPU_IOC_WAIT4INT.
 */
243 static inline void vpu_worker_callback(struct work_struct *w)
245 struct vpu_priv *dev = container_of(w, struct vpu_priv, work);
247 if (dev->async_queue)
248 kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
252 * Clock is gated on when dec/enc started, gate it off when
258 wake_up_interruptible(&vpu_queue);
262 * @brief vpu interrupt handler
/* Reads/acks the interrupt reason, then defers work to the workqueue. */
264 static irqreturn_t vpu_ipi_irq_handler(int irq, void *dev_id)
266 struct vpu_priv *dev = dev_id;
269 reg = READ_REG(BIT_INT_REASON);
/* Ack at the VPU so the line deasserts before scheduling the bottom half. */
272 WRITE_REG(0x1, BIT_INT_CLEAR);
274 queue_work(dev->workqueue, &dev->work);
280 * @brief vpu jpu interrupt handler
/* JPEG-unit variant; shares the same workqueue bottom half as the IPI IRQ. */
282 static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
284 struct vpu_priv *dev = dev_id;
287 reg = READ_REG(MJPEG_PIC_STATUS_REG);
291 queue_work(dev->workqueue, &dev->work);
297 * @brief open function for vpu file operation
299 * @return 0 on success or negative error code on error
301 static int vpu_open(struct inode *inode, struct file *filp)
/* Per-fd state; freed in vpu_release() via devm_kfree(). */
303 struct vpu_user_data *user_data = devm_kzalloc(vpu_dev,
306 if (user_data == NULL)
309 user_data->vpu_data = &vpu_data;
/*
 * NOTE(review): the list_add below appears to run BEFORE mutex_lock() is
 * taken — if so, concurrent open()s can corrupt vpu_data.users.  Verify
 * against the full file (lines between are not visible here).
 */
311 INIT_LIST_HEAD(&user_data->list);
312 list_add(&user_data->list, &vpu_data.users);
314 mutex_lock(&vpu_data.lock);
/* First opener powers up the device via runtime PM. */
316 if (open_count++ == 0) {
317 pm_runtime_get_sync(vpu_dev);
321 filp->private_data = user_data;
322 mutex_unlock(&vpu_data.lock);
327 * @brief IO ctrl function for vpu file operation
328 * @param cmd IO ctrl command
329 * @return 0 on success or negative error code on error
331 static long vpu_ioctl(struct file *filp, u_int cmd,
335 struct vpu_user_data *user_data = filp->private_data;
336 struct vpu_priv *vpu_data = user_data->vpu_data;
/* Allocate a DMA buffer for userspace and register it on mem_list. */
339 case VPU_IOC_PHYMEM_ALLOC:
341 struct memalloc_record *rec;
343 rec = kzalloc(sizeof(*rec), GFP_KERNEL);
347 if (copy_from_user(&rec->mem,
348 (struct vpu_mem_desc *)arg,
349 sizeof(struct vpu_mem_desc))) {
354 dev_dbg(vpu_dev, "[ALLOC] mem alloc size = 0x%x\n",
357 ret = vpu_alloc_dma_buffer(&rec->mem);
/* Return the filled-in descriptor (phy/cpu addresses) to userspace. */
362 if (copy_to_user((void __user *)arg, &rec->mem,
363 sizeof(struct vpu_mem_desc))) {
368 mutex_lock(&vpu_data->lock);
369 list_add(&rec->list, &mem_list);
370 mutex_unlock(&vpu_data->lock);
/* Free a previously allocated DMA buffer and drop its tracking record. */
374 case VPU_IOC_PHYMEM_FREE:
376 struct memalloc_record *rec, *n;
377 struct vpu_mem_desc vpu_mem;
379 if (copy_from_user(&vpu_mem,
380 (struct vpu_mem_desc *)arg,
381 sizeof(struct vpu_mem_desc)))
384 dev_dbg(vpu_dev, "[FREE] mem freed cpu_addr = %p\n",
/*
 * NOTE(review): the buffer is freed using the user-supplied descriptor
 * before it is matched against mem_list — a hostile descriptor could
 * free arbitrary DMA memory.  Verify validation exists in hidden lines.
 */
386 if (vpu_mem.cpu_addr != NULL)
387 vpu_free_dma_buffer(&vpu_mem)
389 mutex_lock(&vpu_data->lock);
390 list_for_each_entry_safe(rec, n, &mem_list, list) {
391 if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
392 list_del(&rec->list);
397 mutex_unlock(&vpu_data->lock);
/* Block until an interrupt arrives or the user-supplied timeout expires. */
401 case VPU_IOC_WAIT4INT:
403 u_long timeout = arg;	/* milliseconds, per msecs_to_jiffies below */
405 ret = wait_event_interruptible_timeout(vpu_queue,
407 msecs_to_jiffies(timeout));
409 dev_warn(vpu_dev, "VPU blocking: timeout.\n");
411 } else if (signal_pending(current)) {
412 dev_warn(vpu_dev, "VPU interrupt received.\n");
/* Report the IRAM region (start/end) reserved at probe time. */
419 case VPU_IOC_IRAM_SETTING:
420 ret = copy_to_user((void __user *)arg, &iram,
421 sizeof(struct iram_setting));
/* Per-fd clock gating with its own nesting counter (clk_enable_cnt). */
426 case VPU_IOC_CLKGATE_SETTING:
430 if (get_user(clkgate_en, (u32 __user *)arg))
433 mutex_lock(&vpu_data->lock);
435 ret = vpu_clk_enable(vpu_data);
437 user_data->clk_enable_cnt++;
439 if (user_data->clk_enable_cnt == 0) {
442 if (--user_data->clk_enable_cnt == 0)
443 vpu_clk_disable(vpu_data);
447 mutex_unlock(&vpu_data->lock);
/* Lazily allocate the DMA-coherent shared-memory region on first request. */
450 case VPU_IOC_GET_SHARE_MEM:
451 mutex_lock(&vpu_data->lock);
452 if (share_mem.cpu_addr == NULL) {
453 if (copy_from_user(&share_mem,
454 (struct vpu_mem_desc *)arg,
455 sizeof(struct vpu_mem_desc))) {
456 mutex_unlock(&vpu_data->lock);
459 ret = vpu_alloc_dma_buffer(&share_mem);
461 mutex_unlock(&vpu_data->lock);
465 if (copy_to_user((void __user *)arg,
467 sizeof(struct vpu_mem_desc)))
471 mutex_unlock(&vpu_data->lock);
/* Same pattern, but backed by vmalloc_user() (mmap'ed via remap_vmalloc). */
473 case VPU_IOC_REQ_VSHARE_MEM:
474 mutex_lock(&vpu_data->lock);
475 if (vshare_mem.cpu_addr == NULL) {
476 if (copy_from_user(&vshare_mem,
477 (struct vpu_mem_desc *)arg,
480 mutex_unlock(&vpu_data->lock);
483 vshare_mem.cpu_addr = vmalloc_user(vshare_mem.size);
484 if (vshare_mem.cpu_addr == NULL) {
485 mutex_unlock(&vpu_data->lock);
489 if (copy_to_user((void __user *)arg, &vshare_mem,
490 sizeof(struct vpu_mem_desc)))
494 mutex_unlock(&vpu_data->lock);
/* Lazily allocate the firmware working buffer.  NOTE(review): no lock
 * visible around bitwork_mem here, unlike the share_mem case — confirm. */
496 case VPU_IOC_GET_WORK_ADDR:
497 if (bitwork_mem.cpu_addr == 0) {
498 if (copy_from_user(&bitwork_mem,
499 (struct vpu_mem_desc *)arg,
500 sizeof(struct vpu_mem_desc)))
503 ret = vpu_alloc_dma_buffer(&bitwork_mem);
507 if (copy_to_user((void __user *)arg,
516 * The following two ioctls are used when user allocates a working buffer
517 * and registers it to vpu driver.
519 case VPU_IOC_QUERY_BITWORK_MEM:
520 if (copy_to_user((void __user *)arg,
522 sizeof(struct vpu_mem_desc)))
527 case VPU_IOC_SET_BITWORK_MEM:
528 if (copy_from_user(&bitwork_mem,
529 (struct vpu_mem_desc *)arg,
530 sizeof(struct vpu_mem_desc)))
535 case VPU_IOC_SYS_SW_RESET:
/* Debug-only dumps; bodies not visible in this excerpt. */
538 case VPU_IOC_REG_DUMP:
539 case VPU_IOC_PHYMEM_DUMP:
/* Round-trips a descriptor; size=1 apparently signals "present/valid". */
542 case VPU_IOC_PHYMEM_CHECK:
544 struct vpu_mem_desc check_memory;
546 ret = copy_from_user(&check_memory,
548 sizeof(struct vpu_mem_desc));
550 dev_err(vpu_dev, "copy from user failure:%d\n", ret);
554 check_memory.size = 1;
555 if (copy_to_user((void __user *)arg, &check_memory,
556 sizeof(struct vpu_mem_desc)))
/* Userspace-driven lock/unlock of the shared driver mutex. */
562 case VPU_IOC_LOCK_DEV:
566 if (get_user(lock_en, (u32 __user *)arg))
570 mutex_lock(&vpu_data->lock);
572 mutex_unlock(&vpu_data->lock);
577 dev_err(vpu_dev, "No such IOCTL, cmd is %d\n", cmd);
583 * @brief Release function for vpu file operation
584 * @return 0 on success or negative error code on error
586 static int vpu_release(struct inode *inode, struct file *filp)
588 unsigned long timeout;
589 struct vpu_user_data *user_data = filp->private_data;
590 struct vpu_priv *vpu_data = user_data->vpu_data;
592 mutex_lock(&vpu_data->lock);
/* Last closer: drain the hardware, then power everything down. */
594 if (open_count > 0 && !--open_count) {
595 /* Wait for vpu go to idle state */
596 vpu_clk_enable(vpu_data);
/* Non-zero PC means firmware was loaded/running; poll busy up to 1s. */
597 if (READ_REG(BIT_CUR_PC)) {
599 timeout = jiffies + HZ;
600 while (READ_REG(BIT_BUSY_FLAG)) {
602 if (time_after(jiffies, timeout)) {
603 dev_warn(vpu_dev, "VPU timeout during release\n");
608 /* Clean up interrupt */
609 cancel_work_sync(&vpu_data->work);
610 flush_workqueue(vpu_data->workqueue);
/* Still busy after the drain: handling differs per SoC generation. */
613 if (READ_REG(BIT_BUSY_FLAG)) {
614 if (vpu_data->soc_data->is_mx51 ||
615 vpu_data->soc_data->is_mx53) {
617 "fatal error: can't gate/power off when VPU is busy\n");
618 vpu_clk_disable(vpu_data);
619 mutex_unlock(&vpu_data->lock);
/* i.MX6: magic handshake via undocumented regs 0x10F0/0x10F4 to park
 * the codec; 0x77 in 0x10F4 signals the VPU accepted the request. */
622 if (vpu_data->soc_data->is_mx6dl ||
623 vpu_data->soc_data->is_mx6q) {
624 WRITE_REG(0x11, 0x10F0);
625 timeout = jiffies + HZ;
626 while (READ_REG(0x10F4) != 0x77) {
628 if (time_after(jiffies, timeout))
632 if (READ_REG(0x10F4) != 0x77) {
634 "fatal error: can't gate/power off when VPU is busy\n");
635 WRITE_REG(0x0, 0x10F0);
636 vpu_clk_disable(vpu_data);
637 mutex_unlock(&vpu_data->lock);
647 /* Free shared memory when vpu device is idle */
648 vpu_free_dma_buffer(&share_mem);
649 share_mem.cpu_addr = 0;
650 vfree(vshare_mem.cpu_addr);
651 vshare_mem.cpu_addr = 0;
/* Drop any clock references this fd still holds (see CLKGATE ioctl). */
653 if (user_data->clk_enable_cnt)
654 vpu_clk_disable(vpu_data);
656 vpu_clk_disable(vpu_data);
658 pm_runtime_put_sync_suspend(vpu_dev);
659 devm_kfree(vpu_dev, user_data);
661 mutex_unlock(&vpu_data->lock);
667 * @brief fasync function for vpu file operation
668 * @return 0 on success or negative error code on error
/* Standard fasync plumbing: registers this fd for SIGIO on VPU interrupts. */
670 static int vpu_fasync(int fd, struct file *filp, int mode)
672 struct vpu_user_data *user_data = filp->private_data;
673 struct vpu_priv *vpu_data = user_data->vpu_data;
674 return fasync_helper(fd, filp, mode, &vpu_data->async_queue);
678 * @brief memory map function of hardware registers for vpu file operation
679 * @return 0 on success or negative error code on error
681 static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
685 vm->vm_flags |= VM_IO;
687 * Since vpu registers have been mapped with ioremap() at probe
688 * which L_PTE_XN is 1, and the same physical address must be
689 * mapped multiple times with same type, so set L_PTE_XN to 1 here.
690 * Otherwise, there may be unexpected result in video codec.
692 vm->vm_page_prot = pgprot_noncachedxn(vm->vm_page_prot);
693 pfn = phy_vpu_base_addr >> PAGE_SHIFT;
694 dev_dbg(vpu_dev, "size=0x%lx, page no.=0x%lx\n",
695 vm->vm_end - vm->vm_start, pfn);
/* remap_pfn_range() returns non-zero on failure; map that to -EAGAIN. */
696 return remap_pfn_range(vm, vm->vm_start, pfn,
697 vm->vm_end - vm->vm_start,
698 vm->vm_page_prot) ? -EAGAIN : 0;
702 * @brief memory map function of memory for vpu file operation
703 * @return 0 on success or negative error code on error
/*
 * Maps a previously allocated DMA buffer: the caller passes the physical
 * page number in vm_pgoff (see vpu_mmap() dispatch).  Write-combined
 * mapping suits the coherent DMA buffers handed out by PHYMEM_ALLOC.
 */
705 static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
707 size_t request_size = vm->vm_end - vm->vm_start;
709 dev_dbg(vpu_dev, "start=0x%08lx, pgoff=0x%08lx, size=%zx\n",
710 vm->vm_start, vm->vm_pgoff, request_size);
712 vm->vm_flags |= VM_IO;
713 vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);
715 return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
716 request_size, vm->vm_page_prot) ? -EAGAIN : 0;
720 * @brief memory map function of vmalloced share memory
721 * @return 0 on success or negative error code on error
723 static int vpu_map_vshare_mem(struct file *fp, struct vm_area_struct *vm)
/* vm_pgoff here encodes the vmalloc address of vshare_mem (set by vpu_mmap). */
727 ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
728 vm->vm_flags |= VM_IO;
732 * @brief memory map interface for vpu file operation
733 * @return 0 on success or negative error code on error
/*
 * Dispatch on vm_pgoff: the vshare_mem page offset selects the vmalloc
 * mapping, any other non-zero offset is a DMA buffer, and offset 0 maps
 * the hardware register window.
 */
735 static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
737 unsigned long offset;
739 offset = (unsigned long)vshare_mem.cpu_addr >> PAGE_SHIFT;
741 if (vm->vm_pgoff && (vm->vm_pgoff == offset))
742 return vpu_map_vshare_mem(fp, vm);
743 else if (vm->vm_pgoff)
744 return vpu_map_dma_mem(fp, vm);
746 return vpu_map_hwregs(fp, vm);
/* Character-device operations for /dev node created in probe. */
749 static const struct file_operations vpu_fops = {
750 .owner = THIS_MODULE,
752 .unlocked_ioctl = vpu_ioctl,
753 .release = vpu_release,
754 .fasync = vpu_fasync,
/* Per-SoC capability tables selected through the OF match table below. */
758 static const struct mxc_vpu_soc_data imx6dl_vpu_data = {
759 .regulator_required = 1,
/* i.MX6Q additionally needs the MBC_SET_SUBBLK_EN reset quirk (see resume). */
764 static const struct mxc_vpu_soc_data imx6q_vpu_data = {
765 .quirk_subblk_en = 1,
766 .regulator_required = 1,
771 static const struct mxc_vpu_soc_data imx53_vpu_data = {
774 static const struct mxc_vpu_soc_data imx51_vpu_data = {
778 static const struct of_device_id vpu_of_match[] = {
779 { .compatible = "fsl,imx6dl-vpu", .data = &imx6dl_vpu_data, },
780 { .compatible = "fsl,imx6q-vpu", .data = &imx6q_vpu_data, },
781 { .compatible = "fsl,imx53-vpu", .data = &imx53_vpu_data, },
782 { .compatible = "fsl,imx51-vpu", .data = &imx51_vpu_data, },
785 MODULE_DEVICE_TABLE(of, vpu_of_match);
788 * This function is called by the driver framework to initialize the vpu device.
789 * @param dev The device structure for the vpu passed in by the framework.
790 * @return 0 on success or negative error code on error
792 static int vpu_dev_probe(struct platform_device *pdev)
795 struct device *temp_class;
796 struct resource *res;
797 unsigned long addr = 0;
798 struct device_node *np = pdev->dev.of_node;
800 struct vpu_priv *drv_data;
801 const struct of_device_id *of_id = of_match_device(vpu_of_match,
803 const struct mxc_vpu_soc_data *soc_data = of_id->data;
805 drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
806 if (drv_data == NULL)
809 drv_data->soc_data = soc_data;
810 INIT_LIST_HEAD(&drv_data->users);
812 init_waitqueue_head(&vpu_queue);
/* Optional on-chip IRAM carve-out described by "iramsize"/"iram" DT props. */
814 err = of_property_read_u32(np, "iramsize", &iramsize);
815 if (!err && iramsize) {
816 iram_pool = of_get_named_gen_pool(np, "iram", 0);
818 dev_err(&pdev->dev, "iram pool not available\n");
822 iram_base = gen_pool_alloc(iram_pool, iramsize);
824 dev_err(&pdev->dev, "unable to alloc iram\n");
828 addr = gen_pool_virt_to_phys(iram_pool, iram_base);
832 iram.start = iram.end = 0;
835 iram.end = addr + iramsize - 1;
838 vpu_dev = &pdev->dev;
/* Map the register window; also remember its physical base for mmap. */
840 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu_regs");
842 dev_err(vpu_dev, "vpu: unable to get vpu base addr\n");
845 phy_vpu_base_addr = res->start;
846 vpu_base = devm_ioremap_resource(&pdev->dev, res);
847 if (IS_ERR(vpu_base))
848 return PTR_ERR(vpu_base);
/* Char device + class + /dev node. */
850 vpu_major = register_chrdev(vpu_major, "mxc_vpu", &vpu_fops);
852 dev_err(vpu_dev, "vpu: unable to get a major for VPU\n");
856 vpu_class = class_create(THIS_MODULE, "mxc_vpu");
857 if (IS_ERR(vpu_class)) {
858 err = PTR_ERR(vpu_class);
862 temp_class = device_create(vpu_class, NULL, MKDEV(vpu_major, 0),
864 if (IS_ERR(temp_class)) {
865 err = PTR_ERR(temp_class);
869 vpu_clk = clk_get(&pdev->dev, "vpu_clk");
870 if (IS_ERR(vpu_clk)) {
871 err = PTR_ERR(vpu_clk);
875 vpu_ipi_irq = platform_get_irq_byname(pdev, "vpu_ipi_irq");
876 if (vpu_ipi_irq < 0) {
877 dev_err(vpu_dev, "vpu: unable to get vpu interrupt\n");
881 err = request_irq(vpu_ipi_irq, vpu_ipi_irq_handler, 0, "VPU_CODEC_IRQ",
/* Regulator is mandatory only where soc_data says so (i.MX6). */
886 vpu_regulator = devm_regulator_get(vpu_dev, "pu");
887 if (IS_ERR(vpu_regulator)) {
888 if (drv_data->soc_data->regulator_required) {
889 dev_err(vpu_dev, "failed to get vpu power\n");
892 /* regulator_get will return error on MX5x,
893 * just ignore it everywhere
895 dev_warn(vpu_dev, "failed to get vpu power\n");
899 platform_set_drvdata(pdev, drv_data);
/* Optional JPEG unit interrupt (has_jpu SoCs only). */
901 if (drv_data->soc_data->has_jpu) {
902 vpu_jpu_irq = platform_get_irq_byname(pdev, "vpu_jpu_irq");
903 if (vpu_jpu_irq < 0) {
904 dev_err(vpu_dev, "vpu: unable to get vpu jpu interrupt\n");
908 err = request_irq(vpu_jpu_irq, vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
909 "VPU_JPG_IRQ", &vpu_data);
914 pm_runtime_enable(&pdev->dev);
916 vpu_data.workqueue = create_workqueue("vpu_wq");
917 INIT_WORK(&vpu_data.work, vpu_worker_callback);
918 mutex_init(&vpu_data.lock);
919 dev_info(vpu_dev, "VPU initialized\n");
/* Error unwind labels (partially visible): tear down in reverse order. */
923 device_destroy(vpu_class, MKDEV(vpu_major, 0));
924 class_destroy(vpu_class);
926 unregister_chrdev(vpu_major, "mxc_vpu");
/* Teardown: IRQs, workqueue, IRAM, char device, DMA buffers, VPU reset. */
930 static int vpu_dev_remove(struct platform_device *pdev)
932 struct vpu_priv *vpu_data = platform_get_drvdata(pdev);
934 pm_runtime_disable(&pdev->dev);
936 free_irq(vpu_ipi_irq, &vpu_data);
937 #ifdef MXC_VPU_HAS_JPU
938 free_irq(vpu_jpu_irq, &vpu_data);
940 cancel_work_sync(&vpu_data->work);
941 flush_workqueue(vpu_data->workqueue);
942 destroy_workqueue(vpu_data->workqueue);
/* Return the IRAM carve-out to the pool (iram.end is inclusive). */
946 gen_pool_free(iram_pool, iram_base, iram.end-iram.start+1);
949 device_destroy(vpu_class, MKDEV(vpu_major, 0));
950 class_destroy(vpu_class);
951 unregister_chrdev(vpu_major, "mxc_vpu");
955 vpu_free_dma_buffer(&bitwork_mem);
956 vpu_free_dma_buffer(&pic_para_mem);
957 vpu_free_dma_buffer(&user_data_mem);
959 /* reset VPU state */
/* Clock must be on for the reset (invisible here) to reach the block. */
961 vpu_clk_enable(vpu_data);
963 vpu_clk_disable(vpu_data);
/*
 * System suspend: wait (<= 1s) for the codec to go idle, save the 64-word
 * register block and current PC, then gate the clock.
 */
971 static int vpu_suspend(struct device *dev)
973 struct vpu_priv *vpu_data = dev_get_drvdata(dev);
974 unsigned long timeout;
976 mutex_lock(&vpu_data->lock);
979 /* Wait for vpu go to idle state, suspect vpu cannot be changed
980 to idle state after about 1 sec */
981 timeout = jiffies + HZ;
982 while (READ_REG(BIT_BUSY_FLAG)) {
984 if (time_after(jiffies, timeout)) {
985 mutex_unlock(&vpu_data->lock);
/* i.MX53 takes an early-exit path; details not visible in this excerpt. */
990 if (vpu_data->soc_data->is_mx53) {
991 mutex_unlock(&vpu_data->lock);
/* Only save context if firmware was ever loaded (bitwork_mem allocated). */
995 if (bitwork_mem.cpu_addr != 0) {
998 /* Save 64 registers from BIT_CODE_BUF_ADDR */
999 for (i = 0; i < 64; i++)
1000 regBk[i] = READ_REG(BIT_CODE_BUF_ADDR + (i * 4));
1001 pc_before_suspend = READ_REG(BIT_CUR_PC);
1004 vpu_clk_disable(vpu_data);
1005 /* If VPU is working before suspend, disable
1006 * regulator to make usecount right.
1011 mutex_unlock(&vpu_data->lock);
/*
 * System resume: restore saved registers, re-download the 4 KiB boot code
 * from bitwork_mem, and restart the firmware if it was running at suspend.
 */
1015 static int vpu_resume(struct device *dev)
1018 struct vpu_priv *vpu_data = dev_get_drvdata(dev);
1020 mutex_lock(&vpu_data->lock);
1023 if (vpu_data->soc_data->is_mx53) {
1024 vpu_clk_enable(vpu_data);
1028 /* If VPU is working before suspend, enable
1029 * regulator to make usecount right.
1033 if (bitwork_mem.cpu_addr != NULL) {
1034 u32 *p = bitwork_mem.cpu_addr;
1039 vpu_clk_enable(vpu_data);
/* Non-zero PC means power was never lost; skip the full restore. */
1041 pc = READ_REG(BIT_CUR_PC);
1043 dev_warn(vpu_dev, "Not power off after suspend (PC=0x%x)\n", pc);
1047 /* Restore registers */
1048 for (i = 0; i < 64; i++)
1049 WRITE_REG(regBk[i], BIT_CODE_BUF_ADDR + (i * 4));
1051 WRITE_REG(0x0, BIT_RESET_CTRL);
1052 WRITE_REG(0x0, BIT_CODE_RUN);
1053 /* MX6 RTL has a bug not to init MBC_SET_SUBBLK_EN on reset */
1054 if (vpu_data->soc_data->quirk_subblk_en)
1055 WRITE_REG(0x0, MBC_SET_SUBBLK_EN);
1058 * Re-load boot code, from the codebuffer in external RAM.
1059 * Thankfully, we only need 4096 bytes, same for all platforms.
/* Each 32-bit word of the code buffer is split into two 16-bit halves
 * and written through BIT_CODE_DOWN together with its target index. */
1061 for (i = 0; i < 2048; i += 4) {
1062 data = p[(i / 2) + 1];
1063 data_hi = (data >> 16) & 0xFFFF;
1064 data_lo = data & 0xFFFF;
1065 WRITE_REG((i << 16) | data_hi, BIT_CODE_DOWN);
1066 WRITE_REG(((i + 1) << 16) | data_lo,
1070 data_hi = (data >> 16) & 0xFFFF;
1071 data_lo = data & 0xFFFF;
1072 WRITE_REG(((i + 2) << 16) | data_hi,
1074 WRITE_REG(((i + 3) << 16) | data_lo,
/* Only restart the firmware if it was actually running before suspend. */
1078 if (pc_before_suspend) {
1079 WRITE_REG(0x1, BIT_BUSY_FLAG);
1080 WRITE_REG(0x1, BIT_CODE_RUN);
/* NOTE(review): unbounded busy-wait — no timeout visible here; confirm. */
1081 while (READ_REG(BIT_BUSY_FLAG))
1084 dev_warn(vpu_dev, "PC=0 before suspend\n");
1089 mutex_unlock(&vpu_data->lock);
/* PM ops only exist when CONFIG_PM is set; NULL otherwise. */
1093 static SIMPLE_DEV_PM_OPS(vpu_pm_ops, vpu_suspend, vpu_resume);
1094 #define VPU_PM_OPS &vpu_pm_ops
1096 #define VPU_PM_OPS NULL
1097 #endif /* !CONFIG_PM */
1099 /*! Driver definition
1102 static struct platform_driver mxcvpu_driver = {
1105 .of_match_table = vpu_of_match,
1108 .probe = vpu_dev_probe,
1109 .remove = vpu_dev_remove,
1112 module_platform_driver(mxcvpu_driver);
1114 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
1115 MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
1116 MODULE_LICENSE("GPL");