/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"
static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}
dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);
	list_move_tail(&vd->node, &vc->desc_submitted);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
		vc, vd, cookie);

	return cookie;
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);
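
/*
 * Illustrative sketch (not part of this file): a driver's prep callback
 * typically wraps a freshly allocated descriptor with vchan_tx_prep() from
 * virt-dma.h, which sets vd->tx.tx_submit to vchan_tx_submit() above and
 * parks the descriptor on desc_allocated until the client submits it.
 * "foo_prep_slave_sg", "foo_chan" and "foo_desc" are hypothetical
 * driver-side names:
 *
 *	static struct dma_async_tx_descriptor *
 *	foo_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 *			  unsigned int sg_len, enum dma_transfer_direction dir,
 *			  unsigned long flags, void *context)
 *	{
 *		struct foo_chan *fc = container_of(chan, struct foo_chan, vc.chan);
 *		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *		... fill in the hardware-specific fields of d from sgl ...
 *		return vchan_tx_prep(&fc->vc, &d->vd, flags);
 *	}
 */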
/**
 * vchan_tx_desc_free - free a reusable descriptor
 * @tx: the transfer
 *
 * This function frees a previously allocated reusable descriptor. The only
 * other way is to clear the DMA_CTRL_REUSE flag and submit the transfer one
 * last time.
 *
 * Returns 0 upon success
 */
int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	list_del(&vd->node);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
		vc, vd, vd->tx.cookie);
	vc->desc_free(vd);
	return 0;
}
EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
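
/*
 * Illustrative sketch (not part of this file): a dmaengine client that
 * wants to recycle a descriptor marks it reusable after preparation and
 * later releases it explicitly; dmaengine_desc_free() reaches
 * vchan_tx_desc_free() above through tx->desc_free, which vchan_tx_prep()
 * points at this function:
 *
 *	txd = dmaengine_prep_slave_single(chan, buf, len, dir, flags);
 *	if (txd && dmaengine_desc_set_reuse(txd) == 0) {
 *		... submit and reuse txd as often as needed ...
 *		dmaengine_desc_free(txd);
 *	}
 */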
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
	dma_cookie_t cookie)
{
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &vc->desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;
	return NULL;
}
EXPORT_SYMBOL_GPL(vchan_find_desc);
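
/*
 * Illustrative sketch (not part of this file): vchan_find_desc() is meant
 * to be called under vc->lock, typically from a driver's device_tx_status
 * callback to report the residue of a descriptor that is still issued;
 * "foo_desc_residue" is a hypothetical driver-side helper:
 *
 *	ret = dma_cookie_status(chan, cookie, state);
 *	if (ret == DMA_COMPLETE)
 *		return ret;
 *	spin_lock_irqsave(&vc->lock, flags);
 *	vd = vchan_find_desc(vc, cookie);
 *	if (vd)
 *		dma_set_residue(state, foo_desc_residue(vd));
 *	spin_unlock_irqrestore(&vc->lock, flags);
 *	return ret;
 */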
/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void vchan_complete(unsigned long arg)
{
	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
	struct virt_dma_desc *vd;
	dma_async_tx_callback cb = NULL;
	void *cb_data = NULL;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		cb = vd->tx.callback;
		cb_data = vd->tx.callback_param;
	}
	spin_unlock_irq(&vc->lock);

	if (cb)
		cb(cb_data);

	while (!list_empty(&head)) {
		vd = list_first_entry(&head, struct virt_dma_desc, node);
		cb = vd->tx.callback;
		cb_data = vd->tx.callback_param;
		list_del(&vd->node);
		if (dmaengine_desc_test_reuse(&vd->tx))
			list_add(&vd->node, &vc->desc_allocated);
		else
			vc->desc_free(vd);

		if (cb)
			cb(cb_data);
	}
}
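
/*
 * Illustrative sketch (not part of this file): desc_completed is normally
 * fed from the driver's interrupt handler, which calls
 * vchan_cookie_complete() from virt-dma.h while holding vc->lock; that
 * helper marks the cookie complete, moves the descriptor to desc_completed
 * and schedules this tasklet. "fc" and "cur_desc" are hypothetical
 * driver-side names:
 *
 *	spin_lock(&fc->vc.lock);
 *	if (fc->cur_desc) {
 *		vchan_cookie_complete(&fc->cur_desc->vd);
 *		fc->cur_desc = NULL;
 *	}
 *	spin_unlock(&fc->vc.lock);
 */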
void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct virt_dma_desc *vd = list_first_entry(head,
			struct virt_dma_desc, node);
		if (dmaengine_desc_test_reuse(&vd->tx)) {
			list_move_tail(&vd->node, &vc->desc_allocated);
		} else {
			dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
			list_del(&vd->node);
			vc->desc_free(vd);
		}
	}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
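
/*
 * Illustrative sketch (not part of this file): this is the usual tail of a
 * driver's terminate_all or free_chan_resources path; pending descriptors
 * are first collected under the lock with vchan_get_all_descriptors() from
 * virt-dma.h and then freed outside it:
 *
 *	spin_lock_irqsave(&vc->lock, flags);
 *	vchan_get_all_descriptors(vc, &head);
 *	spin_unlock_irqrestore(&vc->lock, flags);
 *	vchan_dma_desc_free_list(vc, &head);
 */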
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_allocated);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);

	tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);
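
/*
 * Illustrative sketch (not part of this file): a driver embeds
 * struct virt_dma_chan in its own channel structure, provides a desc_free
 * callback for vchan_complete() and vchan_dma_desc_free_list() to call,
 * and registers each channel with vchan_init(). "foo_chan", "foo_desc",
 * "foo_desc_free" and "fc" are hypothetical driver-side names, and
 * "dma_dev" is the driver's struct dma_device:
 *
 *	struct foo_chan {
 *		struct virt_dma_chan vc;
 *		void __iomem *base;
 *	};
 *
 *	static void foo_desc_free(struct virt_dma_desc *vd)
 *	{
 *		kfree(container_of(vd, struct foo_desc, vd));
 *	}
 *
 *	fc->vc.desc_free = foo_desc_free;
 *	vchan_init(&fc->vc, dma_dev);
 */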
MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");