/*
 * Copyright (c) 2013 Linaro Ltd.
 * Copyright (c) 2013 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"
#define DRIVER_NAME             "k3-dma"
#define DMA_MAX_SIZE            0x1ffc

/* global registers (offsets from d->base) */
#define INT_STAT                0x00
#define INT_TC1                 0x04
#define INT_ERR1                0x0c
#define INT_ERR2                0x10
#define INT_TC1_MASK            0x18
#define INT_ERR1_MASK           0x20
#define INT_ERR2_MASK           0x24
#define INT_TC1_RAW             0x600
#define INT_ERR1_RAW            0x608
#define INT_ERR2_RAW            0x610
#define CH_PRI                  0x688
#define CH_STAT                 0x690
#define CX_CUR_CNT              0x704

/* per-channel registers (offsets from phy->base) */
#define CX_LLI                  0x800
#define CX_CNT                  0x810
#define CX_SRC                  0x814
#define CX_DST                  0x818
#define CX_CFG                  0x81c
#define AXI_CFG                 0x820
#define AXI_CFG_DEFAULT         0x201201
#define CX_LLI_CHAIN_EN         0x2
#define CX_CFG_EN               0x1
#define CX_CFG_MEM2PER          (0x1 << 2)
#define CX_CFG_PER2MEM          (0x2 << 2)
#define CX_CFG_SRCINCR          (0x1 << 31)
#define CX_CFG_DSTINCR          (0x1 << 30)
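
/*
 * Hardware link-list item (LLI) fetched by the controller from memory.
 * The field layout below is inferred from how the rest of this driver
 * programs the channel registers (k3_dma_set_desc, k3_dma_fill_desc);
 * the reserved words and the 32-byte alignment are assumptions based on
 * the k3 LLI format, not guaranteed by this file.
 */
struct k3_desc_hw {
        u32 lli;
        u32 reserved[3];
        u32 count;
        u32 saddr;
        u32 daddr;
        u32 config;
} __aligned(32);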

struct k3_dma_desc_sw {
        struct virt_dma_desc    vd;
        dma_addr_t              desc_hw_lli;
        size_t                  desc_num;
        size_t                  size;
        struct k3_desc_hw       desc_hw[0];
};

struct k3_dma_phy;

struct k3_dma_chan {
        u32                     ccfg;
        struct virt_dma_chan    vc;
        struct k3_dma_phy       *phy;
        struct list_head        node;
        enum dma_transfer_direction dir;
        dma_addr_t              dev_addr;
        enum dma_status         status;
};

struct k3_dma_phy {
        u32                     idx;
        void __iomem            *base;
        struct k3_dma_chan      *vchan;
        struct k3_dma_desc_sw   *ds_run;
        struct k3_dma_desc_sw   *ds_done;
};

struct k3_dma_dev {
        struct dma_device       slave;
        void __iomem            *base;
        struct tasklet_struct   task;
        spinlock_t              lock;
        struct list_head        chan_pending;
        struct k3_dma_phy       *phy;
        struct k3_dma_chan      *chans;
        struct clk              *clk;
        u32                     dma_channels;
        u32                     dma_requests;
};

#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)

static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{
        return container_of(chan, struct k3_dma_chan, vc.chan);
}
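
/*
 * Gate the per-channel enable bit in CX_CFG: set it to let the channel
 * run, clear it to pause the channel without disturbing its other state.
 */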
static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
{
        u32 val = 0;

        if (on) {
                val = readl_relaxed(phy->base + CX_CFG);
                val |= CX_CFG_EN;
                writel_relaxed(val, phy->base + CX_CFG);
        } else {
                val = readl_relaxed(phy->base + CX_CFG);
                val &= ~CX_CFG_EN;
                writel_relaxed(val, phy->base + CX_CFG);
        }
}

static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
{
        u32 val = 0;

        k3_dma_pause_dma(phy, false);

        /* clear any latched interrupt status for this channel */
        val = 0x1 << phy->idx;
        writel_relaxed(val, d->base + INT_TC1_RAW);
        writel_relaxed(val, d->base + INT_ERR1_RAW);
        writel_relaxed(val, d->base + INT_ERR2_RAW);
}

static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
{
        writel_relaxed(hw->lli, phy->base + CX_LLI);
        writel_relaxed(hw->count, phy->base + CX_CNT);
        writel_relaxed(hw->saddr, phy->base + CX_SRC);
        writel_relaxed(hw->daddr, phy->base + CX_DST);
        writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
        writel_relaxed(hw->config, phy->base + CX_CFG);
}

static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
{
        u32 cnt = 0;

        cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
        cnt &= 0xffff;
        return cnt;
}

static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
{
        return readl_relaxed(phy->base + CX_LLI);
}

static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
{
        return readl_relaxed(d->base + CH_STAT);
}
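
/*
 * Globally enable or disable the controller: give every physical channel
 * the same priority, then unmask (or mask) the transfer-complete and
 * error interrupts.
 */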
static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
{
        if (on) {
                /* set same priority */
                writel_relaxed(0x0, d->base + CH_PRI);

                /* unmask irq */
                writel_relaxed(0xffff, d->base + INT_TC1_MASK);
                writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
                writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
        } else {
                /* mask irq */
                writel_relaxed(0x0, d->base + INT_TC1_MASK);
                writel_relaxed(0x0, d->base + INT_ERR1_MASK);
                writel_relaxed(0x0, d->base + INT_ERR2_MASK);
        }
}
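
/*
 * Interrupt handler: walk the pending-status bits, complete the running
 * descriptor of every channel that raised a transfer-complete interrupt,
 * acknowledge the raw status registers, then kick the tasklet so freed
 * physical channels can be reassigned.
 */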
static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
{
        struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
        struct k3_dma_phy *p;
        struct k3_dma_chan *c;
        u32 stat = readl_relaxed(d->base + INT_STAT);
        u32 tc1 = readl_relaxed(d->base + INT_TC1);
        u32 err1 = readl_relaxed(d->base + INT_ERR1);
        u32 err2 = readl_relaxed(d->base + INT_ERR2);
        u32 i, irq_chan = 0;

        while (stat) {
                i = __ffs(stat);
                stat &= (stat - 1);
                if (likely(tc1 & BIT(i))) {
                        unsigned long flags;

                        p = &d->phy[i];
                        c = p->vchan;
                        if (c) {
                                spin_lock_irqsave(&c->vc.lock, flags);
                                vchan_cookie_complete(&p->ds_run->vd);
                                p->ds_done = p->ds_run;
                                spin_unlock_irqrestore(&c->vc.lock, flags);
                        }
                        irq_chan |= BIT(i);
                }
                if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
                        dev_warn(d->slave.dev, "DMA ERR\n");
        }

        writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
        writel_relaxed(err1, d->base + INT_ERR1_RAW);
        writel_relaxed(err2, d->base + INT_ERR2_RAW);

        if (irq_chan) {
                tasklet_schedule(&d->task);
                return IRQ_HANDLED;
        }
        return IRQ_NONE;
}
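
/*
 * Hand the next issued descriptor of a virtual channel to its physical
 * channel. Returns 0 on success, -EAGAIN if no physical channel is
 * assigned, the physical channel is still busy, or nothing is issued.
 */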
static int k3_dma_start_txd(struct k3_dma_chan *c)
{
        struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

        if (!c->phy)
                return -EAGAIN;

        if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
                return -EAGAIN;

        if (vd) {
                struct k3_dma_desc_sw *ds =
                        container_of(vd, struct k3_dma_desc_sw, vd);
                /*
                 * fetch and remove request from vc->desc_issued
                 * so vc->desc_issued only contains desc pending
                 */
                list_del(&ds->vd.node);
                c->phy->ds_run = ds;
                c->phy->ds_done = NULL;
                /* start dma */
                k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
                return 0;
        }
        c->phy->ds_done = NULL;
        c->phy->ds_run = NULL;
        return -EAGAIN;
}
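
/*
 * Deferred work: restart any physical channel whose descriptor just
 * completed, then match virtual channels waiting in d->chan_pending to
 * free physical channels and start them.
 */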
static void k3_dma_tasklet(unsigned long arg)
{
        struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
        struct k3_dma_phy *p;
        struct k3_dma_chan *c, *cn;
        unsigned pch, pch_alloc = 0;

        /* check new dma request of running channel in vc->desc_issued */
        list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
                spin_lock_irq(&c->vc.lock);
                p = c->phy;
                if (p && p->ds_done) {
                        if (k3_dma_start_txd(c)) {
                                /* No current txd associated with this channel */
                                dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
                                /* Mark this channel free */
                                c->phy = NULL;
                                p->vchan = NULL;
                        }
                }
                spin_unlock_irq(&c->vc.lock);
        }

        /* check new channel request in d->chan_pending */
        spin_lock_irq(&d->lock);
        for (pch = 0; pch < d->dma_channels; pch++) {
                p = &d->phy[pch];

                if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
                        c = list_first_entry(&d->chan_pending,
                                struct k3_dma_chan, node);
                        /* remove from d->chan_pending */
                        list_del_init(&c->node);
                        pch_alloc |= 1 << pch;
                        /* Mark this channel allocated */
                        p->vchan = c;
                        c->phy = p;
                        dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
                }
        }
        spin_unlock_irq(&d->lock);

        for (pch = 0; pch < d->dma_channels; pch++) {
                if (pch_alloc & (1 << pch)) {
                        p = &d->phy[pch];
                        c = p->vchan;
                        if (c) {
                                spin_lock_irq(&c->vc.lock);
                                k3_dma_start_txd(c);
                                spin_unlock_irq(&c->vc.lock);
                        }
                }
        }
}

static void k3_dma_free_chan_resources(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        unsigned long flags;

        spin_lock_irqsave(&d->lock, flags);
        list_del_init(&c->node);
        spin_unlock_irqrestore(&d->lock, flags);

        vchan_free_chan_resources(&c->vc);
        c->ccfg = 0;
}
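
/*
 * Report transfer status and residue. For a descriptor still on the
 * issue queue the residue is its full size; for the running descriptor
 * it is the hardware's current count plus the bytes of all LLI entries
 * not yet processed.
 */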
static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *state)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p;
        struct virt_dma_desc *vd;
        enum dma_status ret;
        size_t bytes = 0;
        unsigned long flags;

        ret = dma_cookie_status(&c->vc.chan, cookie, state);
        if (ret == DMA_COMPLETE)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        p = c->phy;
        ret = c->status;

        /*
         * If the cookie is on our issue queue, then the residue is
         * its total size.
         */
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
        } else if ((!p) || (!p->ds_run)) {
                bytes = 0;
        } else {
                struct k3_dma_desc_sw *ds = p->ds_run;
                u32 clli = 0, index = 0;

                bytes = k3_dma_get_curr_cnt(d, p);
                clli = k3_dma_get_curr_lli(p);
                index = (clli - ds->desc_hw_lli) / sizeof(struct k3_desc_hw);
                for (; index < ds->desc_num; index++) {
                        bytes += ds->desc_hw[index].count;
                        /* end of lli */
                        if (!ds->desc_hw[index].lli)
                                break;
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
        dma_set_residue(state, bytes);
        return ret;
}

static void k3_dma_issue_pending(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        /* add request to vc->desc_issued */
        if (vchan_issue_pending(&c->vc)) {
                spin_lock(&d->lock);
                if (!c->phy) {
                        if (list_empty(&c->node)) {
                                /* if new channel, add chan_pending */
                                list_add_tail(&c->node, &d->chan_pending);
                                /* check in tasklet */
                                tasklet_schedule(&d->task);
                                dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
                        }
                }
                spin_unlock(&d->lock);
        } else
                dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
        spin_unlock_irqrestore(&c->vc.lock, flags);
}
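
/*
 * Fill one hardware LLI entry and chain it to the next one, which lives
 * contiguously in the same allocation; the caller zeroes the lli field
 * of the last entry to terminate the chain.
 */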
static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
                        dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
        if ((num + 1) < ds->desc_num)
                ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
                        sizeof(struct k3_desc_hw);
        ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
        ds->desc_hw[num].count = len;
        ds->desc_hw[num].saddr = src;
        ds->desc_hw[num].daddr = dst;
        ds->desc_hw[num].config = ccfg;
}

static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
        struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
        size_t len, unsigned long flags)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_desc_sw *ds;
        size_t copy = 0;
        int num = 0;

        if (!len)
                return NULL;

        num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
        ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
        if (!ds) {
                dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
                return NULL;
        }
        ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
        ds->size = len;
        ds->desc_num = num;
        num = 0;

        if (!c->ccfg) {
                /* default is memtomem, without calling device_config */
                c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
                c->ccfg |= (0xf << 20) | (0xf << 24);   /* burst = 16 */
                c->ccfg |= (0x3 << 12) | (0x3 << 16);   /* width = 64 bit */
        }

        do {
                copy = min_t(size_t, len, DMA_MAX_SIZE);
                k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

                if (c->dir == DMA_MEM_TO_DEV) {
                        src += copy;
                } else if (c->dir == DMA_DEV_TO_MEM) {
                        dst += copy;
                } else {
                        src += copy;
                        dst += copy;
                }
                len -= copy;
        } while (len);

        ds->desc_hw[num-1].lli = 0;     /* end of link */
        return vchan_tx_prep(&c->vc, &ds->vd, flags);
}
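
/*
 * Build a slave scatter-gather descriptor: each sg entry is split into
 * DMA_MAX_SIZE chunks, with the device FIFO address taken from the
 * channel's cached device_config.
 */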
static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
        enum dma_transfer_direction dir, unsigned long flags, void *context)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_desc_sw *ds;
        size_t len, avail, total = 0;
        struct scatterlist *sg;
        dma_addr_t addr, src = 0, dst = 0;
        int num = sglen, i;

        if (sgl == NULL)
                return NULL;

        for_each_sg(sgl, sg, sglen, i) {
                avail = sg_dma_len(sg);
                if (avail > DMA_MAX_SIZE)
                        num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
        }

        ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
        if (!ds) {
                dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
                return NULL;
        }
        ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
        ds->desc_num = num;
        num = 0;

        for_each_sg(sgl, sg, sglen, i) {
                addr = sg_dma_address(sg);
                avail = sg_dma_len(sg);
                total += avail;

                do {
                        len = min_t(size_t, avail, DMA_MAX_SIZE);

                        if (dir == DMA_MEM_TO_DEV) {
                                src = addr;
                                dst = c->dev_addr;
                        } else if (dir == DMA_DEV_TO_MEM) {
                                src = c->dev_addr;
                                dst = addr;
                        }

                        k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

                        addr += len;
                        avail -= len;
                } while (avail);
        }

        ds->desc_hw[num-1].lli = 0;     /* end of link */
        ds->size = total;
        return vchan_tx_prep(&c->vc, &ds->vd, flags);
}
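
/*
 * Cache the channel configuration word in c->ccfg: address-increment and
 * direction flags in the low bits, bus-width fields at bits 12/16,
 * burst-length fields at bits 20/24, and the peripheral request line
 * encoded from bit 4.
 */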
static int k3_dma_config(struct dma_chan *chan,
                         struct dma_slave_config *cfg)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        u32 maxburst = 0, val = 0;
        enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

        if (cfg == NULL)
                return -EINVAL;
        c->dir = cfg->direction;
        if (c->dir == DMA_DEV_TO_MEM) {
                c->ccfg = CX_CFG_DSTINCR;
                c->dev_addr = cfg->src_addr;
                maxburst = cfg->src_maxburst;
                width = cfg->src_addr_width;
        } else if (c->dir == DMA_MEM_TO_DEV) {
                c->ccfg = CX_CFG_SRCINCR;
                c->dev_addr = cfg->dst_addr;
                maxburst = cfg->dst_maxburst;
                width = cfg->dst_addr_width;
        }
        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
        case DMA_SLAVE_BUSWIDTH_8_BYTES:
                val = __ffs(width);
                break;
        default:
                val = 3;
                break;
        }
        c->ccfg |= (val << 12) | (val << 16);

        if ((maxburst == 0) || (maxburst > 16))
                val = 16;
        else
                val = maxburst - 1;
        c->ccfg |= (val << 20) | (val << 24);

        /* the direction flag must match the configured transfer direction */
        if (c->dir == DMA_MEM_TO_DEV)
                c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
        else
                c->ccfg |= CX_CFG_PER2MEM | CX_CFG_EN;

        /* specific request line */
        c->ccfg |= c->vc.chan.chan_id << 4;

        return 0;
}
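
/*
 * Abort everything on the channel: take it off the pending list, stop
 * the physical channel if one is assigned, and free all queued
 * descriptors.
 */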
static int k3_dma_terminate_all(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p = c->phy;
        unsigned long flags;
        LIST_HEAD(head);

        dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

        /* Prevent this channel being scheduled */
        spin_lock(&d->lock);
        list_del_init(&c->node);
        spin_unlock(&d->lock);

        /* Clear the tx descriptor lists */
        spin_lock_irqsave(&c->vc.lock, flags);
        vchan_get_all_descriptors(&c->vc, &head);
        if (p) {
                /* vchan is assigned to a pchan - stop the channel */
                k3_dma_terminate_chan(p, d);
                c->phy = NULL;
                p->vchan = NULL;
                p->ds_run = p->ds_done = NULL;
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

static int k3_dma_transfer_pause(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p = c->phy;

        dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
        if (c->status == DMA_IN_PROGRESS) {
                c->status = DMA_PAUSED;
                if (p) {
                        k3_dma_pause_dma(p, false);
                } else {
                        spin_lock(&d->lock);
                        list_del_init(&c->node);
                        spin_unlock(&d->lock);
                }
        }

        return 0;
}

static int k3_dma_transfer_resume(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p = c->phy;
        unsigned long flags;

        dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
        spin_lock_irqsave(&c->vc.lock, flags);
        if (c->status == DMA_PAUSED) {
                c->status = DMA_IN_PROGRESS;
                if (p) {
                        k3_dma_pause_dma(p, true);
                } else if (!list_empty(&c->vc.desc_issued)) {
                        spin_lock(&d->lock);
                        list_add_tail(&c->node, &d->chan_pending);
                        spin_unlock(&d->lock);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return 0;
}

static void k3_dma_free_desc(struct virt_dma_desc *vd)
{
        struct k3_dma_desc_sw *ds =
                container_of(vd, struct k3_dma_desc_sw, vd);

        kfree(ds);
}

static const struct of_device_id k3_pdma_dt_ids[] = {
        { .compatible = "hisilicon,k3-dma-1.0", },
        {}
};
MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);

static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
                                                struct of_dma *ofdma)
{
        struct k3_dma_dev *d = ofdma->of_dma_data;
        unsigned int request = dma_spec->args[0];

        /* d->chans holds d->dma_requests entries, so request == max is invalid */
        if (request >= d->dma_requests)
                return NULL;

        return dma_get_slave_channel(&(d->chans[request].vc.chan));
}
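
/*
 * Probe: map the controller registers, read the channel/request counts
 * from the device tree, set up the physical and virtual channels, then
 * register with the dmaengine core and the OF DMA helpers.
 */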
static int k3_dma_probe(struct platform_device *op)
{
        struct k3_dma_dev *d;
        const struct of_device_id *of_id;
        struct resource *iores;
        int i, ret, irq = 0;

        iores = platform_get_resource(op, IORESOURCE_MEM, 0);
        if (!iores)
                return -EINVAL;

        d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        d->base = devm_ioremap_resource(&op->dev, iores);
        if (IS_ERR(d->base))
                return PTR_ERR(d->base);

        of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
        if (of_id) {
                of_property_read_u32((&op->dev)->of_node,
                                "dma-channels", &d->dma_channels);
                of_property_read_u32((&op->dev)->of_node,
                                "dma-requests", &d->dma_requests);
        }

        d->clk = devm_clk_get(&op->dev, NULL);
        if (IS_ERR(d->clk)) {
                dev_err(&op->dev, "no dma clk\n");
                return PTR_ERR(d->clk);
        }

        irq = platform_get_irq(op, 0);
        ret = devm_request_irq(&op->dev, irq,
                        k3_dma_int_handler, 0, DRIVER_NAME, d);
        if (ret)
                return ret;

        /* init phy channel */
        d->phy = devm_kzalloc(&op->dev,
                d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
        if (d->phy == NULL)
                return -ENOMEM;

        for (i = 0; i < d->dma_channels; i++) {
                struct k3_dma_phy *p = &d->phy[i];

                p->idx = i;
                p->base = d->base + i * 0x40;
        }

        INIT_LIST_HEAD(&d->slave.channels);
        dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
        dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
        d->slave.dev = &op->dev;
        d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
        d->slave.device_tx_status = k3_dma_tx_status;
        d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
        d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
        d->slave.device_issue_pending = k3_dma_issue_pending;
        d->slave.device_config = k3_dma_config;
        d->slave.device_pause = k3_dma_transfer_pause;
        d->slave.device_resume = k3_dma_transfer_resume;
        d->slave.device_terminate_all = k3_dma_terminate_all;
        d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;

        /* init virtual channel */
        d->chans = devm_kzalloc(&op->dev,
                d->dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL);
        if (d->chans == NULL)
                return -ENOMEM;

        for (i = 0; i < d->dma_requests; i++) {
                struct k3_dma_chan *c = &d->chans[i];

                c->status = DMA_IN_PROGRESS;
                INIT_LIST_HEAD(&c->node);
                c->vc.desc_free = k3_dma_free_desc;
                vchan_init(&c->vc, &d->slave);
        }

        /* Enable clock before accessing registers */
        ret = clk_prepare_enable(d->clk);
        if (ret < 0) {
                dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
                return ret;
        }

        k3_dma_enable_dma(d, true);

        ret = dma_async_device_register(&d->slave);
        if (ret)
                return ret;

        ret = of_dma_controller_register((&op->dev)->of_node,
                                        k3_of_dma_simple_xlate, d);
        if (ret)
                goto of_dma_register_fail;

        spin_lock_init(&d->lock);
        INIT_LIST_HEAD(&d->chan_pending);
        tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
        platform_set_drvdata(op, d);
        dev_info(&op->dev, "initialized\n");

        return 0;

of_dma_register_fail:
        dma_async_device_unregister(&d->slave);
        return ret;
}

static int k3_dma_remove(struct platform_device *op)
{
        struct k3_dma_chan *c, *cn;
        struct k3_dma_dev *d = platform_get_drvdata(op);

        dma_async_device_unregister(&d->slave);
        of_dma_controller_free((&op->dev)->of_node);

        list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
        }
        tasklet_kill(&d->task);
        clk_disable_unprepare(d->clk);
        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int k3_dma_suspend_dev(struct device *dev)
{
        struct k3_dma_dev *d = dev_get_drvdata(dev);
        u32 stat = 0;

        stat = k3_dma_get_chan_stat(d);
        if (stat) {
                dev_warn(d->slave.dev,
                        "chan %d is running, can't suspend\n", stat);
                return -EBUSY;
        }
        k3_dma_enable_dma(d, false);
        clk_disable_unprepare(d->clk);
        return 0;
}

static int k3_dma_resume_dev(struct device *dev)
{
        struct k3_dma_dev *d = dev_get_drvdata(dev);
        int ret;

        ret = clk_prepare_enable(d->clk);
        if (ret < 0) {
                dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
                return ret;
        }
        k3_dma_enable_dma(d, true);
        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);

static struct platform_driver k3_pdma_driver = {
        .driver         = {
                .name   = DRIVER_NAME,
                .pm     = &k3_dma_pmops,
                .of_match_table = k3_pdma_dt_ids,
        },
        .probe          = k3_dma_probe,
        .remove         = k3_dma_remove,
};

module_platform_driver(k3_pdma_driver);

MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
MODULE_ALIAS("platform:k3dma");
MODULE_LICENSE("GPL v2");