/*
 * Qualcomm Technologies HIDMA DMA engine low level code
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/iopoll.h>
#include <linux/kfifo.h>
#include <linux/bitops.h>

#include "hidma.h"
#define HIDMA_EVRE_SIZE			16	/* each EVRE is 16 bytes */

#define HIDMA_TRCA_CTRLSTS_REG		0x000
#define HIDMA_TRCA_RING_LOW_REG		0x008
#define HIDMA_TRCA_RING_HIGH_REG	0x00C
#define HIDMA_TRCA_RING_LEN_REG		0x010
#define HIDMA_TRCA_DOORBELL_REG		0x400

#define HIDMA_EVCA_CTRLSTS_REG		0x000
#define HIDMA_EVCA_INTCTRL_REG		0x004
#define HIDMA_EVCA_RING_LOW_REG		0x008
#define HIDMA_EVCA_RING_HIGH_REG	0x00C
#define HIDMA_EVCA_RING_LEN_REG		0x010
#define HIDMA_EVCA_WRITE_PTR_REG	0x020
#define HIDMA_EVCA_DOORBELL_REG		0x400

#define HIDMA_EVCA_IRQ_STAT_REG		0x100
#define HIDMA_EVCA_IRQ_CLR_REG		0x108
#define HIDMA_EVCA_IRQ_EN_REG		0x110

#define HIDMA_EVRE_CFG_IDX		0

#define HIDMA_EVRE_ERRINFO_BIT_POS	24
#define HIDMA_EVRE_CODE_BIT_POS		28

#define HIDMA_EVRE_ERRINFO_MASK		GENMASK(3, 0)
#define HIDMA_EVRE_CODE_MASK		GENMASK(3, 0)

#define HIDMA_CH_CONTROL_MASK		GENMASK(7, 0)
#define HIDMA_CH_STATE_MASK		GENMASK(7, 0)
#define HIDMA_CH_STATE_BIT_POS		0x8

#define HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS		0
#define HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS		1
#define HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS	9
#define HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS	10
#define HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS	11
#define HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS	14

#define ENABLE_IRQS (BIT(HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS)	| \
		     BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS))
#define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size)	\
do {								\
	iter += size;						\
	if (iter >= ring_size)					\
		iter -= ring_size;				\
} while (0)
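/*
 * For example (illustrative numbers): with 16-byte EVREs on a four-entry
 * ring (ring_size = 64), an iterator at offset 48 advances to 64 and wraps
 * back to 0:
 *
 *	u32 iter = 48;
 *	HIDMA_INCREMENT_ITERATOR(iter, HIDMA_EVRE_SIZE, 64);  // iter == 0
 *
 * A single subtraction is enough because size never exceeds ring_size.
 */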
#define HIDMA_CH_STATE(val)	\
	((val >> HIDMA_CH_STATE_BIT_POS) & HIDMA_CH_STATE_MASK)

#define HIDMA_ERR_INT_MASK					\
	(BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS)	| \
	 BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS)	| \
	 BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)		| \
	 BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)	| \
	 BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS))
enum ch_command {
	HIDMA_CH_DISABLE = 0,
	HIDMA_CH_ENABLE = 1,
	HIDMA_CH_SUSPEND = 2,
	HIDMA_CH_RESET = 9,
};

enum ch_state {
	HIDMA_CH_DISABLED = 0,
	HIDMA_CH_ENABLED = 1,
	HIDMA_CH_RUNNING = 2,
	HIDMA_CH_SUSPENDED = 3,
	HIDMA_CH_STOPPED = 4,
};

enum tre_type {
	HIDMA_TRE_MEMCPY = 3,
};

enum err_code {
	HIDMA_EVRE_STATUS_COMPLETE = 1,
	HIDMA_EVRE_STATUS_ERROR = 4,
};
static int hidma_is_chan_enabled(int state)
{
	switch (state) {
	case HIDMA_CH_ENABLED:
	case HIDMA_CH_RUNNING:
		return true;
	default:
		return false;
	}
}
void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch)
{
	struct hidma_tre *tre;

	if (tre_ch >= lldev->nr_tres) {
		dev_err(lldev->dev, "invalid TRE number in free:%d", tre_ch);
		return;
	}

	tre = &lldev->trepool[tre_ch];
	if (atomic_read(&tre->allocated) != true) {
		dev_err(lldev->dev, "trying to free an unused TRE:%d", tre_ch);
		return;
	}

	atomic_set(&tre->allocated, 0);
}
int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
		     void (*callback)(void *data), void *data, u32 *tre_ch)
{
	unsigned int i;
	struct hidma_tre *tre;
	u32 *tre_local;

	if (!tre_ch || !lldev)
		return -EINVAL;

	/* need to have at least one empty spot in the queue */
	for (i = 0; i < lldev->nr_tres - 1; i++) {
		if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1))
			break;
	}

	if (i == (lldev->nr_tres - 1))
		return -ENOMEM;

	tre = &lldev->trepool[i];
	tre->dma_sig = sig;
	tre->dev_name = dev_name;
	tre->callback = callback;
	tre->data = data;
	tre->idx = i;
	tre->queued = 0;
	tre->err_code = 0;
	tre->err_info = 0;
	tre->lldev = lldev;
	tre_local = &tre->tre_local[0];
	tre_local[HIDMA_TRE_CFG_IDX] = HIDMA_TRE_MEMCPY;
	tre_local[HIDMA_TRE_CFG_IDX] |= (lldev->chidx & 0xFF) << 8;
	tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16);	/* set IEOB */
	*tre_ch = i;

	return 0;
}
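/*
 * Note on the allocation scheme above: atomic_add_unless() moves
 * tre->allocated from 0 to 1 only when it is currently 0, so the scan over
 * the pool doubles as a lock-free allocator, and the loop bound of
 * nr_tres - 1 deliberately keeps at least one TRE slot free at all times.
 */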
/*
 * Multiple TREs may be queued and waiting in the pending queue.
 */
static void hidma_ll_tre_complete(unsigned long arg)
{
	struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
	struct hidma_tre *tre;

	while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) {
		/* call the user if it has been read by the hardware */
		if (tre->callback)
			tre->callback(tre->data);
	}
}
static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info,
				u8 err_code)
{
	struct hidma_tre *tre;
	unsigned long flags;
	u32 tre_iterator;

	spin_lock_irqsave(&lldev->lock, flags);

	tre_iterator = lldev->tre_processed_off;
	tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
	if (!tre) {
		spin_unlock_irqrestore(&lldev->lock, flags);
		dev_warn(lldev->dev, "tre_index [%d] and tre out of sync\n",
			 tre_iterator / HIDMA_TRE_SIZE);
		return -EINVAL;
	}
	lldev->pending_tre_list[tre->tre_index] = NULL;

	/*
	 * Keep track of pending TREs that SW is expecting to receive
	 * from HW. We got one now. Decrement our counter.
	 */
	if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
		dev_warn(lldev->dev, "tre count mismatch on completion");
		atomic_set(&lldev->pending_tre_count, 0);
	}

	HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
				 lldev->tre_ring_size);

	lldev->tre_processed_off = tre_iterator;
	spin_unlock_irqrestore(&lldev->lock, flags);

	tre->err_info = err_info;
	tre->err_code = err_code;
	tre->queued = 0;

	kfifo_put(&lldev->handoff_fifo, tre);
	tasklet_schedule(&lldev->task);

	return 0;
}
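/*
 * Note that completion is split in two: this function only records the
 * result and advances the bookkeeping under the lock; the TRE itself is
 * pushed into handoff_fifo and the tasklet is scheduled, so the user
 * callback runs later from hidma_ll_tre_complete() in softirq context and
 * the hard IRQ path stays short.
 */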
/*
 * Called to handle the interrupt for the channel.
 * Return a positive number if TREs or EVREs were consumed on this run
 * or if there are still pending TREs or EVREs, and 0 if there was
 * nothing to consume.
 */
static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
{
	u32 evre_ring_size = lldev->evre_ring_size;
	u32 err_info, err_code, evre_write_off;
	u32 evre_iterator;
	u32 num_completed = 0;

	evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
	evre_iterator = lldev->evre_processed_off;

	if ((evre_write_off > evre_ring_size) ||
	    (evre_write_off % HIDMA_EVRE_SIZE)) {
		dev_err(lldev->dev, "HW reports invalid EVRE write offset\n");
		return 0;
	}

	/*
	 * By the time control reaches here the number of EVREs and TREs
	 * may not match. Only consume the ones that hardware told us.
	 */
	while ((evre_iterator != evre_write_off)) {
		u32 *current_evre = lldev->evre_ring + evre_iterator;
		u32 cfg;

		cfg = current_evre[HIDMA_EVRE_CFG_IDX];
		err_info = cfg >> HIDMA_EVRE_ERRINFO_BIT_POS;
		err_info &= HIDMA_EVRE_ERRINFO_MASK;
		err_code =
		    (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;

		if (hidma_post_completed(lldev, err_info, err_code))
			break;

		HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
					 evre_ring_size);

		/*
		 * Read the new event descriptor written by the HW.
		 * As we are processing the delivered events, other events
		 * get queued to the SW for processing.
		 */
		evre_write_off =
		    readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
		num_completed++;

		/*
		 * An error interrupt might have arrived while we are
		 * processing the completed interrupt.
		 */
		if (!hidma_ll_isenabled(lldev))
			break;
	}

	if (num_completed) {
		u32 evre_read_off = (lldev->evre_processed_off +
				     HIDMA_EVRE_SIZE * num_completed);
		evre_read_off = evre_read_off % evre_ring_size;
		writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);

		/* record the last processed evre offset */
		lldev->evre_processed_off = evre_read_off;
	}

	return num_completed;
}
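/*
 * Doorbell arithmetic above, with illustrative numbers: if
 * evre_processed_off was 32 and three 16-byte EVREs were consumed on a
 * 64-byte ring, the offset acknowledged to the hardware is
 * (32 + 3 * 16) % 64 = 16.
 */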
void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
			       u8 err_code)
{
	while (atomic_read(&lldev->pending_tre_count)) {
		if (hidma_post_completed(lldev, err_info, err_code))
			break;
	}
}
static int hidma_ll_reset(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_RESET << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	/*
	 * Allow the DMA logic to quiesce after reset: poll every 1ms,
	 * up to a 10ms total timeout.
	 */
	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "transfer channel did not reset\n");
		return ret;
	}

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_RESET << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	/*
	 * Allow the DMA logic to quiesce after reset: poll every 1ms,
	 * up to a 10ms total timeout.
	 */
	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "event channel did not reset\n");
		return ret;
	}

	lldev->trch_state = HIDMA_CH_DISABLED;
	lldev->evch_state = HIDMA_CH_DISABLED;
	return 0;
}
/*
 * The interrupt handler for HIDMA will try to consume as many pending
 * EVREs from the event queue as possible. Each EVRE has an associated
 * TRE that holds the user interface parameters. The EVRE reports the
 * result of the transaction. Hardware guarantees ordering between EVREs
 * and TREs. We use the last processed offset to figure out which TRE is
 * associated with which EVRE. If two TREs are consumed by HW, the EVREs
 * are in order in the event ring.
 *
 * This handler makes one pass to consume EVREs. Other EVREs may be
 * delivered while it is working; it will try to consume incoming EVREs
 * one more time and then return.
 *
 * For unprocessed EVREs, hardware will trigger another interrupt until
 * all the interrupt bits are cleared.
 *
 * Hardware guarantees that by the time the interrupt is observed, all
 * data transactions in flight are delivered to their respective places
 * and are visible to the CPU.
 *
 * On-demand paging for IOMMU is only supported for PCIe via PRI
 * (Page Request Interface), not for HIDMA. All other hardware instances
 * including HIDMA work on pinned DMA addresses.
 *
 * HIDMA is not aware of IOMMU presence since it follows the DMA API. All
 * IOMMU latency will be built into the data movement time. By the time
 * the interrupt happens, IOMMU lookups + data movement have already taken
 * place.
 *
 * While the first read in a typical PCI endpoint ISR traditionally flushes
 * all outstanding requests to the destination, this concept does not apply
 * here for this HW.
 */
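/*
 * A minimal sketch of the offset bookkeeping described above: because
 * completions arrive in order, the oldest unacknowledged EVRE always
 * corresponds to the oldest pending TRE, so one pair of processed offsets
 * is enough to pair them up:
 *
 *	tre  = lldev->pending_tre_list[lldev->tre_processed_off /
 *				       HIDMA_TRE_SIZE];
 *	evre = lldev->evre_ring + lldev->evre_processed_off;
 */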
static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
{
	if (cause & HIDMA_ERR_INT_MASK) {
		dev_err(lldev->dev, "error 0x%x, disabling...\n",
			cause);

		/* Clear out pending interrupts */
		writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

		/* No further submissions. */
		hidma_ll_disable(lldev);

		/* Driver completes the txn and notifies the client. */
		hidma_cleanup_pending_tre(lldev, 0xFF,
					  HIDMA_EVRE_STATUS_ERROR);
		return;
	}

	/*
	 * Fine tuned for this HW...
	 *
	 * This ISR has been designed for this particular hardware. Relaxed
	 * read and write accessors are used for performance reasons due to
	 * interrupt delivery guarantees. Do not copy this code blindly and
	 * expect it to work.
	 *
	 * Try to consume as many EVREs as possible.
	 */
	hidma_handle_tre_completion(lldev);

	/* We consumed TREs or there are pending TREs or EVREs. */
	writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
}
irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;
	u32 status;
	u32 enable;
	u32 cause;

	status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	cause = status & enable;

	while (cause) {
		hidma_ll_int_handler_internal(lldev, cause);

		/*
		 * Another interrupt might have arrived while we are
		 * processing this one. Read the new cause.
		 */
		status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
		enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
		cause = status & enable;
	}

	return IRQ_HANDLED;
}
irqreturn_t hidma_ll_inthandler_msi(int chirq, void *arg, int cause)
{
	struct hidma_lldev *lldev = arg;

	hidma_ll_int_handler_internal(lldev, cause);
	return IRQ_HANDLED;
}
int hidma_ll_enable(struct hidma_lldev *lldev)
{
	int ret;
	u32 val;

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_ENABLE << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "event channel did not get enabled\n");
		return ret;
	}

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_ENABLE << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "transfer channel did not get enabled\n");
		return ret;
	}

	lldev->trch_state = HIDMA_CH_ENABLED;
	lldev->evch_state = HIDMA_CH_ENABLED;

	return 0;
}
void hidma_ll_start(struct hidma_lldev *lldev)
{
	unsigned long irqflags;

	spin_lock_irqsave(&lldev->lock, irqflags);
	writel(lldev->tre_write_offset, lldev->trca + HIDMA_TRCA_DOORBELL_REG);
	spin_unlock_irqrestore(&lldev->lock, irqflags);
}
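/*
 * Typical submission flow for a single memcpy, as a sketch (error handling
 * omitted; "my_done" and "ctx" are hypothetical client names):
 *
 *	u32 tre_ch;
 *
 *	hidma_ll_request(lldev, 0, "client", my_done, ctx, &tre_ch);
 *	hidma_ll_set_transfer_params(lldev, tre_ch, src, dest, len, 0);
 *	hidma_ll_queue_request(lldev, tre_ch);
 *	hidma_ll_start(lldev);		(rings the TRCA doorbell)
 */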
bool hidma_ll_isenabled(struct hidma_lldev *lldev)
{
	u32 val;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	lldev->trch_state = HIDMA_CH_STATE(val);
	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	lldev->evch_state = HIDMA_CH_STATE(val);

	/* the device counts as enabled only if both channels are enabled */
	if (hidma_is_chan_enabled(lldev->trch_state) &&
	    hidma_is_chan_enabled(lldev->evch_state))
		return true;

	return false;
}
void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
{
	struct hidma_tre *tre;
	unsigned long flags;

	tre = &lldev->trepool[tre_ch];

	/* copy the TRE into its location in the TRE ring */
	spin_lock_irqsave(&lldev->lock, flags);
	tre->tre_index = lldev->tre_write_offset / HIDMA_TRE_SIZE;
	lldev->pending_tre_list[tre->tre_index] = tre;
	memcpy(lldev->tre_ring + lldev->tre_write_offset,
	       &tre->tre_local[0], HIDMA_TRE_SIZE);
	tre->err_code = 0;
	tre->err_info = 0;
	tre->queued = 1;
	atomic_inc(&lldev->pending_tre_count);
	lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
				  % lldev->tre_ring_size;
	spin_unlock_irqrestore(&lldev->lock, flags);
}
/*
 * Note that even though we stop this channel, a transaction already in
 * flight will still complete and invoke its callback; stopping only
 * prevents further requests from being made.
 */
int hidma_ll_disable(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	/* The channel needs to be in working state */
	if (!hidma_ll_isenabled(lldev))
		return 0;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_SUSPEND << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	/*
	 * Wait for the suspend to be confirmed: poll every 1ms, up to a
	 * 10ms total timeout.
	 */
	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
				 1000, 10000);
	if (ret)
		return ret;

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_SUSPEND << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	/*
	 * Wait for the suspend to be confirmed: poll every 1ms, up to a
	 * 10ms total timeout.
	 */
	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
				 1000, 10000);
	if (ret)
		return ret;

	lldev->trch_state = HIDMA_CH_SUSPENDED;
	lldev->evch_state = HIDMA_CH_SUSPENDED;
	return 0;
}
void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
				  dma_addr_t src, dma_addr_t dest, u32 len,
				  u32 flags)
{
	struct hidma_tre *tre;
	u32 *tre_local;

	if (tre_ch >= lldev->nr_tres) {
		dev_err(lldev->dev, "invalid TRE number in transfer params:%d",
			tre_ch);
		return;
	}

	tre = &lldev->trepool[tre_ch];
	if (atomic_read(&tre->allocated) != true) {
		dev_err(lldev->dev, "trying to set params on an unused TRE:%d",
			tre_ch);
		return;
	}

	tre_local = &tre->tre_local[0];
	tre_local[HIDMA_TRE_LEN_IDX] = len;
	tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
	tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
	tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest);
	tre_local[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest);
	tre->int_flags = flags;
}
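/*
 * For example (illustrative value): src = 0x120000000 is stored as
 * lower_32_bits(src) == 0x20000000 in HIDMA_TRE_SRC_LOW_IDX and
 * upper_32_bits(src) == 0x1 in HIDMA_TRE_SRC_HI_IDX, so 64-bit DMA
 * addresses survive the 32-bit TRE word layout.
 */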
/*
 * Called during initialization and after an error condition
 * to restore hardware state.
 */
int hidma_ll_setup(struct hidma_lldev *lldev)
{
	int rc;
	u64 addr;
	u32 val;
	u32 nr_tres = lldev->nr_tres;

	atomic_set(&lldev->pending_tre_count, 0);
	lldev->tre_processed_off = 0;
	lldev->evre_processed_off = 0;
	lldev->tre_write_offset = 0;

	/* disable interrupts */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	/* clear all pending interrupts */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

	rc = hidma_ll_reset(lldev);
	if (rc)
		return rc;

	/*
	 * Clear all pending interrupts again.
	 * Otherwise, we observe reset complete interrupts.
	 */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

	/* disable interrupts again after reset */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	addr = lldev->tre_dma;
	writel(lower_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_LOW_REG);
	writel(upper_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_HIGH_REG);
	writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG);

	addr = lldev->evre_dma;
	writel(lower_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_LOW_REG);
	writel(upper_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_HIGH_REG);
	writel(HIDMA_EVRE_SIZE * nr_tres,
	       lldev->evca + HIDMA_EVCA_RING_LEN_REG);

	/* configure interrupts */
	hidma_ll_setup_irq(lldev, lldev->msi_support);

	rc = hidma_ll_enable(lldev);
	if (rc)
		return rc;

	return 0;
}
void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi)
{
	u32 val;

	lldev->msi_support = msi;

	/* disable interrupts again after reset */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	/* support IRQ by default */
	val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
	val &= ~0xF;
	if (!lldev->msi_support)
		val = val | 0x1;
	writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);

	/* clear all pending interrupts and enable them */
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
}
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
				  void __iomem *trca, void __iomem *evca,
				  u8 chidx)
{
	u32 required_bytes;
	struct hidma_lldev *lldev;
	int rc;
	size_t sz;

	if (!trca || !evca || !dev || !nr_tres)
		return NULL;

	/* need at least four TREs */
	if (nr_tres < 4)
		return NULL;

	/* need an extra space */
	nr_tres += 1;

	lldev = devm_kzalloc(dev, sizeof(struct hidma_lldev), GFP_KERNEL);
	if (!lldev)
		return NULL;

	lldev->evca = evca;
	lldev->trca = trca;
	lldev->dev = dev;
	sz = sizeof(struct hidma_tre);
	lldev->trepool = devm_kcalloc(lldev->dev, nr_tres, sz, GFP_KERNEL);
	if (!lldev->trepool)
		return NULL;

	required_bytes = sizeof(lldev->pending_tre_list[0]);
	lldev->pending_tre_list = devm_kcalloc(dev, nr_tres, required_bytes,
					       GFP_KERNEL);
	if (!lldev->pending_tre_list)
		return NULL;

	sz = (HIDMA_TRE_SIZE + 1) * nr_tres;
	lldev->tre_ring = dmam_alloc_coherent(dev, sz, &lldev->tre_dma,
					      GFP_KERNEL);
	if (!lldev->tre_ring)
		return NULL;

	memset(lldev->tre_ring, 0, (HIDMA_TRE_SIZE + 1) * nr_tres);
	lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres;
	lldev->nr_tres = nr_tres;

	/* the TRE ring has to be TRE_SIZE aligned */
	if (!IS_ALIGNED(lldev->tre_dma, HIDMA_TRE_SIZE)) {
		u8 tre_ring_shift;

		tre_ring_shift = lldev->tre_dma % HIDMA_TRE_SIZE;
		tre_ring_shift = HIDMA_TRE_SIZE - tre_ring_shift;
		lldev->tre_dma += tre_ring_shift;
		lldev->tre_ring += tre_ring_shift;
	}

	sz = (HIDMA_EVRE_SIZE + 1) * nr_tres;
	lldev->evre_ring = dmam_alloc_coherent(dev, sz, &lldev->evre_dma,
					       GFP_KERNEL);
	if (!lldev->evre_ring)
		return NULL;

	memset(lldev->evre_ring, 0, (HIDMA_EVRE_SIZE + 1) * nr_tres);
	lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres;

	/* the EVRE ring has to be EVRE_SIZE aligned */
	if (!IS_ALIGNED(lldev->evre_dma, HIDMA_EVRE_SIZE)) {
		u8 evre_ring_shift;

		evre_ring_shift = lldev->evre_dma % HIDMA_EVRE_SIZE;
		evre_ring_shift = HIDMA_EVRE_SIZE - evre_ring_shift;
		lldev->evre_dma += evre_ring_shift;
		lldev->evre_ring += evre_ring_shift;
	}
	lldev->nr_tres = nr_tres;
	lldev->chidx = chidx;

	sz = nr_tres * sizeof(struct hidma_tre *);
	rc = kfifo_alloc(&lldev->handoff_fifo, sz, GFP_KERNEL);
	if (rc)
		return NULL;

	rc = hidma_ll_setup(lldev);
	if (rc)
		return NULL;

	spin_lock_init(&lldev->lock);
	tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
	lldev->initialized = 1;
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	return lldev;
}
int hidma_ll_uninit(struct hidma_lldev *lldev)
{
	u32 required_bytes;
	int rc = 0;
	u32 val;

	if (!lldev)
		return -ENODEV;

	if (!lldev->initialized)
		return 0;

	lldev->initialized = 0;

	required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
	tasklet_kill(&lldev->task);
	memset(lldev->trepool, 0, required_bytes);
	lldev->trepool = NULL;
	atomic_set(&lldev->pending_tre_count, 0);
	lldev->tre_write_offset = 0;

	rc = hidma_ll_reset(lldev);

	/*
	 * Clear all pending interrupts again.
	 * Otherwise, we observe reset complete interrupts.
	 */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	return rc;
}
enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch)
{
	enum dma_status ret = DMA_ERROR;
	struct hidma_tre *tre;
	unsigned long flags;
	u8 err_code;

	spin_lock_irqsave(&lldev->lock, flags);

	tre = &lldev->trepool[tre_ch];
	err_code = tre->err_code;

	if (err_code & HIDMA_EVRE_STATUS_COMPLETE)
		ret = DMA_COMPLETE;
	else if (err_code & HIDMA_EVRE_STATUS_ERROR)
		ret = DMA_ERROR;
	else
		ret = DMA_IN_PROGRESS;
	spin_unlock_irqrestore(&lldev->lock, flags);

	return ret;
}
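/*
 * A caller can poll a submitted TRE to completion with hidma_ll_status(),
 * e.g. (sketch only; busy-waiting is for illustration):
 *
 *	while (hidma_ll_status(lldev, tre_ch) == DMA_IN_PROGRESS)
 *		cpu_relax();
 */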