2 * offload engine driver for the Marvell XOR engine
3 * Copyright (C) 2007, 2008, Marvell International Ltd.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 #include <linux/init.h>
20 #include <linux/module.h>
21 #include <linux/slab.h>
22 #include <linux/delay.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/spinlock.h>
25 #include <linux/interrupt.h>
26 #include <linux/platform_device.h>
27 #include <linux/memory.h>
28 #include <linux/clk.h>
30 #include <linux/of_irq.h>
31 #include <linux/irqdomain.h>
32 #include <linux/platform_data/dma-mv_xor.h>
34 #include "dmaengine.h"
37 static void mv_xor_issue_pending(struct dma_chan *chan);
39 #define to_mv_xor_chan(chan) \
40 container_of(chan, struct mv_xor_chan, dmachan)
42 #define to_mv_xor_slot(tx) \
43 container_of(tx, struct mv_xor_desc_slot, async_tx)
45 #define mv_chan_to_devp(chan) ((chan)->dmadev.dev)
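/*
 * Hardware descriptor helpers.  Each software slot (struct
 * mv_xor_desc_slot) wraps a hardware descriptor (struct mv_xor_desc)
 * that the engine walks as a linked list through phy_next_desc.
 * mv_desc_init() hands ownership to the DMA engine, enables the
 * end-of-descriptor interrupt and programs the destination and byte
 * count; the helpers below chain descriptors and fill in the source
 * addresses.
 */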
48 static void mv_desc_init(struct mv_xor_desc_slot *desc,
49 dma_addr_t addr, u32 byte_count)
51 struct mv_xor_desc *hw_desc = desc->hw_desc;
53 hw_desc->status = XOR_DESC_DMA_OWNED;
54 hw_desc->phy_next_desc = 0;
55 hw_desc->desc_command = XOR_DESC_EOD_INT_EN;
56 hw_desc->phy_dest_addr = addr;
57 hw_desc->byte_count = byte_count;
60 static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
63 struct mv_xor_desc *hw_desc = desc->hw_desc;
64 BUG_ON(hw_desc->phy_next_desc);
65 hw_desc->phy_next_desc = next_desc_addr;
68 static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
70 struct mv_xor_desc *hw_desc = desc->hw_desc;
71 hw_desc->phy_next_desc = 0;
74 static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
75 int index, dma_addr_t addr)
77 struct mv_xor_desc *hw_desc = desc->hw_desc;
78 hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
79 if (desc->type == DMA_XOR)
80 hw_desc->desc_command |= (1 << index);
83 static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
85 return readl_relaxed(XOR_CURR_DESC(chan));
88 static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
91 writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
94 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
96 u32 val = readl_relaxed(XOR_INTR_MASK(chan));
97 val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
98 writel_relaxed(val, XOR_INTR_MASK(chan));
101 static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
103 u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
104 intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
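/*
 * The interrupt cause and mask registers are shared by the channels
 * of one XOR engine; each channel owns a 16-bit field selected by
 * (chan->idx * 16), hence the shifting and masking in the helpers
 * above and below.
 */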
108 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
110 u32 val = ~(XOR_INT_END_OF_DESC << (chan->idx * 16));
111 dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
112 writel_relaxed(val, XOR_INTR_CAUSE(chan));
115 static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
117 u32 val = 0xFFFF0000 >> (chan->idx * 16);
118 writel_relaxed(val, XOR_INTR_CAUSE(chan));
121 static void mv_set_mode(struct mv_xor_chan *chan,
122 enum dma_transaction_type type)
125 u32 config = readl_relaxed(XOR_CONFIG(chan));
129 op_mode = XOR_OPERATION_MODE_XOR;
132 op_mode = XOR_OPERATION_MODE_MEMCPY;
135 dev_err(mv_chan_to_devp(chan),
136 "error: unsupported operation %d\n",
145 #if defined(__BIG_ENDIAN)
146 config |= XOR_DESCRIPTOR_SWAP;
148 config &= ~XOR_DESCRIPTOR_SWAP;
151 writel_relaxed(config, XOR_CONFIG(chan));
152 chan->current_type = type;
155 static void mv_chan_activate(struct mv_xor_chan *chan)
157 dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
159 /* writel ensures all descriptors are flushed before activation */
160 writel(BIT(0), XOR_ACTIVATION(chan));
163 static char mv_chan_is_busy(struct mv_xor_chan *chan)
165 u32 state = readl_relaxed(XOR_ACTIVATION(chan));
167 state = (state >> 4) & 0x3;
169 return (state == 1) ? 1 : 0;
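/*
 * The channel state is read back from the activation register: the
 * two-bit field at bits [5:4] holds the engine status, and only the
 * value 1 is treated as "busy" here.
 */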
173 * mv_xor_free_slots - flags descriptor slots for reuse
174 * @slot: Slot to free
175 * Caller must hold &mv_chan->lock while calling this function
177 static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
178 struct mv_xor_desc_slot *slot)
180 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
181 __func__, __LINE__, slot);
188 * mv_xor_start_new_chain - program the engine to operate on new chain headed by sw_desc
190 * Caller must hold &mv_chan->lock while calling this function
192 static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
193 struct mv_xor_desc_slot *sw_desc)
195 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
196 __func__, __LINE__, sw_desc);
198 /* set the hardware chain */
199 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
202 mv_xor_issue_pending(&mv_chan->dmachan);
206 mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
207 struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
209 BUG_ON(desc->async_tx.cookie < 0);
211 if (desc->async_tx.cookie > 0) {
212 cookie = desc->async_tx.cookie;
214 /* call the callback (must not sleep or submit new
215 * operations to this channel)
217 if (desc->async_tx.callback)
218 desc->async_tx.callback(
219 desc->async_tx.callback_param);
221 dma_descriptor_unmap(&desc->async_tx);
224 /* run dependent operations */
225 dma_run_dependencies(&desc->async_tx);
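/*
 * Completion actions run from the cleanup path (tasklet context):
 * record the cookie, invoke the client callback (which must not
 * sleep or submit new work to this channel), unmap the buffers
 * attached to the transaction and finally kick off any dependent
 * async_tx operations.
 */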
231 mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
233 struct mv_xor_desc_slot *iter, *_iter;
235 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
236 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
239 if (async_tx_test_ack(&iter->async_tx)) {
240 list_del(&iter->completed_node);
241 mv_xor_free_slots(mv_chan, iter);
248 mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
249 struct mv_xor_chan *mv_chan)
251 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
252 __func__, __LINE__, desc, desc->async_tx.flags);
253 list_del(&desc->chain_node);
254 /* the client is allowed to attach dependent operations until 'ack' is set */
257 if (!async_tx_test_ack(&desc->async_tx)) {
258 /* move this slot to the completed_slots */
259 list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
263 mv_xor_free_slots(mv_chan, desc);
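/*
 * A descriptor the client has not ACKed yet cannot be reused: it is
 * parked on mv_chan->completed_slots and reclaimed later by
 * mv_xor_clean_completed_slots() once async_tx_test_ack() succeeds.
 * ACKed descriptors go straight back to the free pool.
 */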
267 static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
269 struct mv_xor_desc_slot *iter, *_iter;
270 dma_cookie_t cookie = 0;
271 int busy = mv_chan_is_busy(mv_chan);
272 u32 current_desc = mv_chan_get_current_desc(mv_chan);
273 int seen_current = 0;
275 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
276 dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
277 mv_xor_clean_completed_slots(mv_chan);
279 /* free completed slots from the chain starting with
280 * the oldest descriptor
283 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
286 prefetch(&_iter->async_tx);
288 /* do not advance past the current descriptor loaded into the
289 * hardware channel, subsequent descriptors are either in
290 * process or have not been submitted
295 /* stop the search if we reach the current descriptor and the channel is busy */
298 if (iter->async_tx.phys == current_desc) {
304 cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
306 if (mv_xor_clean_slot(iter, mv_chan))
310 if ((busy == 0) && !list_empty(&mv_chan->chain)) {
311 struct mv_xor_desc_slot *chain_head;
312 chain_head = list_entry(mv_chan->chain.next,
313 struct mv_xor_desc_slot,
316 mv_xor_start_new_chain(mv_chan, chain_head);
320 mv_chan->dmachan.completed_cookie = cookie;
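/*
 * The cleanup walks the chain from the oldest descriptor towards the
 * one currently loaded in hardware, running completion actions as it
 * goes.  If the channel has gone idle while descriptors are still
 * queued, the remaining chain head is reprogrammed and the channel
 * restarted; the last completed cookie is then published for
 * dma_cookie_status().
 */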
324 mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
326 spin_lock_bh(&mv_chan->lock);
327 __mv_xor_slot_cleanup(mv_chan);
328 spin_unlock_bh(&mv_chan->lock);
331 static void mv_xor_tasklet(unsigned long data)
333 struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
334 mv_xor_slot_cleanup(chan);
337 static struct mv_xor_desc_slot *
338 mv_xor_alloc_slot(struct mv_xor_chan *mv_chan)
340 struct mv_xor_desc_slot *iter, *_iter;
343 /* start search from the last allocated descriptor;
344 * if a contiguous allocation cannot be found, start searching
345 * from the beginning of the list
349 iter = mv_chan->last_used;
351 iter = list_entry(&mv_chan->all_slots,
352 struct mv_xor_desc_slot,
355 list_for_each_entry_safe_continue(
356 iter, _iter, &mv_chan->all_slots, slot_node) {
359 prefetch(&_iter->async_tx);
360 if (iter->slot_used) {
361 /* give up after finding the first busy slot
362 * on the second pass through the list
369 /* pre-ack descriptor */
370 async_tx_ack(&iter->async_tx);
373 INIT_LIST_HEAD(&iter->chain_node);
374 iter->async_tx.cookie = -EBUSY;
375 mv_chan->last_used = iter;
376 mv_desc_clear_next_desc(iter);
384 /* try to free some slots if the allocation fails */
385 tasklet_schedule(&mv_chan->irq_tasklet);
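/*
 * Slot allocation searches forward from the last slot handed out and
 * retries once from the head of the list.  If no free slot is found,
 * the cleanup tasklet is scheduled so completed descriptors can be
 * reclaimed before the caller tries again.
 */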
390 /************************ DMA engine API functions ****************************/
392 mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
394 struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
395 struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
396 struct mv_xor_desc_slot *old_chain_tail;
398 int new_hw_chain = 1;
400 dev_dbg(mv_chan_to_devp(mv_chan),
401 "%s sw_desc %p: async_tx %p\n",
402 __func__, sw_desc, &sw_desc->async_tx);
404 spin_lock_bh(&mv_chan->lock);
405 cookie = dma_cookie_assign(tx);
407 if (list_empty(&mv_chan->chain))
408 list_add_tail(&sw_desc->chain_node, &mv_chan->chain);
412 old_chain_tail = list_entry(mv_chan->chain.prev,
413 struct mv_xor_desc_slot,
415 list_add_tail(&sw_desc->chain_node, &mv_chan->chain);
417 dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
418 &old_chain_tail->async_tx.phys);
420 /* fix up the hardware chain */
421 mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);
423 /* if the channel is not busy */
424 if (!mv_chan_is_busy(mv_chan)) {
425 u32 current_desc = mv_chan_get_current_desc(mv_chan);
427 * and the current desc is the end of the chain before
428 * the append, then we need to start the channel
430 if (current_desc == old_chain_tail->async_tx.phys)
436 mv_xor_start_new_chain(mv_chan, sw_desc);
438 spin_unlock_bh(&mv_chan->lock);
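/*
 * Submission: under the channel lock a cookie is assigned and the
 * descriptor is appended to the software chain.  When appending to an
 * existing chain the new hardware descriptor is linked behind the old
 * tail, and the engine is only restarted if it is idle and had
 * already stopped at that old tail; otherwise the linked descriptor
 * is picked up as the engine progresses.
 */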
443 /* returns the number of allocated descriptors */
444 static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
449 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
450 struct mv_xor_desc_slot *slot = NULL;
451 int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
453 /* Allocate descriptor slots */
454 idx = mv_chan->slots_allocated;
455 while (idx < num_descs_in_pool) {
456 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
458 dev_info(mv_chan_to_devp(mv_chan),
459 "channel only initialized %d descriptor slots",
463 virt_desc = mv_chan->dma_desc_pool_virt;
464 slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
466 dma_async_tx_descriptor_init(&slot->async_tx, chan);
467 slot->async_tx.tx_submit = mv_xor_tx_submit;
468 INIT_LIST_HEAD(&slot->chain_node);
469 INIT_LIST_HEAD(&slot->slot_node);
470 dma_desc = mv_chan->dma_desc_pool;
471 slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
474 spin_lock_bh(&mv_chan->lock);
475 mv_chan->slots_allocated = idx;
476 list_add_tail(&slot->slot_node, &mv_chan->all_slots);
477 spin_unlock_bh(&mv_chan->lock);
480 if (mv_chan->slots_allocated && !mv_chan->last_used)
481 mv_chan->last_used = list_entry(mv_chan->all_slots.next,
482 struct mv_xor_desc_slot,
485 dev_dbg(mv_chan_to_devp(mv_chan),
486 "allocated %d descriptor slots last_used: %p\n",
487 mv_chan->slots_allocated, mv_chan->last_used);
489 return mv_chan->slots_allocated ? : -ENOMEM;
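/*
 * The descriptor pool is one MV_XOR_POOL_SIZE coherent buffer carved
 * into MV_XOR_SLOT_SIZE hardware descriptors, each paired with a
 * kzalloc'd software slot carrying the async_tx bookkeeping.  A
 * partial allocation is tolerated, hence the "only initialized %d
 * descriptor slots" message above.
 */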
492 static struct dma_async_tx_descriptor *
493 mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
494 unsigned int src_cnt, size_t len, unsigned long flags)
496 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
497 struct mv_xor_desc_slot *sw_desc;
499 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
502 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
504 dev_dbg(mv_chan_to_devp(mv_chan),
505 "%s src_cnt: %d len: %u dest %pad flags: %ld\n",
506 __func__, src_cnt, len, &dest, flags);
508 spin_lock_bh(&mv_chan->lock);
509 sw_desc = mv_xor_alloc_slot(mv_chan);
511 sw_desc->type = DMA_XOR;
512 sw_desc->async_tx.flags = flags;
513 mv_desc_init(sw_desc, dest, len);
514 sw_desc->unmap_src_cnt = src_cnt;
515 sw_desc->unmap_len = len;
517 mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
519 spin_unlock_bh(&mv_chan->lock);
520 dev_dbg(mv_chan_to_devp(mv_chan),
521 "%s sw_desc %p async_tx %p \n",
522 __func__, sw_desc, &sw_desc->async_tx);
523 return sw_desc ? &sw_desc->async_tx : NULL;
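/*
 * Transfers shorter than MV_XOR_MIN_BYTE_COUNT are rejected and the
 * caller must never exceed MV_XOR_MAX_BYTE_COUNT (BUG_ON above).  If
 * no descriptor slot is free, NULL is returned so the client can
 * retry later.
 */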
526 static struct dma_async_tx_descriptor *
527 mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
528 size_t len, unsigned long flags)
531 * A MEMCPY operation is identical to an XOR operation with only
532 * a single source address.
534 return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
537 static void mv_xor_free_chan_resources(struct dma_chan *chan)
539 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
540 struct mv_xor_desc_slot *iter, *_iter;
541 int in_use_descs = 0;
543 mv_xor_slot_cleanup(mv_chan);
545 spin_lock_bh(&mv_chan->lock);
546 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
549 list_del(&iter->chain_node);
551 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
554 list_del(&iter->completed_node);
556 list_for_each_entry_safe_reverse(
557 iter, _iter, &mv_chan->all_slots, slot_node) {
558 list_del(&iter->slot_node);
560 mv_chan->slots_allocated--;
562 mv_chan->last_used = NULL;
564 dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
565 __func__, mv_chan->slots_allocated);
566 spin_unlock_bh(&mv_chan->lock);
569 dev_err(mv_chan_to_devp(mv_chan),
570 "freeing %d in use descriptors!\n", in_use_descs);
574 * mv_xor_status - poll the status of an XOR transaction
575 * @chan: XOR channel handle
576 * @cookie: XOR transaction identifier
577 * @txstate: XOR transactions state holder (or NULL)
579 static enum dma_status mv_xor_status(struct dma_chan *chan,
581 struct dma_tx_state *txstate)
583 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
586 ret = dma_cookie_status(chan, cookie, txstate);
587 if (ret == DMA_COMPLETE) {
588 mv_xor_clean_completed_slots(mv_chan);
591 mv_xor_slot_cleanup(mv_chan);
593 return dma_cookie_status(chan, cookie, txstate);
596 static void mv_dump_xor_regs(struct mv_xor_chan *chan)
600 val = readl_relaxed(XOR_CONFIG(chan));
601 dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);
603 val = readl_relaxed(XOR_ACTIVATION(chan));
604 dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);
606 val = readl_relaxed(XOR_INTR_CAUSE(chan));
607 dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);
609 val = readl_relaxed(XOR_INTR_MASK(chan));
610 dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);
612 val = readl_relaxed(XOR_ERROR_CAUSE(chan));
613 dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);
615 val = readl_relaxed(XOR_ERROR_ADDR(chan));
616 dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
619 static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
622 if (intr_cause & XOR_INT_ERR_DECODE) {
623 dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
627 dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
628 chan->idx, intr_cause);
630 mv_dump_xor_regs(chan);
634 static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
636 struct mv_xor_chan *chan = data;
637 u32 intr_cause = mv_chan_get_intr_cause(chan);
639 dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
641 if (intr_cause & XOR_INTR_ERRORS)
642 mv_xor_err_interrupt_handler(chan, intr_cause);
644 tasklet_schedule(&chan->irq_tasklet);
646 mv_xor_device_clear_eoc_cause(chan);
651 static void mv_xor_issue_pending(struct dma_chan *chan)
653 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
655 if (mv_chan->pending >= MV_XOR_THRESHOLD) {
656 mv_chan->pending = 0;
657 mv_chan_activate(mv_chan);
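/*
 * Activation is batched: the channel is only kicked once at least
 * MV_XOR_THRESHOLD submissions are pending, which limits the number
 * of doorbell writes to the activation register.
 */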
662 * Perform a transaction to verify the HW works.
665 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
669 dma_addr_t src_dma, dest_dma;
670 struct dma_chan *dma_chan;
672 struct dma_async_tx_descriptor *tx;
673 struct dmaengine_unmap_data *unmap;
676 src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
680 dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
686 /* Fill in src buffer */
687 for (i = 0; i < PAGE_SIZE; i++)
688 ((u8 *) src)[i] = (u8)i;
690 dma_chan = &mv_chan->dmachan;
691 if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
696 unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
702 src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
703 PAGE_SIZE, DMA_TO_DEVICE);
705 unmap->addr[0] = src_dma;
707 dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
708 PAGE_SIZE, DMA_FROM_DEVICE);
710 unmap->addr[1] = dest_dma;
712 unmap->len = PAGE_SIZE;
714 tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
716 cookie = mv_xor_tx_submit(tx);
717 mv_xor_issue_pending(dma_chan);
721 if (mv_xor_status(dma_chan, cookie, NULL) !=
723 dev_err(dma_chan->device->dev,
724 "Self-test copy timed out, disabling\n");
729 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
730 PAGE_SIZE, DMA_FROM_DEVICE);
731 if (memcmp(src, dest, PAGE_SIZE)) {
732 dev_err(dma_chan->device->dev,
733 "Self-test copy failed compare, disabling\n");
739 dmaengine_unmap_put(unmap);
740 mv_xor_free_chan_resources(dma_chan);
747 #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
749 mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
753 struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
754 dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
756 struct dma_async_tx_descriptor *tx;
757 struct dmaengine_unmap_data *unmap;
758 struct dma_chan *dma_chan;
763 int src_count = MV_XOR_NUM_SRC_TEST;
765 for (src_idx = 0; src_idx < src_count; src_idx++) {
766 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
767 if (!xor_srcs[src_idx]) {
769 __free_page(xor_srcs[src_idx]);
774 dest = alloc_page(GFP_KERNEL);
777 __free_page(xor_srcs[src_idx]);
781 /* Fill in src buffers */
782 for (src_idx = 0; src_idx < src_count; src_idx++) {
783 u8 *ptr = page_address(xor_srcs[src_idx]);
784 for (i = 0; i < PAGE_SIZE; i++)
785 ptr[i] = (1 << src_idx);
788 for (src_idx = 0; src_idx < src_count; src_idx++)
789 cmp_byte ^= (u8) (1 << src_idx);
791 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
792 (cmp_byte << 8) | cmp_byte;
794 memset(page_address(dest), 0, PAGE_SIZE);
796 dma_chan = &mv_chan->dmachan;
797 if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
802 unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
810 for (i = 0; i < src_count; i++) {
811 unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
812 0, PAGE_SIZE, DMA_TO_DEVICE);
813 dma_srcs[i] = unmap->addr[i];
817 unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
819 dest_dma = unmap->addr[src_count];
821 unmap->len = PAGE_SIZE;
823 tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
824 src_count, PAGE_SIZE, 0);
826 cookie = mv_xor_tx_submit(tx);
827 mv_xor_issue_pending(dma_chan);
831 if (mv_xor_status(dma_chan, cookie, NULL) !=
833 dev_err(dma_chan->device->dev,
834 "Self-test xor timed out, disabling\n");
839 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
840 PAGE_SIZE, DMA_FROM_DEVICE);
841 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
842 u32 *ptr = page_address(dest);
843 if (ptr[i] != cmp_word) {
844 dev_err(dma_chan->device->dev,
845 "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
846 i, ptr[i], cmp_word);
853 dmaengine_unmap_put(unmap);
854 mv_xor_free_chan_resources(dma_chan);
858 __free_page(xor_srcs[src_idx]);
863 /* This driver does not implement any of the optional DMA operations. */
865 mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
871 static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
873 struct dma_chan *chan, *_chan;
874 struct device *dev = mv_chan->dmadev.dev;
876 dma_async_device_unregister(&mv_chan->dmadev);
878 dma_free_coherent(dev, MV_XOR_POOL_SIZE,
879 mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
881 list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
883 list_del(&chan->device_node);
886 free_irq(mv_chan->irq, mv_chan);
891 static struct mv_xor_chan *
892 mv_xor_channel_add(struct mv_xor_device *xordev,
893 struct platform_device *pdev,
894 int idx, dma_cap_mask_t cap_mask, int irq)
897 struct mv_xor_chan *mv_chan;
898 struct dma_device *dma_dev;
900 mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
902 return ERR_PTR(-ENOMEM);
907 dma_dev = &mv_chan->dmadev;
909 /* allocate coherent memory for hardware descriptors
910 * note: writecombine gives slightly better performance, but
911 * requires that we explicitly flush the writes
913 mv_chan->dma_desc_pool_virt =
914 dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
915 &mv_chan->dma_desc_pool, GFP_KERNEL);
916 if (!mv_chan->dma_desc_pool_virt)
917 return ERR_PTR(-ENOMEM);
919 /* discover transaction capabilities from the platform data */
920 dma_dev->cap_mask = cap_mask;
922 INIT_LIST_HEAD(&dma_dev->channels);
924 /* set base routines */
925 dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
926 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
927 dma_dev->device_tx_status = mv_xor_status;
928 dma_dev->device_issue_pending = mv_xor_issue_pending;
929 dma_dev->device_control = mv_xor_control;
930 dma_dev->dev = &pdev->dev;
932 /* set prep routines based on capability */
933 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
934 dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
935 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
936 dma_dev->max_xor = 8;
937 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
940 mv_chan->mmr_base = xordev->xor_base;
941 mv_chan->mmr_high_base = xordev->xor_high_base;
942 tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)mv_chan);
945 /* clear errors before enabling interrupts */
946 mv_xor_device_clear_err_status(mv_chan);
948 ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
949 0, dev_name(&pdev->dev), mv_chan);
953 mv_chan_unmask_interrupts(mv_chan);
955 mv_set_mode(mv_chan, DMA_XOR);
957 spin_lock_init(&mv_chan->lock);
958 INIT_LIST_HEAD(&mv_chan->chain);
959 INIT_LIST_HEAD(&mv_chan->completed_slots);
960 INIT_LIST_HEAD(&mv_chan->all_slots);
961 mv_chan->dmachan.device = dma_dev;
962 dma_cookie_init(&mv_chan->dmachan);
964 list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
966 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
967 ret = mv_xor_memcpy_self_test(mv_chan);
968 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
973 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
974 ret = mv_xor_xor_self_test(mv_chan);
975 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
980 dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
981 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
982 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
983 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
985 dma_async_device_register(dma_dev);
989 free_irq(mv_chan->irq, mv_chan);
991 dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
992 mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
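/*
 * Channel bring-up order: allocate the coherent descriptor pool,
 * clear any stale error status, request the interrupt, unmask the
 * per-channel interrupts, default the engine to XOR mode, run the
 * memcpy/xor self-tests for each advertised capability and finally
 * register with the dmaengine core.  Failures unwind the IRQ and the
 * descriptor pool.
 */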
997 mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
998 const struct mbus_dram_target_info *dram)
1000 void __iomem *base = xordev->xor_high_base;
1004 for (i = 0; i < 8; i++) {
1005 writel(0, base + WINDOW_BASE(i));
1006 writel(0, base + WINDOW_SIZE(i));
1008 writel(0, base + WINDOW_REMAP_HIGH(i));
1011 for (i = 0; i < dram->num_cs; i++) {
1012 const struct mbus_dram_window *cs = dram->cs + i;
1014 writel((cs->base & 0xffff0000) |
1015 (cs->mbus_attr << 8) |
1016 dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1017 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1019 win_enable |= (1 << i);
1020 win_enable |= 3 << (16 + (2 * i));
1023 writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1024 writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1025 writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1026 writel(0, base + WINDOW_OVERRIDE_CTRL(1));
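/*
 * All eight address decoding windows are cleared first, then one
 * window per DRAM chip-select is programmed with the target id,
 * attributes and base address, plus a size mask of (cs->size - 1).
 * The enable value sets the valid bit for each window and writes the
 * two-bit fields above bit 16, which here appear to grant full
 * read/write access; the same value goes to both BAR-enable
 * registers and the override controls are cleared.
 */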
1029 static int mv_xor_probe(struct platform_device *pdev)
1031 const struct mbus_dram_target_info *dram;
1032 struct mv_xor_device *xordev;
1033 struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
1034 struct resource *res;
1037 dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
1039 xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
1043 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1047 xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
1048 resource_size(res));
1049 if (!xordev->xor_base)
1052 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1056 xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1057 resource_size(res));
1058 if (!xordev->xor_high_base)
1061 platform_set_drvdata(pdev, xordev);
1064 * (Re-)program MBUS remapping windows if we are asked to.
1066 dram = mv_mbus_dram_info();
1068 mv_xor_conf_mbus_windows(xordev, dram);
1070 /* Not all platforms can gate the clock, so it is not
1071 * an error if the clock does not exist.
1073 xordev->clk = clk_get(&pdev->dev, NULL);
1074 if (!IS_ERR(xordev->clk))
1075 clk_prepare_enable(xordev->clk);
1077 if (pdev->dev.of_node) {
1078 struct device_node *np;
1081 for_each_child_of_node(pdev->dev.of_node, np) {
1082 struct mv_xor_chan *chan;
1083 dma_cap_mask_t cap_mask;
1086 dma_cap_zero(cap_mask);
1087 if (of_property_read_bool(np, "dmacap,memcpy"))
1088 dma_cap_set(DMA_MEMCPY, cap_mask);
1089 if (of_property_read_bool(np, "dmacap,xor"))
1090 dma_cap_set(DMA_XOR, cap_mask);
1091 if (of_property_read_bool(np, "dmacap,interrupt"))
1092 dma_cap_set(DMA_INTERRUPT, cap_mask);
1094 irq = irq_of_parse_and_map(np, 0);
1097 goto err_channel_add;
1100 chan = mv_xor_channel_add(xordev, pdev, i,
1103 ret = PTR_ERR(chan);
1104 irq_dispose_mapping(irq);
1105 goto err_channel_add;
1108 xordev->channels[i] = chan;
1111 } else if (pdata && pdata->channels) {
1112 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1113 struct mv_xor_channel_data *cd;
1114 struct mv_xor_chan *chan;
1117 cd = &pdata->channels[i];
1120 goto err_channel_add;
1123 irq = platform_get_irq(pdev, i);
1126 goto err_channel_add;
1129 chan = mv_xor_channel_add(xordev, pdev, i,
1132 ret = PTR_ERR(chan);
1133 goto err_channel_add;
1136 xordev->channels[i] = chan;
1143 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
1144 if (xordev->channels[i]) {
1145 mv_xor_channel_remove(xordev->channels[i]);
1146 if (pdev->dev.of_node)
1147 irq_dispose_mapping(xordev->channels[i]->irq);
1150 if (!IS_ERR(xordev->clk)) {
1151 clk_disable_unprepare(xordev->clk);
1152 clk_put(xordev->clk);
1158 static int mv_xor_remove(struct platform_device *pdev)
1160 struct mv_xor_device *xordev = platform_get_drvdata(pdev);
1163 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1164 if (xordev->channels[i])
1165 mv_xor_channel_remove(xordev->channels[i]);
1168 if (!IS_ERR(xordev->clk)) {
1169 clk_disable_unprepare(xordev->clk);
1170 clk_put(xordev->clk);
1177 static struct of_device_id mv_xor_dt_ids[] = {
1178 { .compatible = "marvell,orion-xor", },
1181 MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
1184 static struct platform_driver mv_xor_driver = {
1185 .probe = mv_xor_probe,
1186 .remove = mv_xor_remove,
1188 .owner = THIS_MODULE,
1189 .name = MV_XOR_NAME,
1190 .of_match_table = of_match_ptr(mv_xor_dt_ids),
1195 static int __init mv_xor_init(void)
1197 return platform_driver_register(&mv_xor_driver);
1199 module_init(mv_xor_init);
1201 /* it's currently unsafe to unload this module */
1203 static void __exit mv_xor_exit(void)
1205 platform_driver_unregister(&mv_xor_driver);
1209 module_exit(mv_xor_exit);
1212 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1213 MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1214 MODULE_LICENSE("GPL");