2 * offload engine driver for the Intel XScale series of I/O processors
3 * Copyright © 2006, Intel Corporation.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * This driver supports the asynchronous DMA copy and RAID engines available
22 * on the Intel XScale(R) family of I/O Processors (IOP 32x, 33x, 13xx)
25 #include <linux/init.h>
26 #include <linux/module.h>
27 #include <linux/delay.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/spinlock.h>
30 #include <linux/interrupt.h>
31 #include <linux/platform_device.h>
32 #include <linux/memory.h>
33 #include <linux/ioport.h>
34 #include <linux/raid/pq.h>
35 #include <linux/slab.h>
37 #include <mach/adma.h>
39 #include "dmaengine.h"
41 #define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
42 #define to_iop_adma_device(dev) \
43 container_of(dev, struct iop_adma_device, common)
44 #define tx_to_iop_adma_slot(tx) \
45 container_of(tx, struct iop_adma_desc_slot, async_tx)
48 * iop_adma_free_slots - flags descriptor slots for reuse
50 * Caller must hold &iop_chan->lock while calling this function
52 static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
54 int stride = slot->slots_per_op;
57 slot->slots_per_op = 0;
58 slot = list_entry(slot->slot_node.next,
59 struct iop_adma_desc_slot,
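/* undo the DMA mappings of a completed copy/xor descriptor, honouring the
 * DMA_COMPL_SKIP_*_UNMAP flags the client set on the transaction
 */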
65 iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
67 struct dma_async_tx_descriptor *tx = &desc->async_tx;
68 struct iop_adma_desc_slot *unmap = desc->group_head;
69 struct device *dev = &iop_chan->device->pdev->dev;
70 u32 len = unmap->unmap_len;
71 enum dma_ctrl_flags flags = tx->flags;
76 src_cnt = unmap->unmap_src_cnt;
77 dest = iop_desc_get_dest_addr(unmap, iop_chan);
78 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
79 enum dma_data_direction dir;
81 if (src_cnt > 1) /* is xor? */
82 dir = DMA_BIDIRECTIONAL;
84 dir = DMA_FROM_DEVICE;
86 dma_unmap_page(dev, dest, len, dir);
89 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
91 addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt);
94 dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
97 desc->group_head = NULL;
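/* undo the DMA mappings of a completed pq descriptor; for pq-validate
 * operations p and q were mapped as sources and are unmapped accordingly
 */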
101 iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
103 struct dma_async_tx_descriptor *tx = &desc->async_tx;
104 struct iop_adma_desc_slot *unmap = desc->group_head;
105 struct device *dev = &iop_chan->device->pdev->dev;
106 u32 len = unmap->unmap_len;
107 enum dma_ctrl_flags flags = tx->flags;
108 u32 src_cnt = unmap->unmap_src_cnt;
109 dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan);
110 dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan);
113 if (tx->flags & DMA_PREP_CONTINUE)
116 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) {
117 dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL);
118 dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL);
121 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
124 for (i = 0; i < src_cnt; i++) {
125 addr = iop_desc_get_src_addr(unmap, iop_chan, i);
126 dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
128 if (desc->pq_check_result) {
129 dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE);
130 dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE);
134 desc->group_head = NULL;
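/* complete a descriptor: report its cookie, invoke the client callback,
 * unmap the buffers and start any dependent transactions
 */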
139 iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
140 struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
142 struct dma_async_tx_descriptor *tx = &desc->async_tx;
144 BUG_ON(tx->cookie < 0);
145 if (tx->cookie > 0) {
149 /* call the callback (must not sleep or submit new
150 * operations to this channel)
153 tx->callback(tx->callback_param);
155 /* unmap dma addresses
156 * (unmap_single vs unmap_page?)
158 if (desc->group_head && desc->unmap_len) {
159 if (iop_desc_is_pq(desc))
160 iop_desc_unmap_pq(iop_chan, desc);
162 iop_desc_unmap(iop_chan, desc);
166 /* run dependent operations */
167 dma_run_dependencies(tx);
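/* return a completed descriptor's slots to the free pool; descriptors that
 * have not been acked, or that terminate the chain, are left in place
 */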
173 iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
174 struct iop_adma_chan *iop_chan)
176 /* the client is allowed to attach dependent operations
179 if (!async_tx_test_ack(&desc->async_tx))
182 /* leave the last descriptor in the chain
183 * so we can append to it
185 if (desc->chain_node.next == &iop_chan->chain)
188 dev_dbg(iop_chan->device->common.dev,
189 "\tfree slot: %d slots_per_op: %d\n",
190 desc->idx, desc->slots_per_op);
192 list_del(&desc->chain_node);
193 iop_adma_free_slots(desc);
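/* walk the chain from the oldest descriptor, retire everything the hardware
 * has finished and advance the completed cookie; caller holds iop_chan->lock
 */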
198 static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
200 struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
201 dma_cookie_t cookie = 0;
202 u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
203 int busy = iop_chan_is_busy(iop_chan);
204 int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
206 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
207 /* free completed slots from the chain starting with
208 * the oldest descriptor
210 list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
212 pr_debug("\tcookie: %d slot: %d busy: %d "
213 "this_desc: %#x next_desc: %#x ack: %d\n",
214 iter->async_tx.cookie, iter->idx, busy,
215 iter->async_tx.phys, iop_desc_get_next_desc(iter),
216 async_tx_test_ack(&iter->async_tx));
218 prefetch(&_iter->async_tx);
220 /* do not advance past the current descriptor loaded into the
221 * hardware channel, subsequent descriptors are either in
222 * progress or have not been submitted
227 /* stop the search if we reach the current descriptor and the
228 * channel is busy, or if it appears that the current descriptor
229 * needs to be re-read (i.e. has been appended to)
231 if (iter->async_tx.phys == current_desc) {
232 BUG_ON(seen_current++);
233 if (busy || iop_desc_get_next_desc(iter))
237 /* detect the start of a group transaction */
238 if (!slot_cnt && !slots_per_op) {
239 slot_cnt = iter->slot_cnt;
240 slots_per_op = iter->slots_per_op;
241 if (slot_cnt <= slots_per_op) {
248 pr_debug("\tgroup++\n");
251 slot_cnt -= slots_per_op;
254 /* all the members of a group are complete */
255 if (slots_per_op != 0 && slot_cnt == 0) {
256 struct iop_adma_desc_slot *grp_iter, *_grp_iter;
257 int end_of_chain = 0;
258 pr_debug("\tgroup end\n");
260 /* collect the total results */
261 if (grp_start->xor_check_result) {
262 u32 zero_sum_result = 0;
263 slot_cnt = grp_start->slot_cnt;
264 grp_iter = grp_start;
266 list_for_each_entry_from(grp_iter,
267 &iop_chan->chain, chain_node) {
269 iop_desc_get_zero_result(grp_iter);
270 pr_debug("\titer%d result: %d\n",
271 grp_iter->idx, zero_sum_result);
272 slot_cnt -= slots_per_op;
276 pr_debug("\tgrp_start->xor_check_result: %p\n",
277 grp_start->xor_check_result);
278 *grp_start->xor_check_result = zero_sum_result;
281 /* clean up the group */
282 slot_cnt = grp_start->slot_cnt;
283 grp_iter = grp_start;
284 list_for_each_entry_safe_from(grp_iter, _grp_iter,
285 &iop_chan->chain, chain_node) {
286 cookie = iop_adma_run_tx_complete_actions(
287 grp_iter, iop_chan, cookie);
289 slot_cnt -= slots_per_op;
290 end_of_chain = iop_adma_clean_slot(grp_iter,
293 if (slot_cnt == 0 || end_of_chain)
297 /* the group should be complete at this point */
306 } else if (slots_per_op) /* wait for group completion */
309 /* write back zero sum results (single descriptor case) */
310 if (iter->xor_check_result && iter->async_tx.cookie)
311 *iter->xor_check_result =
312 iop_desc_get_zero_result(iter);
314 cookie = iop_adma_run_tx_complete_actions(
315 iter, iop_chan, cookie);
317 if (iop_adma_clean_slot(iter, iop_chan))
322 iop_chan->common.completed_cookie = cookie;
323 pr_debug("\tcompleted cookie %d\n", cookie);
328 iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
330 spin_lock_bh(&iop_chan->lock);
331 __iop_adma_slot_cleanup(iop_chan);
332 spin_unlock_bh(&iop_chan->lock);
335 static void iop_adma_tasklet(unsigned long data)
337 struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;
339 * lockdep will flag dependency submissions as potentially
340 * recursive locking; this is not the case as a dependency
341 * submission will never recurse into a channel's submit routine.
342 * There are checks in async_tx.c to prevent this.
344 spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING);
345 __iop_adma_slot_cleanup(iop_chan);
346 spin_unlock(&iop_chan->lock);
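/* find num_slots contiguous, correctly aligned free descriptors; the search
 * starts at last_used, may retry from the head of all_slots and falls back
 * to direct reclaim before giving up
 */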
349 static struct iop_adma_desc_slot *
350 iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
353 struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
355 int slots_found, retry = 0;
357 * start search from the last allocated descriptor;
358 * if a contiguous allocation cannot be found, start searching
359 * from the beginning of the list
364 iter = iop_chan->last_used;
366 iter = list_entry(&iop_chan->all_slots,
367 struct iop_adma_desc_slot,
370 list_for_each_entry_safe_continue(
371 iter, _iter, &iop_chan->all_slots, slot_node) {
373 prefetch(&_iter->async_tx);
374 if (iter->slots_per_op) {
375 /* give up after finding the first busy slot
376 * on the second pass through the list
385 /* start the allocation if the slot is correctly aligned */
386 if (!slots_found++) {
387 if (iop_desc_is_aligned(iter, slots_per_op))
395 if (slots_found == num_slots) {
396 struct iop_adma_desc_slot *alloc_tail = NULL;
397 struct iop_adma_desc_slot *last_used = NULL;
401 dev_dbg(iop_chan->device->common.dev,
402 "allocated slot: %d "
403 "(desc %p phys: %#x) slots_per_op %d\n",
404 iter->idx, iter->hw_desc,
405 iter->async_tx.phys, slots_per_op);
407 /* pre-ack all but the last descriptor */
408 if (num_slots != slots_per_op)
409 async_tx_ack(&iter->async_tx);
411 list_add_tail(&iter->chain_node, &chain);
413 iter->async_tx.cookie = 0;
414 iter->slot_cnt = num_slots;
415 iter->xor_check_result = NULL;
416 for (i = 0; i < slots_per_op; i++) {
417 iter->slots_per_op = slots_per_op - i;
419 iter = list_entry(iter->slot_node.next,
420 struct iop_adma_desc_slot,
423 num_slots -= slots_per_op;
425 alloc_tail->group_head = alloc_start;
426 alloc_tail->async_tx.cookie = -EBUSY;
427 list_splice(&chain, &alloc_tail->tx_list);
428 iop_chan->last_used = last_used;
429 iop_desc_clear_next_desc(alloc_start);
430 iop_desc_clear_next_desc(alloc_tail);
437 /* perform direct reclaim if the allocation fails */
438 __iop_adma_slot_cleanup(iop_chan);
444 iop_desc_assign_cookie(struct iop_adma_chan *iop_chan,
445 struct iop_adma_desc_slot *desc)
447 dma_cookie_t cookie = iop_chan->common.cookie;
451 iop_chan->common.cookie = desc->async_tx.cookie = cookie;
455 static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
457 dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
460 if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
461 iop_chan->pending = 0;
462 iop_chan_append(iop_chan);
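/* chain a prepared descriptor group onto the channel: assign a cookie,
 * splice the software list after the current tail, link the hardware
 * descriptors and kick the engine once the pending threshold is crossed
 */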
467 iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
469 struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
470 struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
471 struct iop_adma_desc_slot *grp_start, *old_chain_tail;
477 grp_start = sw_desc->group_head;
478 slot_cnt = grp_start->slot_cnt;
479 slots_per_op = grp_start->slots_per_op;
481 spin_lock_bh(&iop_chan->lock);
482 cookie = iop_desc_assign_cookie(iop_chan, sw_desc);
484 old_chain_tail = list_entry(iop_chan->chain.prev,
485 struct iop_adma_desc_slot, chain_node);
486 list_splice_init(&sw_desc->tx_list,
487 &old_chain_tail->chain_node);
489 /* fix up the hardware chain */
490 next_dma = grp_start->async_tx.phys;
491 iop_desc_set_next_desc(old_chain_tail, next_dma);
492 BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */
494 /* check for pre-chained descriptors */
495 iop_paranoia(iop_desc_get_next_desc(sw_desc));
497 /* increment the pending count by the number of slots
498 * memcpy operations have a 1:1 (slot:operation) relation
499 * other operations are heavier and will pop the threshold
502 iop_chan->pending += slot_cnt;
503 iop_adma_check_threshold(iop_chan);
504 spin_unlock_bh(&iop_chan->lock);
506 dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
507 __func__, sw_desc->async_tx.cookie, sw_desc->idx);
512 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
513 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
516 * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
517 * @chan: allocate descriptor resources for this channel
520 * Note: We keep the slots for 1 operation on iop_chan->chain at all times. To
521 * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
522 * greater than 2x the number of slots needed to satisfy a device->max_xor
525 static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
529 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
530 struct iop_adma_desc_slot *slot = NULL;
531 int init = iop_chan->slots_allocated ? 0 : 1;
532 struct iop_adma_platform_data *plat_data =
533 iop_chan->device->pdev->dev.platform_data;
534 int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;
536 /* Allocate descriptor slots */
538 idx = iop_chan->slots_allocated;
539 if (idx == num_descs_in_pool)
542 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
544 printk(KERN_INFO "IOP ADMA Channel only initialized"
545 " %d descriptor slots", idx);
548 hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
549 slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
551 dma_async_tx_descriptor_init(&slot->async_tx, chan);
552 slot->async_tx.tx_submit = iop_adma_tx_submit;
553 INIT_LIST_HEAD(&slot->tx_list);
554 INIT_LIST_HEAD(&slot->chain_node);
555 INIT_LIST_HEAD(&slot->slot_node);
556 hw_desc = (char *) iop_chan->device->dma_desc_pool;
557 slot->async_tx.phys =
558 (dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
561 spin_lock_bh(&iop_chan->lock);
562 iop_chan->slots_allocated++;
563 list_add_tail(&slot->slot_node, &iop_chan->all_slots);
564 spin_unlock_bh(&iop_chan->lock);
565 } while (iop_chan->slots_allocated < num_descs_in_pool);
567 if (idx && !iop_chan->last_used)
568 iop_chan->last_used = list_entry(iop_chan->all_slots.next,
569 struct iop_adma_desc_slot,
572 dev_dbg(iop_chan->device->common.dev,
573 "allocated %d descriptor slots last_used: %p\n",
574 iop_chan->slots_allocated, iop_chan->last_used);
576 /* initialize the channel and the chain with a null operation */
578 if (dma_has_cap(DMA_MEMCPY,
579 iop_chan->device->common.cap_mask))
580 iop_chan_start_null_memcpy(iop_chan);
581 else if (dma_has_cap(DMA_XOR,
582 iop_chan->device->common.cap_mask))
583 iop_chan_start_null_xor(iop_chan);
588 return (idx > 0) ? idx : -ENOMEM;
591 static struct dma_async_tx_descriptor *
592 iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
594 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
595 struct iop_adma_desc_slot *sw_desc, *grp_start;
596 int slot_cnt, slots_per_op;
598 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
600 spin_lock_bh(&iop_chan->lock);
601 slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
602 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
604 grp_start = sw_desc->group_head;
605 iop_desc_init_interrupt(grp_start, iop_chan);
606 grp_start->unmap_len = 0;
607 sw_desc->async_tx.flags = flags;
609 spin_unlock_bh(&iop_chan->lock);
611 return sw_desc ? &sw_desc->async_tx : NULL;
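/* build a descriptor chain for a single memory copy of up to
 * IOP_ADMA_MAX_BYTE_COUNT bytes
 */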
614 static struct dma_async_tx_descriptor *
615 iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
616 dma_addr_t dma_src, size_t len, unsigned long flags)
618 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
619 struct iop_adma_desc_slot *sw_desc, *grp_start;
620 int slot_cnt, slots_per_op;
624 BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
626 dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
629 spin_lock_bh(&iop_chan->lock);
630 slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
631 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
633 grp_start = sw_desc->group_head;
634 iop_desc_init_memcpy(grp_start, flags);
635 iop_desc_set_byte_count(grp_start, iop_chan, len);
636 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
637 iop_desc_set_memcpy_src_addr(grp_start, dma_src);
638 sw_desc->unmap_src_cnt = 1;
639 sw_desc->unmap_len = len;
640 sw_desc->async_tx.flags = flags;
642 spin_unlock_bh(&iop_chan->lock);
644 return sw_desc ? &sw_desc->async_tx : NULL;
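/* build a descriptor chain that fills the destination with a repeated
 * byte value (block fill)
 */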
647 static struct dma_async_tx_descriptor *
648 iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
649 int value, size_t len, unsigned long flags)
651 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
652 struct iop_adma_desc_slot *sw_desc, *grp_start;
653 int slot_cnt, slots_per_op;
657 BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
659 dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
662 spin_lock_bh(&iop_chan->lock);
663 slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
664 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
666 grp_start = sw_desc->group_head;
667 iop_desc_init_memset(grp_start, flags);
668 iop_desc_set_byte_count(grp_start, iop_chan, len);
669 iop_desc_set_block_fill_val(grp_start, value);
670 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
671 sw_desc->unmap_src_cnt = 1;
672 sw_desc->unmap_len = len;
673 sw_desc->async_tx.flags = flags;
675 spin_unlock_bh(&iop_chan->lock);
677 return sw_desc ? &sw_desc->async_tx : NULL;
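/* build a descriptor chain that xors src_cnt source buffers into dma_dest,
 * up to IOP_ADMA_XOR_MAX_BYTE_COUNT bytes
 */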
680 static struct dma_async_tx_descriptor *
681 iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
682 dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
685 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
686 struct iop_adma_desc_slot *sw_desc, *grp_start;
687 int slot_cnt, slots_per_op;
691 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
693 dev_dbg(iop_chan->device->common.dev,
694 "%s src_cnt: %d len: %u flags: %lx\n",
695 __func__, src_cnt, len, flags);
697 spin_lock_bh(&iop_chan->lock);
698 slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
699 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
701 grp_start = sw_desc->group_head;
702 iop_desc_init_xor(grp_start, src_cnt, flags);
703 iop_desc_set_byte_count(grp_start, iop_chan, len);
704 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
705 sw_desc->unmap_src_cnt = src_cnt;
706 sw_desc->unmap_len = len;
707 sw_desc->async_tx.flags = flags;
709 iop_desc_set_xor_src_addr(grp_start, src_cnt,
712 spin_unlock_bh(&iop_chan->lock);
714 return sw_desc ? &sw_desc->async_tx : NULL;
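/* build a zero-sum descriptor chain that xors the sources together and
 * writes the accumulated zero-sum result to *result on completion
 */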
717 static struct dma_async_tx_descriptor *
718 iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
719 unsigned int src_cnt, size_t len, u32 *result,
722 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
723 struct iop_adma_desc_slot *sw_desc, *grp_start;
724 int slot_cnt, slots_per_op;
729 dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
730 __func__, src_cnt, len);
732 spin_lock_bh(&iop_chan->lock);
733 slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
734 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
736 grp_start = sw_desc->group_head;
737 iop_desc_init_zero_sum(grp_start, src_cnt, flags);
738 iop_desc_set_zero_sum_byte_count(grp_start, len);
739 grp_start->xor_check_result = result;
740 pr_debug("\t%s: grp_start->xor_check_result: %p\n",
741 __func__, grp_start->xor_check_result);
742 sw_desc->unmap_src_cnt = src_cnt;
743 sw_desc->unmap_len = len;
744 sw_desc->async_tx.flags = flags;
746 iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
749 spin_unlock_bh(&iop_chan->lock);
751 return sw_desc ? &sw_desc->async_tx : NULL;
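/* build a descriptor chain that generates the p (xor) and q (RAID-6
 * syndrome) destinations; continuation flags add the old p/q values as
 * extra sources per the dma_maxpq rules
 */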
754 static struct dma_async_tx_descriptor *
755 iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
756 unsigned int src_cnt, const unsigned char *scf, size_t len,
759 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
760 struct iop_adma_desc_slot *sw_desc, *g;
761 int slot_cnt, slots_per_op;
766 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
768 dev_dbg(iop_chan->device->common.dev,
769 "%s src_cnt: %d len: %u flags: %lx\n",
770 __func__, src_cnt, len, flags);
772 if (dmaf_p_disabled_continue(flags))
773 continue_srcs = 1+src_cnt;
774 else if (dmaf_continue(flags))
775 continue_srcs = 3+src_cnt;
777 continue_srcs = 0+src_cnt;
779 spin_lock_bh(&iop_chan->lock);
780 slot_cnt = iop_chan_pq_slot_count(len, continue_srcs, &slots_per_op);
781 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
785 g = sw_desc->group_head;
786 iop_desc_set_byte_count(g, iop_chan, len);
788 /* even if P is disabled its destination address (bits
789 * [3:0]) must match Q. It is ok if P points to an
790 * invalid address, it won't be written.
792 if (flags & DMA_PREP_PQ_DISABLE_P)
793 dst[0] = dst[1] & 0x7;
795 iop_desc_set_pq_addr(g, dst);
796 sw_desc->unmap_src_cnt = src_cnt;
797 sw_desc->unmap_len = len;
798 sw_desc->async_tx.flags = flags;
799 for (i = 0; i < src_cnt; i++)
800 iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);
802 /* if we are continuing a previous operation factor in
803 * the old p and q values, see the comment for dma_maxpq
804 * in include/linux/dmaengine.h
806 if (dmaf_p_disabled_continue(flags))
807 iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
808 else if (dmaf_continue(flags)) {
809 iop_desc_set_pq_src_addr(g, i++, dst[0], 0);
810 iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
811 iop_desc_set_pq_src_addr(g, i++, dst[1], 0);
813 iop_desc_init_pq(g, i, flags);
815 spin_unlock_bh(&iop_chan->lock);
817 return sw_desc ? &sw_desc->async_tx : NULL;
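/* build a descriptor chain that validates existing p and q buffers against
 * the sources and reports mismatches through *pqres
 */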
820 static struct dma_async_tx_descriptor *
821 iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
822 unsigned int src_cnt, const unsigned char *scf,
823 size_t len, enum sum_check_flags *pqres,
826 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
827 struct iop_adma_desc_slot *sw_desc, *g;
828 int slot_cnt, slots_per_op;
832 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
834 dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
835 __func__, src_cnt, len);
837 spin_lock_bh(&iop_chan->lock);
838 slot_cnt = iop_chan_pq_zero_sum_slot_count(len, src_cnt + 2, &slots_per_op);
839 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
841 /* for validate operations p and q are tagged onto the
842 * end of the source list
844 int pq_idx = src_cnt;
846 g = sw_desc->group_head;
847 iop_desc_init_pq_zero_sum(g, src_cnt+2, flags);
848 iop_desc_set_pq_zero_sum_byte_count(g, len);
849 g->pq_check_result = pqres;
850 pr_debug("\t%s: g->pq_check_result: %p\n",
851 __func__, g->pq_check_result);
852 sw_desc->unmap_src_cnt = src_cnt+2;
853 sw_desc->unmap_len = len;
854 sw_desc->async_tx.flags = flags;
856 iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
859 iop_desc_set_pq_zero_sum_addr(g, pq_idx, src);
861 spin_unlock_bh(&iop_chan->lock);
863 return sw_desc ? &sw_desc->async_tx : NULL;
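/* drain the channel and release every descriptor slot allocated by
 * iop_adma_alloc_chan_resources(); warns if descriptors are still in use
 */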
866 static void iop_adma_free_chan_resources(struct dma_chan *chan)
868 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
869 struct iop_adma_desc_slot *iter, *_iter;
870 int in_use_descs = 0;
872 iop_adma_slot_cleanup(iop_chan);
874 spin_lock_bh(&iop_chan->lock);
875 list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
878 list_del(&iter->chain_node);
880 list_for_each_entry_safe_reverse(
881 iter, _iter, &iop_chan->all_slots, slot_node) {
882 list_del(&iter->slot_node);
884 iop_chan->slots_allocated--;
886 iop_chan->last_used = NULL;
888 dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
889 __func__, iop_chan->slots_allocated);
890 spin_unlock_bh(&iop_chan->lock);
892 /* one is ok since we left it there on purpose */
893 if (in_use_descs > 1)
894 printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
899 * iop_adma_status - poll the status of an ADMA transaction
900 * @chan: ADMA channel handle
901 * @cookie: ADMA transaction identifier
902 * @txstate: a holder for the current state of the channel or NULL
904 static enum dma_status iop_adma_status(struct dma_chan *chan,
906 struct dma_tx_state *txstate)
908 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
909 dma_cookie_t last_used;
910 dma_cookie_t last_complete;
913 last_used = chan->cookie;
914 last_complete = chan->completed_cookie;
915 dma_set_tx_state(txstate, last_complete, last_used, 0);
916 ret = dma_async_is_complete(cookie, last_complete, last_used);
917 if (ret == DMA_SUCCESS)
920 iop_adma_slot_cleanup(iop_chan);
922 last_used = chan->cookie;
923 last_complete = chan->completed_cookie;
924 dma_set_tx_state(txstate, last_complete, last_used, 0);
926 return dma_async_is_complete(cookie, last_complete, last_used);
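/* the end-of-transfer and end-of-chain interrupts clear their status bits
 * and defer descriptor cleanup to the channel tasklet
 */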
929 static irqreturn_t iop_adma_eot_handler(int irq, void *data)
931 struct iop_adma_chan *chan = data;
933 dev_dbg(chan->device->common.dev, "%s\n", __func__);
935 tasklet_schedule(&chan->irq_tasklet);
937 iop_adma_device_clear_eot_status(chan);
942 static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
944 struct iop_adma_chan *chan = data;
946 dev_dbg(chan->device->common.dev, "%s\n", __func__);
948 tasklet_schedule(&chan->irq_tasklet);
950 iop_adma_device_clear_eoc_status(chan);
955 static irqreturn_t iop_adma_err_handler(int irq, void *data)
957 struct iop_adma_chan *chan = data;
958 unsigned long status = iop_chan_get_status(chan);
960 dev_printk(KERN_ERR, chan->device->common.dev,
961 "error ( %s%s%s%s%s%s%s)\n",
962 iop_is_err_int_parity(status, chan) ? "int_parity " : "",
963 iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
964 iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
965 iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
966 iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
967 iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
968 iop_is_err_split_tx(status, chan) ? "split_tx " : "");
970 iop_adma_device_clear_err_status(chan);
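/* flush descriptors accumulated below the pending threshold to the
 * hardware chain immediately
 */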
977 static void iop_adma_issue_pending(struct dma_chan *chan)
979 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
981 if (iop_chan->pending) {
982 iop_chan->pending = 0;
983 iop_chan_append(iop_chan);
988 * Perform a transaction to verify the HW works.
990 #define IOP_ADMA_TEST_SIZE 2000
992 static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
996 dma_addr_t src_dma, dest_dma;
997 struct dma_chan *dma_chan;
999 struct dma_async_tx_descriptor *tx;
1001 struct iop_adma_chan *iop_chan;
1003 dev_dbg(device->common.dev, "%s\n", __func__);
1005 src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
1008 dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
1014 /* Fill in src buffer */
1015 for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
1016 ((u8 *) src)[i] = (u8)i;
1018 /* Start copy, using first DMA channel */
1019 dma_chan = container_of(device->common.channels.next,
1022 if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
1027 dest_dma = dma_map_single(dma_chan->device->dev, dest,
1028 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
1029 src_dma = dma_map_single(dma_chan->device->dev, src,
1030 IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
1031 tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
1033 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1035 cookie = iop_adma_tx_submit(tx);
1036 iop_adma_issue_pending(dma_chan);
1039 if (iop_adma_status(dma_chan, cookie, NULL) !=
1041 dev_printk(KERN_ERR, dma_chan->device->dev,
1042 "Self-test copy timed out, disabling\n");
1044 goto free_resources;
1047 iop_chan = to_iop_adma_chan(dma_chan);
1048 dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
1049 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
1050 if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
1051 dev_printk(KERN_ERR, dma_chan->device->dev,
1052 "Self-test copy failed compare, disabling\n");
1054 goto free_resources;
1058 iop_adma_free_chan_resources(dma_chan);
1065 #define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
1066 static int __devinit
1067 iop_adma_xor_val_self_test(struct iop_adma_device *device)
1071 struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
1072 struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
1073 dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
1074 dma_addr_t dma_addr, dest_dma;
1075 struct dma_async_tx_descriptor *tx;
1076 struct dma_chan *dma_chan;
1077 dma_cookie_t cookie;
1080 u32 zero_sum_result;
1082 struct iop_adma_chan *iop_chan;
1084 dev_dbg(device->common.dev, "%s\n", __func__);
1086 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
1087 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
1088 if (!xor_srcs[src_idx]) {
1090 __free_page(xor_srcs[src_idx]);
1095 dest = alloc_page(GFP_KERNEL);
1098 __free_page(xor_srcs[src_idx]);
1102 /* Fill in src buffers */
1103 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
1104 u8 *ptr = page_address(xor_srcs[src_idx]);
1105 for (i = 0; i < PAGE_SIZE; i++)
1106 ptr[i] = (1 << src_idx);
1109 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
1110 cmp_byte ^= (u8) (1 << src_idx);
1112 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
1113 (cmp_byte << 8) | cmp_byte;
1115 memset(page_address(dest), 0, PAGE_SIZE);
1117 dma_chan = container_of(device->common.channels.next,
1120 if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
1126 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
1127 PAGE_SIZE, DMA_FROM_DEVICE);
1128 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1129 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
1130 0, PAGE_SIZE, DMA_TO_DEVICE);
1131 tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
1132 IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
1133 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1135 cookie = iop_adma_tx_submit(tx);
1136 iop_adma_issue_pending(dma_chan);
1139 if (iop_adma_status(dma_chan, cookie, NULL) !=
1141 dev_printk(KERN_ERR, dma_chan->device->dev,
1142 "Self-test xor timed out, disabling\n");
1144 goto free_resources;
1147 iop_chan = to_iop_adma_chan(dma_chan);
1148 dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
1149 PAGE_SIZE, DMA_FROM_DEVICE);
1150 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
1151 u32 *ptr = page_address(dest);
1152 if (ptr[i] != cmp_word) {
1153 dev_printk(KERN_ERR, dma_chan->device->dev,
1154 "Self-test xor failed compare, disabling\n");
1156 goto free_resources;
1159 dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
1160 PAGE_SIZE, DMA_TO_DEVICE);
1162 /* skip zero sum if the capability is not present */
1163 if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
1164 goto free_resources;
1166 /* zero sum the sources with the destination page */
1167 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1168 zero_sum_srcs[i] = xor_srcs[i];
1169 zero_sum_srcs[i] = dest;
1171 zero_sum_result = 1;
1173 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
1174 dma_srcs[i] = dma_map_page(dma_chan->device->dev,
1175 zero_sum_srcs[i], 0, PAGE_SIZE,
1177 tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
1178 IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
1180 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1182 cookie = iop_adma_tx_submit(tx);
1183 iop_adma_issue_pending(dma_chan);
1186 if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
1187 dev_printk(KERN_ERR, dma_chan->device->dev,
1188 "Self-test zero sum timed out, disabling\n");
1190 goto free_resources;
1193 if (zero_sum_result != 0) {
1194 dev_printk(KERN_ERR, dma_chan->device->dev,
1195 "Self-test zero sum failed compare, disabling\n");
1197 goto free_resources;
1201 dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
1202 PAGE_SIZE, DMA_FROM_DEVICE);
1203 tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
1204 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1206 cookie = iop_adma_tx_submit(tx);
1207 iop_adma_issue_pending(dma_chan);
1210 if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
1211 dev_printk(KERN_ERR, dma_chan->device->dev,
1212 "Self-test memset timed out, disabling\n");
1214 goto free_resources;
1217 for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
1218 u32 *ptr = page_address(dest);
1220 dev_printk(KERN_ERR, dma_chan->device->dev,
1221 "Self-test memset failed compare, disabling\n");
1223 goto free_resources;
1227 /* test for non-zero parity sum */
1228 zero_sum_result = 0;
1229 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
1230 dma_srcs[i] = dma_map_page(dma_chan->device->dev,
1231 zero_sum_srcs[i], 0, PAGE_SIZE,
1233 tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
1234 IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
1236 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1238 cookie = iop_adma_tx_submit(tx);
1239 iop_adma_issue_pending(dma_chan);
1242 if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
1243 dev_printk(KERN_ERR, dma_chan->device->dev,
1244 "Self-test non-zero sum timed out, disabling\n");
1246 goto free_resources;
1249 if (zero_sum_result != 1) {
1250 dev_printk(KERN_ERR, dma_chan->device->dev,
1251 "Self-test non-zero sum failed compare, disabling\n");
1253 goto free_resources;
1257 iop_adma_free_chan_resources(dma_chan);
1259 src_idx = IOP_ADMA_NUM_SRC_TEST;
1261 __free_page(xor_srcs[src_idx]);
1266 #ifdef CONFIG_RAID6_PQ
1267 static int __devinit
1268 iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
1270 /* combined sources, software pq results, and extra hw pq results */
1271 struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2];
1272 /* ptr to the extra hw pq buffers defined above */
1273 struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
1274 /* address conversion buffers (dma_map / page_address) */
1275 void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
1276 dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST];
1277 dma_addr_t pq_dest[2];
1280 struct dma_async_tx_descriptor *tx;
1281 struct dma_chan *dma_chan;
1282 dma_cookie_t cookie;
1283 u32 zero_sum_result;
1287 dev_dbg(device->common.dev, "%s\n", __func__);
1289 for (i = 0; i < ARRAY_SIZE(pq); i++) {
1290 pq[i] = alloc_page(GFP_KERNEL);
1298 /* Fill in src buffers */
1299 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
1300 pq_sw[i] = page_address(pq[i]);
1301 memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE);
1303 pq_sw[i] = page_address(pq[i]);
1304 pq_sw[i+1] = page_address(pq[i+1]);
1306 dma_chan = container_of(device->common.channels.next,
1309 if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
1314 dev = dma_chan->device->dev;
1316 /* initialize the dests */
1317 memset(page_address(pq_hw[0]), 0 , PAGE_SIZE);
1318 memset(page_address(pq_hw[1]), 0 , PAGE_SIZE);
1321 pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE);
1322 pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE);
1323 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1324 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1327 tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src,
1328 IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp,
1330 DMA_PREP_INTERRUPT |
1333 cookie = iop_adma_tx_submit(tx);
1334 iop_adma_issue_pending(dma_chan);
1337 if (iop_adma_status(dma_chan, cookie, NULL) !=
1339 dev_err(dev, "Self-test pq timed out, disabling\n");
1341 goto free_resources;
1344 raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw);
1346 if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST],
1347 page_address(pq_hw[0]), PAGE_SIZE) != 0) {
1348 dev_err(dev, "Self-test p failed compare, disabling\n");
1350 goto free_resources;
1352 if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1],
1353 page_address(pq_hw[1]), PAGE_SIZE) != 0) {
1354 dev_err(dev, "Self-test q failed compare, disabling\n");
1356 goto free_resources;
1359 /* test correct zero sum using the software generated pq values */
1360 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
1361 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1364 zero_sum_result = ~0;
1365 tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
1366 pq_src, IOP_ADMA_NUM_SRC_TEST,
1367 raid6_gfexp, PAGE_SIZE, &zero_sum_result,
1368 DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1370 cookie = iop_adma_tx_submit(tx);
1371 iop_adma_issue_pending(dma_chan);
1374 if (iop_adma_status(dma_chan, cookie, NULL) !=
1376 dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
1378 goto free_resources;
1381 if (zero_sum_result != 0) {
1382 dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n",
1385 goto free_resources;
1388 /* test incorrect zero sum */
1389 i = IOP_ADMA_NUM_SRC_TEST;
1390 memset(pq_sw[i] + 100, 0, 100);
1391 memset(pq_sw[i+1] + 200, 0, 200);
1392 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
1393 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1396 zero_sum_result = 0;
1397 tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
1398 pq_src, IOP_ADMA_NUM_SRC_TEST,
1399 raid6_gfexp, PAGE_SIZE, &zero_sum_result,
1400 DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1402 cookie = iop_adma_tx_submit(tx);
1403 iop_adma_issue_pending(dma_chan);
1406 if (iop_adma_status(dma_chan, cookie, NULL) !=
1408 dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
1410 goto free_resources;
1413 if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) {
1414 dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n",
1417 goto free_resources;
1421 iop_adma_free_chan_resources(dma_chan);
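/* unregister the dma device, free the coherent descriptor pool and tear
 * down the per-channel state
 */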
1430 static int __devexit iop_adma_remove(struct platform_device *dev)
1432 struct iop_adma_device *device = platform_get_drvdata(dev);
1433 struct dma_chan *chan, *_chan;
1434 struct iop_adma_chan *iop_chan;
1435 struct iop_adma_platform_data *plat_data = dev->dev.platform_data;
1437 dma_async_device_unregister(&device->common);
1439 dma_free_coherent(&dev->dev, plat_data->pool_size,
1440 device->dma_desc_pool_virt, device->dma_desc_pool);
1442 list_for_each_entry_safe(chan, _chan, &device->common.channels,
1444 iop_chan = to_iop_adma_chan(chan);
1445 list_del(&chan->device_node);
1453 static int __devinit iop_adma_probe(struct platform_device *pdev)
1455 struct resource *res;
1457 struct iop_adma_device *adev;
1458 struct iop_adma_chan *iop_chan;
1459 struct dma_device *dma_dev;
1460 struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;
1462 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1466 if (!devm_request_mem_region(&pdev->dev, res->start,
1467 resource_size(res), pdev->name))
1470 adev = kzalloc(sizeof(*adev), GFP_KERNEL);
1473 dma_dev = &adev->common;
1475 /* allocate coherent memory for hardware descriptors
1476 * note: writecombine gives slightly better performance, but
1477 * requires that we explicitly flush the writes
1479 if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
1480 plat_data->pool_size,
1481 &adev->dma_desc_pool,
1482 GFP_KERNEL)) == NULL) {
1487 dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
1488 __func__, adev->dma_desc_pool_virt,
1489 (void *) adev->dma_desc_pool);
1491 adev->id = plat_data->hw_id;
1493 /* discover transaction capabilities from the platform data */
1494 dma_dev->cap_mask = plat_data->cap_mask;
1497 platform_set_drvdata(pdev, adev);
1499 INIT_LIST_HEAD(&dma_dev->channels);
1501 /* set base routines */
1502 dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
1503 dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
1504 dma_dev->device_tx_status = iop_adma_status;
1505 dma_dev->device_issue_pending = iop_adma_issue_pending;
1506 dma_dev->dev = &pdev->dev;
1508 /* set prep routines based on capability */
1509 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1510 dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
1511 if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
1512 dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset;
1513 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1514 dma_dev->max_xor = iop_adma_get_max_xor();
1515 dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
1517 if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask))
1518 dma_dev->device_prep_dma_xor_val =
1519 iop_adma_prep_dma_xor_val;
1520 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
1521 dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0);
1522 dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq;
1524 if (dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask))
1525 dma_dev->device_prep_dma_pq_val =
1526 iop_adma_prep_dma_pq_val;
1527 if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1528 dma_dev->device_prep_dma_interrupt =
1529 iop_adma_prep_dma_interrupt;
1531 iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
1536 iop_chan->device = adev;
1538 iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
1539 resource_size(res));
1540 if (!iop_chan->mmr_base) {
1542 goto err_free_iop_chan;
1544 tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
1547 /* clear errors before enabling interrupts */
1548 iop_adma_device_clear_err_status(iop_chan);
1550 for (i = 0; i < 3; i++) {
1551 irq_handler_t handler[] = { iop_adma_eot_handler,
1552 iop_adma_eoc_handler,
1553 iop_adma_err_handler };
1554 int irq = platform_get_irq(pdev, i);
1557 goto err_free_iop_chan;
1559 ret = devm_request_irq(&pdev->dev, irq,
1560 handler[i], 0, pdev->name, iop_chan);
1562 goto err_free_iop_chan;
1566 spin_lock_init(&iop_chan->lock);
1567 INIT_LIST_HEAD(&iop_chan->chain);
1568 INIT_LIST_HEAD(&iop_chan->all_slots);
1569 iop_chan->common.device = dma_dev;
1570 list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
1572 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1573 ret = iop_adma_memcpy_self_test(adev);
1574 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1576 goto err_free_iop_chan;
1579 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
1580 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
1581 ret = iop_adma_xor_val_self_test(adev);
1582 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1584 goto err_free_iop_chan;
1587 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
1588 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
1589 #ifdef CONFIG_RAID6_PQ
1590 ret = iop_adma_pq_zero_sum_self_test(adev);
1591 dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
1593 /* cannot test raid6, so do not publish capability */
1594 dma_cap_clear(DMA_PQ, dma_dev->cap_mask);
1595 dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask);
1599 goto err_free_iop_chan;
1602 dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: "
1603 "( %s%s%s%s%s%s%s)\n",
1604 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
1605 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
1606 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1607 dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
1608 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
1609 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1610 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1612 dma_async_device_register(dma_dev);
1618 dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
1619 adev->dma_desc_pool_virt, adev->dma_desc_pool);
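/* seed an idle channel with a self-contained null copy so that the first
 * real descriptor always has a chain tail to link behind
 */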
1626 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
1628 struct iop_adma_desc_slot *sw_desc, *grp_start;
1629 dma_cookie_t cookie;
1630 int slot_cnt, slots_per_op;
1632 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1634 spin_lock_bh(&iop_chan->lock);
1635 slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
1636 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1638 grp_start = sw_desc->group_head;
1640 list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1641 async_tx_ack(&sw_desc->async_tx);
1642 iop_desc_init_memcpy(grp_start, 0);
1643 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1644 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1645 iop_desc_set_memcpy_src_addr(grp_start, 0);
1647 cookie = iop_chan->common.cookie;
1652 /* initialize the completed cookie to be less than
1653 * the most recently used cookie
1655 iop_chan->common.completed_cookie = cookie - 1;
1656 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1658 /* channel should not be busy */
1659 BUG_ON(iop_chan_is_busy(iop_chan));
1661 /* clear any prior error-status bits */
1662 iop_adma_device_clear_err_status(iop_chan);
1664 /* disable operation */
1665 iop_chan_disable(iop_chan);
1667 /* set the descriptor address */
1668 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1670 /* 1/ don't add pre-chained descriptors
1671 * 2/ dummy read to flush next_desc write
1673 BUG_ON(iop_desc_get_next_desc(sw_desc));
1675 /* run the descriptor */
1676 iop_chan_enable(iop_chan);
1678 dev_printk(KERN_ERR, iop_chan->device->common.dev,
1679 "failed to allocate null descriptor\n");
1680 spin_unlock_bh(&iop_chan->lock);
1683 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1685 struct iop_adma_desc_slot *sw_desc, *grp_start;
1686 dma_cookie_t cookie;
1687 int slot_cnt, slots_per_op;
1689 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1691 spin_lock_bh(&iop_chan->lock);
1692 slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
1693 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1695 grp_start = sw_desc->group_head;
1696 list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1697 async_tx_ack(&sw_desc->async_tx);
1698 iop_desc_init_null_xor(grp_start, 2, 0);
1699 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1700 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1701 iop_desc_set_xor_src_addr(grp_start, 0, 0);
1702 iop_desc_set_xor_src_addr(grp_start, 1, 0);
1704 cookie = iop_chan->common.cookie;
1709 /* initialize the completed cookie to be less than
1710 * the most recently used cookie
1712 iop_chan->common.completed_cookie = cookie - 1;
1713 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1715 /* channel should not be busy */
1716 BUG_ON(iop_chan_is_busy(iop_chan));
1718 /* clear any prior error-status bits */
1719 iop_adma_device_clear_err_status(iop_chan);
1721 /* disable operation */
1722 iop_chan_disable(iop_chan);
1724 /* set the descriptor address */
1725 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1727 /* 1/ don't add pre-chained descriptors
1728 * 2/ dummy read to flush next_desc write
1730 BUG_ON(iop_desc_get_next_desc(sw_desc));
1732 /* run the descriptor */
1733 iop_chan_enable(iop_chan);
1735 dev_printk(KERN_ERR, iop_chan->device->common.dev,
1736 "failed to allocate null descriptor\n");
1737 spin_unlock_bh(&iop_chan->lock);
1740 static struct platform_driver iop_adma_driver = {
1741 .probe = iop_adma_probe,
1742 .remove = __devexit_p(iop_adma_remove),
1744 .owner = THIS_MODULE,
1749 module_platform_driver(iop_adma_driver);
1751 MODULE_AUTHOR("Intel Corporation");
1752 MODULE_DESCRIPTION("IOP ADMA Engine Driver");
1753 MODULE_LICENSE("GPL");
1754 MODULE_ALIAS("platform:iop-adma");