1 /*
2  * offload engine driver for the Marvell XOR engine
3  * Copyright (C) 2007, 2008, Marvell International Ltd.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc.,
16  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17  */
18
19 #include <linux/init.h>
20 #include <linux/module.h>
21 #include <linux/slab.h>
22 #include <linux/delay.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/spinlock.h>
25 #include <linux/interrupt.h>
26 #include <linux/platform_device.h>
27 #include <linux/memory.h>
28 #include <linux/clk.h>
29 #include <linux/of.h>
30 #include <linux/of_irq.h>
31 #include <linux/irqdomain.h>
32 #include <linux/platform_data/dma-mv_xor.h>
33
34 #include "dmaengine.h"
35 #include "mv_xor.h"
36
37 static void mv_xor_issue_pending(struct dma_chan *chan);
38
39 #define to_mv_xor_chan(chan)            \
40         container_of(chan, struct mv_xor_chan, dmachan)
41
42 #define to_mv_xor_slot(tx)              \
43         container_of(tx, struct mv_xor_desc_slot, async_tx)
44
45 #define mv_chan_to_devp(chan)           \
46         ((chan)->dmadev.dev)
47
48 static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
49 {
50         struct mv_xor_desc *hw_desc = desc->hw_desc;
51
52         hw_desc->status = (1 << 31);
53         hw_desc->phy_next_desc = 0;
54         hw_desc->desc_command = (1 << 31);
55 }
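/*
 * A note on the magic bits in mv_desc_init() above (an assumption based on
 * the Marvell XOR descriptor format, not something spelled out in this
 * file): bit 31 of the status word appears to mark the descriptor as owned
 * by the DMA engine, and bit 31 of desc_command appears to enable the
 * end-of-descriptor interrupt.  The per-source enable bits of desc_command
 * are filled in later by mv_desc_set_src_addr().
 */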
56
57 static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
58 {
59         struct mv_xor_desc *hw_desc = desc->hw_desc;
60         return hw_desc->phy_dest_addr;
61 }
62
63 static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
64                                 int src_idx)
65 {
66         struct mv_xor_desc *hw_desc = desc->hw_desc;
67         return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
68 }
69
70
71 static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
72                                    u32 byte_count)
73 {
74         struct mv_xor_desc *hw_desc = desc->hw_desc;
75         hw_desc->byte_count = byte_count;
76 }
77
78 static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
79                                   u32 next_desc_addr)
80 {
81         struct mv_xor_desc *hw_desc = desc->hw_desc;
82         BUG_ON(hw_desc->phy_next_desc);
83         hw_desc->phy_next_desc = next_desc_addr;
84 }
85
86 static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
87 {
88         struct mv_xor_desc *hw_desc = desc->hw_desc;
89         hw_desc->phy_next_desc = 0;
90 }
91
92 static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
93                                   dma_addr_t addr)
94 {
95         struct mv_xor_desc *hw_desc = desc->hw_desc;
96         hw_desc->phy_dest_addr = addr;
97 }
98
99 static int mv_chan_memset_slot_count(size_t len)
100 {
101         return 1;
102 }
103
104 #define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
105
106 static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
107                                  int index, dma_addr_t addr)
108 {
109         struct mv_xor_desc *hw_desc = desc->hw_desc;
110         hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
111         if (desc->type == DMA_XOR)
112                 hw_desc->desc_command |= (1 << index);
113 }
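/*
 * For XOR descriptors the low bits of desc_command are treated as a
 * source-enable mask: setting bit <index> above asks the engine to include
 * source block <index> in the XOR calculation.  memcpy descriptors only
 * ever program source 0 and leave these bits alone.
 */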
114
115 static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
116 {
117         return readl_relaxed(XOR_CURR_DESC(chan));
118 }
119
120 static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
121                                         u32 next_desc_addr)
122 {
123         writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
124 }
125
126 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
127 {
128         u32 val = readl_relaxed(XOR_INTR_MASK(chan));
129         val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
130         writel_relaxed(val, XOR_INTR_MASK(chan));
131 }
132
133 static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
134 {
135         u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
136         intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
137         return intr_cause;
138 }
139
140 static int mv_is_err_intr(u32 intr_cause)
141 {
142         if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
143                 return 1;
144
145         return 0;
146 }
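/*
 * The interrupt cause/mask registers are shared between channels, with a
 * 16-bit field per channel (hence the chan->idx * 16 shifts used above and
 * below).  Within a channel's field this driver treats bits 4-9 as error
 * conditions; the low bits carry the normal end-of-descriptor/end-of-chain
 * completion events that mv_xor_device_clear_eoc_cause() acknowledges.
 */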
147
148 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
149 {
150         u32 val = ~(1 << (chan->idx * 16));
151         dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
152         writel_relaxed(val, XOR_INTR_CAUSE(chan));
153 }
154
155 static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
156 {
157         u32 val = 0xFFFF0000 >> (chan->idx * 16);
158         writel_relaxed(val, XOR_INTR_CAUSE(chan));
159 }
160
161 static int mv_can_chain(struct mv_xor_desc_slot *desc)
162 {
163         struct mv_xor_desc_slot *chain_old_tail = list_entry(
164                 desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);
165
166         if (chain_old_tail->type != desc->type)
167                 return 0;
168
169         return 1;
170 }
171
172 static void mv_set_mode(struct mv_xor_chan *chan,
173                                enum dma_transaction_type type)
174 {
175         u32 op_mode;
176         u32 config = readl_relaxed(XOR_CONFIG(chan));
177
178         switch (type) {
179         case DMA_XOR:
180                 op_mode = XOR_OPERATION_MODE_XOR;
181                 break;
182         case DMA_MEMCPY:
183                 op_mode = XOR_OPERATION_MODE_MEMCPY;
184                 break;
185         default:
186                 dev_err(mv_chan_to_devp(chan),
187                         "error: unsupported operation %d\n",
188                         type);
189                 BUG();
190                 return;
191         }
192
193         config &= ~0x7;
194         config |= op_mode;
195
196 #if defined(__BIG_ENDIAN)
197         config |= XOR_DESCRIPTOR_SWAP;
198 #else
199         config &= ~XOR_DESCRIPTOR_SWAP;
200 #endif
201
202         writel_relaxed(config, XOR_CONFIG(chan));
203         chan->current_type = type;
204 }
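/*
 * The low three bits of the configuration register select the operation
 * the channel performs (XOR or memcpy here).  XOR_DESCRIPTOR_SWAP makes
 * the engine byte-swap descriptors as it fetches them, which is needed
 * when the CPU writes them in big-endian order.
 */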
205
206 static void mv_chan_activate(struct mv_xor_chan *chan)
207 {
208         u32 activation;
209
210         dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
211         activation = readl_relaxed(XOR_ACTIVATION(chan));
212         activation |= 0x1;
            /* a plain writel (not _relaxed) ensures the descriptor writes
             * above are visible to the engine before it is activated */
213         writel(activation, XOR_ACTIVATION(chan));
214 }
215
216 static char mv_chan_is_busy(struct mv_xor_chan *chan)
217 {
218         u32 state = readl_relaxed(XOR_ACTIVATION(chan));
219
220         state = (state >> 4) & 0x3;
221
222         return (state == 1) ? 1 : 0;
223 }
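/*
 * Bits [5:4] of the activation register encode the channel state; a value
 * of 1 is taken to mean the channel is actively walking a descriptor
 * chain, anything else (presumably idle or paused) is reported as not
 * busy.
 */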
224
225 static int mv_chan_xor_slot_count(size_t len, int src_cnt)
226 {
227         return 1;
228 }
229
230 /**
231  * mv_xor_free_slots - flags descriptor slots for reuse
232  * @slot: Slot to free
233  * Caller must hold &mv_chan->lock while calling this function
234  */
235 static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
236                               struct mv_xor_desc_slot *slot)
237 {
238         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
239                 __func__, __LINE__, slot);
240
241         slot->slots_per_op = 0;
242
243 }
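/*
 * mv_xor_alloc_slots() treats any slot with a non-zero slots_per_op as
 * busy, so clearing it above is what actually returns the slot to the
 * free pool.
 */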
244
245 /*
246  * mv_xor_start_new_chain - program the engine to operate on a new chain headed by
247  * sw_desc
248  * Caller must hold &mv_chan->lock while calling this function
249  */
250 static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
251                                    struct mv_xor_desc_slot *sw_desc)
252 {
253         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
254                 __func__, __LINE__, sw_desc);
255         if (sw_desc->type != mv_chan->current_type)
256                 mv_set_mode(mv_chan, sw_desc->type);
257
258         /* set the hardware chain */
259         mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
260
261         mv_chan->pending += sw_desc->slot_cnt;
262         mv_xor_issue_pending(&mv_chan->dmachan);
263 }
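/*
 * Hand-off to the hardware is a two step affair: the physical address of
 * the chain head is written to the next-descriptor register above, and
 * the engine itself is only (re)activated from mv_xor_issue_pending()
 * once at least MV_XOR_THRESHOLD descriptors are pending.
 */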
264
265 static dma_cookie_t
266 mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
267         struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
268 {
269         BUG_ON(desc->async_tx.cookie < 0);
270
271         if (desc->async_tx.cookie > 0) {
272                 cookie = desc->async_tx.cookie;
273
274                 /* call the callback (must not sleep or submit new
275                  * operations to this channel)
276                  */
277                 if (desc->async_tx.callback)
278                         desc->async_tx.callback(
279                                 desc->async_tx.callback_param);
280
281                 /* unmap dma addresses
282                  * (unmap_single vs unmap_page?)
283                  */
284                 if (desc->group_head && desc->unmap_len) {
285                         struct mv_xor_desc_slot *unmap = desc->group_head;
286                         struct device *dev = mv_chan_to_devp(mv_chan);
287                         u32 len = unmap->unmap_len;
288                         enum dma_ctrl_flags flags = desc->async_tx.flags;
289                         u32 src_cnt;
290                         dma_addr_t addr;
291                         dma_addr_t dest;
292
293                         src_cnt = unmap->unmap_src_cnt;
294                         dest = mv_desc_get_dest_addr(unmap);
295                         if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
296                                 enum dma_data_direction dir;
297
298                                 if (src_cnt > 1) /* is xor ? */
299                                         dir = DMA_BIDIRECTIONAL;
300                                 else
301                                         dir = DMA_FROM_DEVICE;
302                                 dma_unmap_page(dev, dest, len, dir);
303                         }
304
305                         if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
306                                 while (src_cnt--) {
307                                         addr = mv_desc_get_src_addr(unmap,
308                                                                     src_cnt);
309                                         if (addr == dest)
310                                                 continue;
311                                         dma_unmap_page(dev, addr, len,
312                                                        DMA_TO_DEVICE);
313                                 }
314                         }
315                         desc->group_head = NULL;
316                 }
317         }
318
319         /* run dependent operations */
320         dma_run_dependencies(&desc->async_tx);
321
322         return cookie;
323 }
324
325 static int
326 mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
327 {
328         struct mv_xor_desc_slot *iter, *_iter;
329
330         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
331         list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
332                                  completed_node) {
333
334                 if (async_tx_test_ack(&iter->async_tx)) {
335                         list_del(&iter->completed_node);
336                         mv_xor_free_slots(mv_chan, iter);
337                 }
338         }
339         return 0;
340 }
341
342 static int
343 mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
344         struct mv_xor_chan *mv_chan)
345 {
346         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
347                 __func__, __LINE__, desc, desc->async_tx.flags);
348         list_del(&desc->chain_node);
349         /* the client is allowed to attach dependent operations
350          * until 'ack' is set
351          */
352         if (!async_tx_test_ack(&desc->async_tx)) {
353                 /* move this slot to the completed_slots */
354                 list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
355                 return 0;
356         }
357
358         mv_xor_free_slots(mv_chan, desc);
359         return 0;
360 }
361
362 static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
363 {
364         struct mv_xor_desc_slot *iter, *_iter;
365         dma_cookie_t cookie = 0;
366         int busy = mv_chan_is_busy(mv_chan);
367         u32 current_desc = mv_chan_get_current_desc(mv_chan);
368         int seen_current = 0;
369
370         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
371         dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
372         mv_xor_clean_completed_slots(mv_chan);
373
374         /* free completed slots from the chain starting with
375          * the oldest descriptor
376          */
377
378         list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
379                                         chain_node) {
380                 prefetch(_iter);
381                 prefetch(&_iter->async_tx);
382
383                 /* do not advance past the current descriptor loaded into the
384                  * hardware channel; subsequent descriptors are either in
385                  * progress or have not been submitted
386                  */
387                 if (seen_current)
388                         break;
389
390                 /* stop the search if we reach the current descriptor and the
391                  * channel is busy
392                  */
393                 if (iter->async_tx.phys == current_desc) {
394                         seen_current = 1;
395                         if (busy)
396                                 break;
397                 }
398
399                 cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
400
401                 if (mv_xor_clean_slot(iter, mv_chan))
402                         break;
403         }
404
405         if ((busy == 0) && !list_empty(&mv_chan->chain)) {
406                 struct mv_xor_desc_slot *chain_head;
407                 chain_head = list_entry(mv_chan->chain.next,
408                                         struct mv_xor_desc_slot,
409                                         chain_node);
410
411                 mv_xor_start_new_chain(mv_chan, chain_head);
412         }
413
414         if (cookie > 0)
415                 mv_chan->dmachan.completed_cookie = cookie;
416 }
417
418 static void
419 mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
420 {
421         spin_lock_bh(&mv_chan->lock);
422         __mv_xor_slot_cleanup(mv_chan);
423         spin_unlock_bh(&mv_chan->lock);
424 }
425
426 static void mv_xor_tasklet(unsigned long data)
427 {
428         struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
429         mv_xor_slot_cleanup(chan);
430 }
431
432 static struct mv_xor_desc_slot *
433 mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
434                     int slots_per_op)
435 {
436         struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
437         LIST_HEAD(chain);
438         int slots_found, retry = 0;
439
440         /* start search from the last allocated descriptor;
441          * if a contiguous allocation cannot be found, start searching
442          * from the beginning of the list
443          */
444 retry:
445         slots_found = 0;
446         if (retry == 0)
447                 iter = mv_chan->last_used;
448         else
449                 iter = list_entry(&mv_chan->all_slots,
450                         struct mv_xor_desc_slot,
451                         slot_node);
452
453         list_for_each_entry_safe_continue(
454                 iter, _iter, &mv_chan->all_slots, slot_node) {
455                 prefetch(_iter);
456                 prefetch(&_iter->async_tx);
457                 if (iter->slots_per_op) {
458                         /* give up after finding the first busy slot
459                          * on the second pass through the list
460                          */
461                         if (retry)
462                                 break;
463
464                         slots_found = 0;
465                         continue;
466                 }
467
468                 /* start the allocation if the slot is correctly aligned */
469                 if (!slots_found++)
470                         alloc_start = iter;
471
472                 if (slots_found == num_slots) {
473                         struct mv_xor_desc_slot *alloc_tail = NULL;
474                         struct mv_xor_desc_slot *last_used = NULL;
475                         iter = alloc_start;
476                         while (num_slots) {
477                                 int i;
478
479                                 /* pre-ack all but the last descriptor */
480                                 async_tx_ack(&iter->async_tx);
481
482                                 list_add_tail(&iter->chain_node, &chain);
483                                 alloc_tail = iter;
484                                 iter->async_tx.cookie = 0;
485                                 iter->slot_cnt = num_slots;
486                                 iter->xor_check_result = NULL;
487                                 for (i = 0; i < slots_per_op; i++) {
488                                         iter->slots_per_op = slots_per_op - i;
489                                         last_used = iter;
490                                         iter = list_entry(iter->slot_node.next,
491                                                 struct mv_xor_desc_slot,
492                                                 slot_node);
493                                 }
494                                 num_slots -= slots_per_op;
495                         }
496                         alloc_tail->group_head = alloc_start;
497                         alloc_tail->async_tx.cookie = -EBUSY;
498                         list_splice(&chain, &alloc_tail->tx_list);
499                         mv_chan->last_used = last_used;
500                         mv_desc_clear_next_desc(alloc_start);
501                         mv_desc_clear_next_desc(alloc_tail);
502                         return alloc_tail;
503                 }
504         }
505         if (!retry++)
506                 goto retry;
507
508         /* try to free some slots if the allocation fails */
509         tasklet_schedule(&mv_chan->irq_tasklet);
510
511         return NULL;
512 }
513
514 /************************ DMA engine API functions ****************************/
515 static dma_cookie_t
516 mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
517 {
518         struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
519         struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
520         struct mv_xor_desc_slot *grp_start, *old_chain_tail;
521         dma_cookie_t cookie;
522         int new_hw_chain = 1;
523
524         dev_dbg(mv_chan_to_devp(mv_chan),
525                 "%s sw_desc %p: async_tx %p\n",
526                 __func__, sw_desc, &sw_desc->async_tx);
527
528         grp_start = sw_desc->group_head;
529
530         spin_lock_bh(&mv_chan->lock);
531         cookie = dma_cookie_assign(tx);
532
533         if (list_empty(&mv_chan->chain))
534                 list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
535         else {
536                 new_hw_chain = 0;
537
538                 old_chain_tail = list_entry(mv_chan->chain.prev,
539                                             struct mv_xor_desc_slot,
540                                             chain_node);
541                 list_splice_init(&grp_start->tx_list,
542                                  &old_chain_tail->chain_node);
543
544                 if (!mv_can_chain(grp_start))
545                         goto submit_done;
546
547                 dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
548                         old_chain_tail->async_tx.phys);
549
550                 /* fix up the hardware chain */
551                 mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
552
553                 /* if the channel is not busy */
554                 if (!mv_chan_is_busy(mv_chan)) {
555                         u32 current_desc = mv_chan_get_current_desc(mv_chan);
556                         /*
557                          * and the current desc is the end of the chain before
558                          * the append, then we need to start the channel
559                          */
560                         if (current_desc == old_chain_tail->async_tx.phys)
561                                 new_hw_chain = 1;
562                 }
563         }
564
565         if (new_hw_chain)
566                 mv_xor_start_new_chain(mv_chan, grp_start);
567
568 submit_done:
569         spin_unlock_bh(&mv_chan->lock);
570
571         return cookie;
572 }
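/*
 * Submission summary: when the software chain is empty the new descriptors
 * simply become the chain and the engine is started on them.  Otherwise
 * they are appended by patching the old tail's next-descriptor pointer,
 * and the engine is restarted only if it is idle and parked exactly on
 * that old tail, i.e. it finished the previous chain before the append
 * became visible to it.
 */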
573
574 /* returns the number of allocated descriptors */
575 static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
576 {
577         char *hw_desc;
578         int idx;
579         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
580         struct mv_xor_desc_slot *slot = NULL;
581         int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
582
583         /* Allocate descriptor slots */
584         idx = mv_chan->slots_allocated;
585         while (idx < num_descs_in_pool) {
586                 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
587                 if (!slot) {
588                         printk(KERN_INFO "MV XOR Channel only initialized"
589                                 " %d descriptor slots\n", idx);
590                         break;
591                 }
592                 hw_desc = (char *) mv_chan->dma_desc_pool_virt;
593                 slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
594
595                 dma_async_tx_descriptor_init(&slot->async_tx, chan);
596                 slot->async_tx.tx_submit = mv_xor_tx_submit;
597                 INIT_LIST_HEAD(&slot->chain_node);
598                 INIT_LIST_HEAD(&slot->slot_node);
599                 INIT_LIST_HEAD(&slot->tx_list);
600                 hw_desc = (char *) mv_chan->dma_desc_pool;
601                 slot->async_tx.phys =
602                         (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
603                 slot->idx = idx++;
604
605                 spin_lock_bh(&mv_chan->lock);
606                 mv_chan->slots_allocated = idx;
607                 list_add_tail(&slot->slot_node, &mv_chan->all_slots);
608                 spin_unlock_bh(&mv_chan->lock);
609         }
610
611         if (mv_chan->slots_allocated && !mv_chan->last_used)
612                 mv_chan->last_used = list_entry(mv_chan->all_slots.next,
613                                         struct mv_xor_desc_slot,
614                                         slot_node);
615
616         dev_dbg(mv_chan_to_devp(mv_chan),
617                 "allocated %d descriptor slots last_used: %p\n",
618                 mv_chan->slots_allocated, mv_chan->last_used);
619
620         return mv_chan->slots_allocated ? : -ENOMEM;
621 }
622
623 static struct dma_async_tx_descriptor *
624 mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
625                 size_t len, unsigned long flags)
626 {
627         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
628         struct mv_xor_desc_slot *sw_desc, *grp_start;
629         int slot_cnt;
630
631         dev_dbg(mv_chan_to_devp(mv_chan),
632                 "%s dest: %x src %x len: %u flags: %ld\n",
633                 __func__, dest, src, len, flags);
634         if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
635                 return NULL;
636
637         BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
638
639         spin_lock_bh(&mv_chan->lock);
640         slot_cnt = mv_chan_memcpy_slot_count(len);
641         sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
642         if (sw_desc) {
643                 sw_desc->type = DMA_MEMCPY;
644                 sw_desc->async_tx.flags = flags;
645                 grp_start = sw_desc->group_head;
646                 mv_desc_init(grp_start, flags);
647                 mv_desc_set_byte_count(grp_start, len);
648                 mv_desc_set_dest_addr(sw_desc->group_head, dest);
649                 mv_desc_set_src_addr(grp_start, 0, src);
650                 sw_desc->unmap_src_cnt = 1;
651                 sw_desc->unmap_len = len;
652         }
653         spin_unlock_bh(&mv_chan->lock);
654
655         dev_dbg(mv_chan_to_devp(mv_chan),
656                 "%s sw_desc %p async_tx %p\n",
657                 __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
658
659         return sw_desc ? &sw_desc->async_tx : NULL;
660 }
661
662 static struct dma_async_tx_descriptor *
663 mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
664                     unsigned int src_cnt, size_t len, unsigned long flags)
665 {
666         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
667         struct mv_xor_desc_slot *sw_desc, *grp_start;
668         int slot_cnt;
669
670         if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
671                 return NULL;
672
673         BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
674
675         dev_dbg(mv_chan_to_devp(mv_chan),
676                 "%s src_cnt: %d len: %u dest %x flags: %ld\n",
677                 __func__, src_cnt, len, dest, flags);
678
679         spin_lock_bh(&mv_chan->lock);
680         slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
681         sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
682         if (sw_desc) {
683                 sw_desc->type = DMA_XOR;
684                 sw_desc->async_tx.flags = flags;
685                 grp_start = sw_desc->group_head;
686                 mv_desc_init(grp_start, flags);
687                 /* the byte count field is the same as in memcpy desc */
688                 mv_desc_set_byte_count(grp_start, len);
689                 mv_desc_set_dest_addr(sw_desc->group_head, dest);
690                 sw_desc->unmap_src_cnt = src_cnt;
691                 sw_desc->unmap_len = len;
692                 while (src_cnt--)
693                         mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
694         }
695         spin_unlock_bh(&mv_chan->lock);
696         dev_dbg(mv_chan_to_devp(mv_chan),
697                 "%s sw_desc %p async_tx %p\n",
698                 __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
699         return sw_desc ? &sw_desc->async_tx : NULL;
700 }
701
702 static void mv_xor_free_chan_resources(struct dma_chan *chan)
703 {
704         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
705         struct mv_xor_desc_slot *iter, *_iter;
706         int in_use_descs = 0;
707
708         mv_xor_slot_cleanup(mv_chan);
709
710         spin_lock_bh(&mv_chan->lock);
711         list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
712                                         chain_node) {
713                 in_use_descs++;
714                 list_del(&iter->chain_node);
715         }
716         list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
717                                  completed_node) {
718                 in_use_descs++;
719                 list_del(&iter->completed_node);
720         }
721         list_for_each_entry_safe_reverse(
722                 iter, _iter, &mv_chan->all_slots, slot_node) {
723                 list_del(&iter->slot_node);
724                 kfree(iter);
725                 mv_chan->slots_allocated--;
726         }
727         mv_chan->last_used = NULL;
728
729         dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
730                 __func__, mv_chan->slots_allocated);
731         spin_unlock_bh(&mv_chan->lock);
732
733         if (in_use_descs)
734                 dev_err(mv_chan_to_devp(mv_chan),
735                         "freeing %d in use descriptors!\n", in_use_descs);
736 }
737
738 /**
739  * mv_xor_status - poll the status of an XOR transaction
740  * @chan: XOR channel handle
741  * @cookie: XOR transaction identifier
742  * @txstate: XOR transactions state holder (or NULL)
743  */
744 static enum dma_status mv_xor_status(struct dma_chan *chan,
745                                           dma_cookie_t cookie,
746                                           struct dma_tx_state *txstate)
747 {
748         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
749         enum dma_status ret;
750
751         ret = dma_cookie_status(chan, cookie, txstate);
752         if (ret == DMA_SUCCESS) {
753                 mv_xor_clean_completed_slots(mv_chan);
754                 return ret;
755         }
756         mv_xor_slot_cleanup(mv_chan);
757
758         return dma_cookie_status(chan, cookie, txstate);
759 }
760
761 static void mv_dump_xor_regs(struct mv_xor_chan *chan)
762 {
763         u32 val;
764
765         val = readl_relaxed(XOR_CONFIG(chan));
766         dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);
767
768         val = readl_relaxed(XOR_ACTIVATION(chan));
769         dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);
770
771         val = readl_relaxed(XOR_INTR_CAUSE(chan));
772         dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);
773
774         val = readl_relaxed(XOR_INTR_MASK(chan));
775         dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);
776
777         val = readl_relaxed(XOR_ERROR_CAUSE(chan));
778         dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);
779
780         val = readl_relaxed(XOR_ERROR_ADDR(chan));
781         dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
782 }
783
784 static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
785                                          u32 intr_cause)
786 {
787         if (intr_cause & (1 << 4)) {
788              dev_dbg(mv_chan_to_devp(chan),
789                      "ignore this error\n");
790              return;
791         }
792
793         dev_err(mv_chan_to_devp(chan),
794                 "error on chan %d. intr cause 0x%08x\n",
795                 chan->idx, intr_cause);
796
797         mv_dump_xor_regs(chan);
798         BUG();
799 }
800
801 static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
802 {
803         struct mv_xor_chan *chan = data;
804         u32 intr_cause = mv_chan_get_intr_cause(chan);
805
806         dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
807
808         if (mv_is_err_intr(intr_cause))
809                 mv_xor_err_interrupt_handler(chan, intr_cause);
810
811         tasklet_schedule(&chan->irq_tasklet);
812
813         mv_xor_device_clear_eoc_cause(chan);
814
815         return IRQ_HANDLED;
816 }
817
818 static void mv_xor_issue_pending(struct dma_chan *chan)
819 {
820         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
821
822         if (mv_chan->pending >= MV_XOR_THRESHOLD) {
823                 mv_chan->pending = 0;
824                 mv_chan_activate(mv_chan);
825         }
826 }
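/*
 * Typical client usage, as a minimal sketch only (it mirrors what the
 * self-tests below do; real clients normally go through the generic
 * dmaengine wrappers and obtain "chan" via dma_request_channel() or
 * similar):
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, 0);
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 *	...
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *
 * Note that mv_xor_issue_pending() only pokes the hardware once at least
 * MV_XOR_THRESHOLD descriptors are pending.
 */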
827
828 /*
829  * Perform a transaction to verify the HW works.
830  */
831 #define MV_XOR_TEST_SIZE 2000
832
833 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
834 {
835         int i;
836         void *src, *dest;
837         dma_addr_t src_dma, dest_dma;
838         struct dma_chan *dma_chan;
839         dma_cookie_t cookie;
840         struct dma_async_tx_descriptor *tx;
841         int err = 0;
842
843         src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
844         if (!src)
845                 return -ENOMEM;
846
847         dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
848         if (!dest) {
849                 kfree(src);
850                 return -ENOMEM;
851         }
852
853         /* Fill in src buffer */
854         for (i = 0; i < MV_XOR_TEST_SIZE; i++)
855                 ((u8 *) src)[i] = (u8)i;
856
857         dma_chan = &mv_chan->dmachan;
858         if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
859                 err = -ENODEV;
860                 goto out;
861         }
862
863         dest_dma = dma_map_single(dma_chan->device->dev, dest,
864                                   MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
865
866         src_dma = dma_map_single(dma_chan->device->dev, src,
867                                  MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
868
869         tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
870                                     MV_XOR_TEST_SIZE, 0);
871         cookie = mv_xor_tx_submit(tx);
872         mv_xor_issue_pending(dma_chan);
873         async_tx_ack(tx);
874         msleep(1);
875
876         if (mv_xor_status(dma_chan, cookie, NULL) !=
877             DMA_SUCCESS) {
878                 dev_err(dma_chan->device->dev,
879                         "Self-test copy timed out, disabling\n");
880                 err = -ENODEV;
881                 goto free_resources;
882         }
883
884         dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
885                                 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
886         if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
887                 dev_err(dma_chan->device->dev,
888                         "Self-test copy failed compare, disabling\n");
889                 err = -ENODEV;
890                 goto free_resources;
891         }
892
893 free_resources:
894         mv_xor_free_chan_resources(dma_chan);
895 out:
896         kfree(src);
897         kfree(dest);
898         return err;
899 }
900
901 #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
902 static int
903 mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
904 {
905         int i, src_idx;
906         struct page *dest;
907         struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
908         dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
909         dma_addr_t dest_dma;
910         struct dma_async_tx_descriptor *tx;
911         struct dma_chan *dma_chan;
912         dma_cookie_t cookie;
913         u8 cmp_byte = 0;
914         u32 cmp_word;
915         int err = 0;
916
917         for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
918                 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
919                 if (!xor_srcs[src_idx]) {
920                         while (src_idx--)
921                                 __free_page(xor_srcs[src_idx]);
922                         return -ENOMEM;
923                 }
924         }
925
926         dest = alloc_page(GFP_KERNEL);
927         if (!dest) {
928                 while (src_idx--)
929                         __free_page(xor_srcs[src_idx]);
930                 return -ENOMEM;
931         }
932
933         /* Fill in src buffers */
934         for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
935                 u8 *ptr = page_address(xor_srcs[src_idx]);
936                 for (i = 0; i < PAGE_SIZE; i++)
937                         ptr[i] = (1 << src_idx);
938         }
939
940         for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
941                 cmp_byte ^= (u8) (1 << src_idx);
942
943         cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
944                 (cmp_byte << 8) | cmp_byte;
945
946         memset(page_address(dest), 0, PAGE_SIZE);
947
948         dma_chan = &mv_chan->dmachan;
949         if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
950                 err = -ENODEV;
951                 goto out;
952         }
953
954         /* test xor */
955         dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
956                                 DMA_FROM_DEVICE);
957
958         for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
959                 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
960                                            0, PAGE_SIZE, DMA_TO_DEVICE);
961
962         tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
963                                  MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
964
965         cookie = mv_xor_tx_submit(tx);
966         mv_xor_issue_pending(dma_chan);
967         async_tx_ack(tx);
968         msleep(8);
969
970         if (mv_xor_status(dma_chan, cookie, NULL) !=
971             DMA_SUCCESS) {
972                 dev_err(dma_chan->device->dev,
973                         "Self-test xor timed out, disabling\n");
974                 err = -ENODEV;
975                 goto free_resources;
976         }
977
978         dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
979                                 PAGE_SIZE, DMA_FROM_DEVICE);
980         for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
981                 u32 *ptr = page_address(dest);
982                 if (ptr[i] != cmp_word) {
983                         dev_err(dma_chan->device->dev,
984                                 "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
985                                 i, ptr[i], cmp_word);
986                         err = -ENODEV;
987                         goto free_resources;
988                 }
989         }
990
991 free_resources:
992         mv_xor_free_chan_resources(dma_chan);
993 out:
994         src_idx = MV_XOR_NUM_SRC_TEST;
995         while (src_idx--)
996                 __free_page(xor_srcs[src_idx]);
997         __free_page(dest);
998         return err;
999 }
1000
1001 /* This driver does not implement any of the optional DMA operations. */
1002 static int
1003 mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1004                unsigned long arg)
1005 {
1006         return -ENOSYS;
1007 }
1008
1009 static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
1010 {
1011         struct dma_chan *chan, *_chan;
1012         struct device *dev = mv_chan->dmadev.dev;
1013
1014         dma_async_device_unregister(&mv_chan->dmadev);
1015
1016         dma_free_coherent(dev, MV_XOR_POOL_SIZE,
1017                           mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1018
1019         list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
1020                                  device_node) {
1021                 list_del(&chan->device_node);
1022         }
1023
1024         free_irq(mv_chan->irq, mv_chan);
1025
1026         return 0;
1027 }
1028
1029 static struct mv_xor_chan *
1030 mv_xor_channel_add(struct mv_xor_device *xordev,
1031                    struct platform_device *pdev,
1032                    int idx, dma_cap_mask_t cap_mask, int irq)
1033 {
1034         int ret = 0;
1035         struct mv_xor_chan *mv_chan;
1036         struct dma_device *dma_dev;
1037
1038         mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1039         if (!mv_chan)
1040                 return ERR_PTR(-ENOMEM);
1041
1042         mv_chan->idx = idx;
1043         mv_chan->irq = irq;
1044
1045         dma_dev = &mv_chan->dmadev;
1046
1047         /* allocate coherent memory for hardware descriptors
1048          * note: writecombine gives slightly better performance, but
1049          * requires that we explicitly flush the writes
1050          */
1051         mv_chan->dma_desc_pool_virt =
1052           dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
1053                                  &mv_chan->dma_desc_pool, GFP_KERNEL);
1054         if (!mv_chan->dma_desc_pool_virt)
1055                 return ERR_PTR(-ENOMEM);
1056
1057         /* discover transaction capabilities from the platform data */
1058         dma_dev->cap_mask = cap_mask;
1059
1060         INIT_LIST_HEAD(&dma_dev->channels);
1061
1062         /* set base routines */
1063         dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1064         dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1065         dma_dev->device_tx_status = mv_xor_status;
1066         dma_dev->device_issue_pending = mv_xor_issue_pending;
1067         dma_dev->device_control = mv_xor_control;
1068         dma_dev->dev = &pdev->dev;
1069
1070         /* set prep routines based on capability */
1071         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1072                 dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1073         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1074                 dma_dev->max_xor = 8;
1075                 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1076         }
1077
1078         mv_chan->mmr_base = xordev->xor_base;
1079         if (!mv_chan->mmr_base) {
1080                 ret = -ENOMEM;
1081                 goto err_free_dma;
1082         }
1083         tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1084                      mv_chan);
1085
1086         /* clear errors before enabling interrupts */
1087         mv_xor_device_clear_err_status(mv_chan);
1088
1089         ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
1090                           0, dev_name(&pdev->dev), mv_chan);
1091         if (ret)
1092                 goto err_free_dma;
1093
1094         mv_chan_unmask_interrupts(mv_chan);
1095
1096         mv_set_mode(mv_chan, DMA_MEMCPY);
1097
1098         spin_lock_init(&mv_chan->lock);
1099         INIT_LIST_HEAD(&mv_chan->chain);
1100         INIT_LIST_HEAD(&mv_chan->completed_slots);
1101         INIT_LIST_HEAD(&mv_chan->all_slots);
1102         mv_chan->dmachan.device = dma_dev;
1103         dma_cookie_init(&mv_chan->dmachan);
1104
1105         list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
1106
1107         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1108                 ret = mv_xor_memcpy_self_test(mv_chan);
1109                 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1110                 if (ret)
1111                         goto err_free_irq;
1112         }
1113
1114         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1115                 ret = mv_xor_xor_self_test(mv_chan);
1116                 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1117                 if (ret)
1118                         goto err_free_irq;
1119         }
1120
1121         dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
1122                  dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1123                  dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1124                  dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1125
1126         dma_async_device_register(dma_dev);
1127         return mv_chan;
1128
1129 err_free_irq:
1130         free_irq(mv_chan->irq, mv_chan);
1131 err_free_dma:
1132         dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
1133                           mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1134         return ERR_PTR(ret);
1135 }
1136
1137 static void
1138 mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
1139                          const struct mbus_dram_target_info *dram)
1140 {
1141         void __iomem *base = xordev->xor_base;
1142         u32 win_enable = 0;
1143         int i;
1144
1145         for (i = 0; i < 8; i++) {
1146                 writel(0, base + WINDOW_BASE(i));
1147                 writel(0, base + WINDOW_SIZE(i));
1148                 if (i < 4)
1149                         writel(0, base + WINDOW_REMAP_HIGH(i));
1150         }
1151
1152         for (i = 0; i < dram->num_cs; i++) {
1153                 const struct mbus_dram_window *cs = dram->cs + i;
1154
1155                 writel((cs->base & 0xffff0000) |
1156                        (cs->mbus_attr << 8) |
1157                        dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1158                 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1159
1160                 win_enable |= (1 << i);
1161                 win_enable |= 3 << (16 + (2 * i));
1162         }
1163
1164         writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1165         writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1166         writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1167         writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1168 }
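/*
 * The eight address decoding windows programmed above map the XOR
 * engine's DMA accesses onto the SoC MBUS: every active DRAM chip select
 * gets a window with its base, size and target attributes, both BAR
 * enable registers are updated accordingly, and unused windows are
 * cleared first (presumably so that stale bootloader settings cannot
 * interfere).
 */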
1169
1170 static int mv_xor_probe(struct platform_device *pdev)
1171 {
1172         const struct mbus_dram_target_info *dram;
1173         struct mv_xor_device *xordev;
1174         struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
1175         struct resource *res;
1176         int i, ret;
1177
1178         dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
1179
1180         xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
1181         if (!xordev)
1182                 return -ENOMEM;
1183
1184         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1185         if (!res)
1186                 return -ENODEV;
1187
1188         xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
1189                                         resource_size(res));
1190         if (!xordev->xor_base)
1191                 return -EBUSY;
1192
1193         res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1194         if (!res)
1195                 return -ENODEV;
1196
1197         xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1198                                              resource_size(res));
1199         if (!xordev->xor_high_base)
1200                 return -EBUSY;
1201
1202         platform_set_drvdata(pdev, xordev);
1203
1204         /*
1205          * (Re-)program MBUS remapping windows if we are asked to.
1206          */
1207         dram = mv_mbus_dram_info();
1208         if (dram)
1209                 mv_xor_conf_mbus_windows(xordev, dram);
1210
1211         /* Not all platforms can gate the clock, so it is not
1212          * an error if the clock does not exists.
1213          */
1214         xordev->clk = clk_get(&pdev->dev, NULL);
1215         if (!IS_ERR(xordev->clk))
1216                 clk_prepare_enable(xordev->clk);
1217
1218         if (pdev->dev.of_node) {
1219                 struct device_node *np;
1220                 int i = 0;
1221
1222                 for_each_child_of_node(pdev->dev.of_node, np) {
1223                         dma_cap_mask_t cap_mask;
1224                         int irq;
1225
1226                         dma_cap_zero(cap_mask);
1227                         if (of_property_read_bool(np, "dmacap,memcpy"))
1228                                 dma_cap_set(DMA_MEMCPY, cap_mask);
1229                         if (of_property_read_bool(np, "dmacap,xor"))
1230                                 dma_cap_set(DMA_XOR, cap_mask);
1231                         if (of_property_read_bool(np, "dmacap,interrupt"))
1232                                 dma_cap_set(DMA_INTERRUPT, cap_mask);
1233
1234                         irq = irq_of_parse_and_map(np, 0);
1235                         if (!irq) {
1236                                 ret = -ENODEV;
1237                                 goto err_channel_add;
1238                         }
1239
1240                         xordev->channels[i] =
1241                                 mv_xor_channel_add(xordev, pdev, i,
1242                                                    cap_mask, irq);
1243                         if (IS_ERR(xordev->channels[i])) {
1244                                 ret = PTR_ERR(xordev->channels[i]);
1245                                 xordev->channels[i] = NULL;
1246                                 irq_dispose_mapping(irq);
1247                                 goto err_channel_add;
1248                         }
1249
1250                         i++;
1251                 }
1252         } else if (pdata && pdata->channels) {
1253                 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1254                         struct mv_xor_channel_data *cd;
1255                         int irq;
1256
1257                         cd = &pdata->channels[i];
1258                         if (!cd) {
1259                                 ret = -ENODEV;
1260                                 goto err_channel_add;
1261                         }
1262
1263                         irq = platform_get_irq(pdev, i);
1264                         if (irq < 0) {
1265                                 ret = irq;
1266                                 goto err_channel_add;
1267                         }
1268
1269                         xordev->channels[i] =
1270                                 mv_xor_channel_add(xordev, pdev, i,
1271                                                    cd->cap_mask, irq);
1272                         if (IS_ERR(xordev->channels[i])) {
1273                                 ret = PTR_ERR(xordev->channels[i]);
1274                                 goto err_channel_add;
1275                         }
1276                 }
1277         }
1278
1279         return 0;
1280
1281 err_channel_add:
1282         for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
1283                 if (xordev->channels[i]) {
1284                         mv_xor_channel_remove(xordev->channels[i]);
1285                         if (pdev->dev.of_node)
1286                                 irq_dispose_mapping(xordev->channels[i]->irq);
1287                 }
1288
1289         if (!IS_ERR(xordev->clk)) {
1290                 clk_disable_unprepare(xordev->clk);
1291                 clk_put(xordev->clk);
1292         }
1293
1294         return ret;
1295 }
1296
1297 static int mv_xor_remove(struct platform_device *pdev)
1298 {
1299         struct mv_xor_device *xordev = platform_get_drvdata(pdev);
1300         int i;
1301
1302         for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1303                 if (xordev->channels[i])
1304                         mv_xor_channel_remove(xordev->channels[i]);
1305         }
1306
1307         if (!IS_ERR(xordev->clk)) {
1308                 clk_disable_unprepare(xordev->clk);
1309                 clk_put(xordev->clk);
1310         }
1311
1312         return 0;
1313 }
1314
1315 #ifdef CONFIG_OF
1316 static struct of_device_id mv_xor_dt_ids[] = {
1317        { .compatible = "marvell,orion-xor", },
1318        {},
1319 };
1320 MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
1321 #endif
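/*
 * Illustrative (not authoritative) device tree fragment matching the
 * parsing code in mv_xor_probe(): one node per XOR engine with two
 * register ranges, and one child node per channel carrying its interrupt
 * plus "dmacap,*" capability properties; see the marvell,orion-xor
 * binding document for the exact format:
 *
 *	xor@60800 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0x60800 0x100>,
 *		      <0x60a00 0x100>;
 *
 *		xor00 {
 *			interrupts = <51>;
 *			dmacap,memcpy;
 *			dmacap,xor;
 *		};
 *	};
 */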
1322
1323 static struct platform_driver mv_xor_driver = {
1324         .probe          = mv_xor_probe,
1325         .remove         = mv_xor_remove,
1326         .driver         = {
1327                 .owner          = THIS_MODULE,
1328                 .name           = MV_XOR_NAME,
1329                 .of_match_table = of_match_ptr(mv_xor_dt_ids),
1330         },
1331 };
1332
1333
1334 static int __init mv_xor_init(void)
1335 {
1336         return platform_driver_register(&mv_xor_driver);
1337 }
1338 module_init(mv_xor_init);
1339
1340 /* it's currently unsafe to unload this module */
1341 #if 0
1342 static void __exit mv_xor_exit(void)
1343 {
1344         platform_driver_unregister(&mv_xor_driver);
1345         return;
1346 }
1347
1348 module_exit(mv_xor_exit);
1349 #endif
1350
1351 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1352 MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1353 MODULE_LICENSE("GPL");