/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"

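/*
 * Derive an address-width value from a DMA mask; used below to report
 * src/dst_addr_widths to the dmaengine core.
 */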
#define CCP_DMA_WIDTH(_mask)            \
({                                      \
        u64 mask = _mask + 1;           \
        (mask == 0) ? 64 : fls64(mask); \
})

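/* Return every DMA command on @list to the command cache */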
static void ccp_free_cmd_resources(struct ccp_device *ccp,
                                   struct list_head *list)
{
        struct ccp_dma_cmd *cmd, *ctmp;

        list_for_each_entry_safe(cmd, ctmp, list, entry) {
                list_del(&cmd->entry);
                kmem_cache_free(ccp->dma_cmd_cache, cmd);
        }
}

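/* Free every descriptor on @list, along with its active and pending commands */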
static void ccp_free_desc_resources(struct ccp_device *ccp,
                                    struct list_head *list)
{
        struct ccp_dma_desc *desc, *dtmp;

        list_for_each_entry_safe(desc, dtmp, list, entry) {
                ccp_free_cmd_resources(ccp, &desc->active);
                ccp_free_cmd_resources(ccp, &desc->pending);

                list_del(&desc->entry);
                kmem_cache_free(ccp->dma_desc_cache, desc);
        }
}

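/* device_free_chan_resources callback: drop everything queued on the channel */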
static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

        spin_lock_irqsave(&chan->lock, flags);

        ccp_free_desc_resources(chan->ccp, &chan->complete);
        ccp_free_desc_resources(chan->ccp, &chan->active);
        ccp_free_desc_resources(chan->ccp, &chan->pending);
        ccp_free_desc_resources(chan->ccp, &chan->created);

        spin_unlock_irqrestore(&chan->lock, flags);
}

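/* Free the descriptors on @list that the client has already acknowledged */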
static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
                                       struct list_head *list)
{
        struct ccp_dma_desc *desc, *dtmp;

        list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
                if (!async_tx_test_ack(&desc->tx_desc))
                        continue;

                dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

                ccp_free_cmd_resources(ccp, &desc->active);
                ccp_free_cmd_resources(ccp, &desc->pending);

                list_del(&desc->entry);
                kmem_cache_free(ccp->dma_desc_cache, desc);
        }
}

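/* Tasklet handler: reclaim acknowledged descriptors from the complete list */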
static void ccp_do_cleanup(unsigned long data)
{
        struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
                dma_chan_name(&chan->dma_chan));

        spin_lock_irqsave(&chan->lock, flags);

        ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

        spin_unlock_irqrestore(&chan->lock, flags);
}

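/*
 * Move the next pending command of @desc to its active list and hand it
 * to the CCP.  -EINPROGRESS and -EBUSY mean the command was accepted
 * (queued or backlogged), so they are treated as success.
 */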
static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
        struct ccp_dma_cmd *cmd;
        int ret;

        cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
        list_move(&cmd->entry, &desc->active);

        dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
                desc->tx_desc.cookie, cmd);

        ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
        if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
                return 0;

        dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
                ret, desc->tx_desc.cookie, cmd);

        return ret;
}

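/* Retire the command at the head of the descriptor's active list, if any */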
static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
        struct ccp_dma_cmd *cmd;

        cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
                                       entry);
        if (!cmd)
                return;

        dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
                __func__, desc->tx_desc.cookie, cmd);

        list_del(&cmd->entry);
        kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

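/* Advance to the next active descriptor; caller must hold chan->lock */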
static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
                                                struct ccp_dma_desc *desc)
{
        /* Move current DMA descriptor to the complete list */
        if (desc)
                list_move(&desc->entry, &chan->complete);

        /* Get the next DMA descriptor on the active list */
        desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
                                        entry);

        return desc;
}

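/*
 * Retire the command that just finished on @desc.  When a descriptor
 * has no work left (or has failed), complete its cookie, run the client
 * callback and any dependencies, and move on to the next active
 * descriptor.  Returns the next descriptor with commands still pending,
 * or NULL once the active list has been drained.
 */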
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
                                                   struct ccp_dma_desc *desc)
{
        struct dma_async_tx_descriptor *tx_desc;
        unsigned long flags;

        /* Loop over descriptors until one is found with commands */
        do {
                if (desc) {
                        /* Remove the DMA command from the list and free it */
                        ccp_free_active_cmd(desc);

                        if (!list_empty(&desc->pending)) {
                                /* No errors, keep going */
                                if (desc->status != DMA_ERROR)
                                        return desc;

                                /* Error, free remaining commands and move on */
                                ccp_free_cmd_resources(desc->ccp,
                                                       &desc->pending);
                        }

                        tx_desc = &desc->tx_desc;
                } else {
                        tx_desc = NULL;
                }

                spin_lock_irqsave(&chan->lock, flags);

                if (desc) {
                        if (desc->status != DMA_ERROR)
                                desc->status = DMA_COMPLETE;

                        dev_dbg(desc->ccp->dev,
                                "%s - tx %d complete, status=%u\n", __func__,
                                desc->tx_desc.cookie, desc->status);

                        dma_cookie_complete(tx_desc);
                }

                desc = __ccp_next_dma_desc(chan, desc);

                spin_unlock_irqrestore(&chan->lock, flags);

                if (tx_desc) {
                        if (tx_desc->callback &&
                            (tx_desc->flags & DMA_PREP_INTERRUPT))
                                tx_desc->callback(tx_desc->callback_param);

                        dma_run_dependencies(tx_desc);
                }
        } while (desc);

        return NULL;
}

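/*
 * Splice the pending list onto the active list; caller must hold
 * chan->lock.  Returns the first newly activated descriptor if the
 * channel was idle (so the caller must kick off processing), else NULL.
 */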
static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
        struct ccp_dma_desc *desc;

        if (list_empty(&chan->pending))
                return NULL;

        desc = list_empty(&chan->active)
                ? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
                : NULL;

        list_splice_tail_init(&chan->pending, &chan->active);

        return desc;
}

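/*
 * Completion callback for a CCP command.  Also invoked directly with
 * err == 0 to (re)start processing.  Retires finished descriptors and
 * keeps issuing commands until the queue empties, the channel is
 * paused, or a submission fails.
 */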
static void ccp_cmd_callback(void *data, int err)
{
        struct ccp_dma_desc *desc = data;
        struct ccp_dma_chan *chan;
        int ret;

        if (err == -EINPROGRESS)
                return;

        chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
                            dma_chan);

        dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
                __func__, desc->tx_desc.cookie, err);

        if (err)
                desc->status = DMA_ERROR;

        while (true) {
                /* Check for DMA descriptor completion */
                desc = ccp_handle_active_desc(chan, desc);

                /* Don't submit cmd if no descriptor or DMA is paused */
                if (!desc || (chan->status == DMA_PAUSED))
                        break;

                ret = ccp_issue_next_cmd(desc);
                if (!ret)
                        break;

                desc->status = DMA_ERROR;
        }

        tasklet_schedule(&chan->cleanup_tasklet);
}

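/* tx_submit callback: assign a cookie and move the descriptor from the created list to the pending list */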
static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
        struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
                                                 tx_desc);
        struct ccp_dma_chan *chan;
        dma_cookie_t cookie;
        unsigned long flags;

        chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

        spin_lock_irqsave(&chan->lock, flags);

        cookie = dma_cookie_assign(tx_desc);
        list_move_tail(&desc->entry, &chan->pending);

        spin_unlock_irqrestore(&chan->lock, flags);

        dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
                __func__, cookie);

        return cookie;
}

static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
        struct ccp_dma_cmd *cmd;

        cmd = kmem_cache_zalloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);

        return cmd;
}

static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
                                               unsigned long flags)
{
        struct ccp_dma_desc *desc;

        desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
        if (!desc)
                return NULL;

        dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
        desc->tx_desc.flags = flags;
        desc->tx_desc.tx_submit = ccp_tx_submit;
        desc->ccp = chan->ccp;
        INIT_LIST_HEAD(&desc->pending);
        INIT_LIST_HEAD(&desc->active);
        desc->status = DMA_IN_PROGRESS;

        return desc;
}

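/*
 * Build a DMA descriptor for a src/dst scatterlist pair.  The transfer
 * is broken into one CCP passthrough (no-op) command per overlapping
 * chunk of the two lists; each command copies the smaller of the bytes
 * remaining in the current src and dst entries.
 */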
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
                                            struct scatterlist *dst_sg,
                                            unsigned int dst_nents,
                                            struct scatterlist *src_sg,
                                            unsigned int src_nents,
                                            unsigned long flags)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_device *ccp = chan->ccp;
        struct ccp_dma_desc *desc;
        struct ccp_dma_cmd *cmd;
        struct ccp_cmd *ccp_cmd;
        struct ccp_passthru_nomap_engine *ccp_pt;
        unsigned int src_offset, src_len;
        unsigned int dst_offset, dst_len;
        unsigned int len;
        unsigned long sflags;
        size_t total_len;

        if (!dst_sg || !src_sg)
                return NULL;

        if (!dst_nents || !src_nents)
                return NULL;

        desc = ccp_alloc_dma_desc(chan, flags);
        if (!desc)
                return NULL;

        total_len = 0;

        src_len = sg_dma_len(src_sg);
        src_offset = 0;

        dst_len = sg_dma_len(dst_sg);
        dst_offset = 0;

        while (true) {
                if (!src_len) {
                        src_nents--;
                        if (!src_nents)
                                break;

                        src_sg = sg_next(src_sg);
                        if (!src_sg)
                                break;

                        src_len = sg_dma_len(src_sg);
                        src_offset = 0;
                        continue;
                }

                if (!dst_len) {
                        dst_nents--;
                        if (!dst_nents)
                                break;

                        dst_sg = sg_next(dst_sg);
                        if (!dst_sg)
                                break;

                        dst_len = sg_dma_len(dst_sg);
                        dst_offset = 0;
                        continue;
                }

                len = min(dst_len, src_len);

                cmd = ccp_alloc_dma_cmd(chan);
                if (!cmd)
                        goto err;

                ccp_cmd = &cmd->ccp_cmd;
                ccp_pt = &ccp_cmd->u.passthru_nomap;
                ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
                ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
                ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
                ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
                ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
                ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
                ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
                ccp_pt->src_len = len;
                ccp_pt->final = 1;
                ccp_cmd->callback = ccp_cmd_callback;
                ccp_cmd->data = desc;

                list_add_tail(&cmd->entry, &desc->pending);

                dev_dbg(ccp->dev,
                        "%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
                        cmd, &ccp_pt->src_dma,
                        &ccp_pt->dst_dma, ccp_pt->src_len);

                total_len += len;

                src_len -= len;
                src_offset += len;

                dst_len -= len;
                dst_offset += len;
        }

        desc->len = total_len;

        if (list_empty(&desc->pending))
                goto err;

        dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

        spin_lock_irqsave(&chan->lock, sflags);

        list_add_tail(&desc->entry, &chan->created);

        spin_unlock_irqrestore(&chan->lock, sflags);

        return desc;

err:
        ccp_free_cmd_resources(ccp, &desc->pending);
        kmem_cache_free(ccp->dma_desc_cache, desc);

        return NULL;
}

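/* Wrap a single memcpy request in one-entry scatterlists and build a descriptor */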
static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
        struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
        unsigned long flags)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        struct scatterlist dst_sg, src_sg;

        dev_dbg(chan->ccp->dev,
                "%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
                __func__, &src, &dst, len, flags);

        sg_init_table(&dst_sg, 1);
        sg_dma_address(&dst_sg) = dst;
        sg_dma_len(&dst_sg) = len;

        sg_init_table(&src_sg, 1);
        sg_dma_address(&src_sg) = src;
        sg_dma_len(&src_sg) = len;

        desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
        if (!desc)
                return NULL;

        return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
        struct dma_chan *dma_chan, struct scatterlist *dst_sg,
        unsigned int dst_nents, struct scatterlist *src_sg,
        unsigned int src_nents, unsigned long flags)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;

        dev_dbg(chan->ccp->dev,
                "%s - src=%p, src_nents=%u, dst=%p, dst_nents=%u, flags=%#lx\n",
                __func__, src_sg, src_nents, dst_sg, dst_nents, flags);

        desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
                               flags);
        if (!desc)
                return NULL;

        return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
        struct dma_chan *dma_chan, unsigned long flags)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;

        desc = ccp_alloc_dma_desc(chan, flags);
        if (!desc)
                return NULL;

        return &desc->tx_desc;
}

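/*
 * device_issue_pending callback: activate submitted descriptors and,
 * if the channel was idle, start processing them.
 */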
static void ccp_issue_pending(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s\n", __func__);

        spin_lock_irqsave(&chan->lock, flags);

        desc = __ccp_pending_to_active(chan);

        spin_unlock_irqrestore(&chan->lock, flags);

        /* If there was nothing active, start processing */
        if (desc)
                ccp_cmd_callback(desc, 0);
}

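/*
 * device_tx_status callback: report the cookie state, consulting the
 * channel's complete list for a descriptor-specific status (e.g. an
 * error) that dma_cookie_status() alone cannot see.
 */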
static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
                                     dma_cookie_t cookie,
                                     struct dma_tx_state *state)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        enum dma_status ret;
        unsigned long flags;

        if (chan->status == DMA_PAUSED) {
                ret = DMA_PAUSED;
                goto out;
        }

        ret = dma_cookie_status(dma_chan, cookie, state);
        if (ret == DMA_COMPLETE) {
                spin_lock_irqsave(&chan->lock, flags);

                /* Get status from complete chain, if still there */
                list_for_each_entry(desc, &chan->complete, entry) {
                        if (desc->tx_desc.cookie != cookie)
                                continue;

                        ret = desc->status;
                        break;
                }

                spin_unlock_irqrestore(&chan->lock, flags);
        }

out:
        dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

        return ret;
}

static int ccp_pause(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);

        chan->status = DMA_PAUSED;

        /* TODO: Wait for active DMA to complete before returning? */

        return 0;
}

static int ccp_resume(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
                                        entry);

        spin_unlock_irqrestore(&chan->lock, flags);

        /* Indicate the channel is running again */
        chan->status = DMA_IN_PROGRESS;

        /* If there was something active, re-start */
        if (desc)
                ccp_cmd_callback(desc, 0);

        return 0;
}

static int ccp_terminate_all(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s\n", __func__);

        /* TODO: Wait for active DMA to complete before continuing */

        spin_lock_irqsave(&chan->lock, flags);

        /* TODO: Purge the complete list? */
        ccp_free_desc_resources(chan->ccp, &chan->active);
        ccp_free_desc_resources(chan->ccp, &chan->pending);
        ccp_free_desc_resources(chan->ccp, &chan->created);

        spin_unlock_irqrestore(&chan->lock, flags);

        return 0;
}

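/**
 * ccp_dmaengine_register - register the CCP as a DMA provider
 * @ccp: the CCP device
 *
 * Create one DMA channel per command queue, set up the slab caches used
 * to track commands and descriptors, advertise memcpy/sg/interrupt
 * capabilities, and register with the dmaengine core.
 */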
int ccp_dmaengine_register(struct ccp_device *ccp)
{
        struct ccp_dma_chan *chan;
        struct dma_device *dma_dev = &ccp->dma_dev;
        struct dma_chan *dma_chan;
        char *dma_cmd_cache_name;
        char *dma_desc_cache_name;
        unsigned int i;
        int ret;

        ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
                                         sizeof(*(ccp->ccp_dma_chan)),
                                         GFP_KERNEL);
        if (!ccp->ccp_dma_chan)
                return -ENOMEM;

        dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
                                            "%s-dmaengine-cmd-cache",
                                            ccp->name);
        if (!dma_cmd_cache_name)
                return -ENOMEM;

        ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
                                               sizeof(struct ccp_dma_cmd),
                                               sizeof(void *),
                                               SLAB_HWCACHE_ALIGN, NULL);
        if (!ccp->dma_cmd_cache)
                return -ENOMEM;

        dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
                                             "%s-dmaengine-desc-cache",
                                             ccp->name);
        if (!dma_desc_cache_name) {
                ret = -ENOMEM;
                goto err_cache;
        }

        ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
                                                sizeof(struct ccp_dma_desc),
                                                sizeof(void *),
                                                SLAB_HWCACHE_ALIGN, NULL);
        if (!ccp->dma_desc_cache) {
                ret = -ENOMEM;
                goto err_cache;
        }

        dma_dev->dev = ccp->dev;
        dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
        dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
        dma_dev->directions = DMA_MEM_TO_MEM;
        dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        dma_cap_set(DMA_SG, dma_dev->cap_mask);
        dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

        INIT_LIST_HEAD(&dma_dev->channels);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                chan = ccp->ccp_dma_chan + i;
                dma_chan = &chan->dma_chan;

                chan->ccp = ccp;

                spin_lock_init(&chan->lock);
                INIT_LIST_HEAD(&chan->created);
                INIT_LIST_HEAD(&chan->pending);
                INIT_LIST_HEAD(&chan->active);
                INIT_LIST_HEAD(&chan->complete);

                tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
                             (unsigned long)chan);

                dma_chan->device = dma_dev;
                dma_cookie_init(dma_chan);

                list_add_tail(&dma_chan->device_node, &dma_dev->channels);
        }

        dma_dev->device_free_chan_resources = ccp_free_chan_resources;
        dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
        dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
        dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
        dma_dev->device_issue_pending = ccp_issue_pending;
        dma_dev->device_tx_status = ccp_tx_status;
        dma_dev->device_pause = ccp_pause;
        dma_dev->device_resume = ccp_resume;
        dma_dev->device_terminate_all = ccp_terminate_all;

        ret = dma_async_device_register(dma_dev);
        if (ret)
                goto err_reg;

        return 0;

err_reg:
        kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
        kmem_cache_destroy(ccp->dma_cmd_cache);

        return ret;
}

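/**
 * ccp_dmaengine_unregister - unregister the CCP DMA channels
 * @ccp: the CCP device
 */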
void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
        struct dma_device *dma_dev = &ccp->dma_dev;

        dma_async_device_unregister(dma_dev);

        kmem_cache_destroy(ccp->dma_desc_cache);
        kmem_cache_destroy(ccp->dma_cmd_cache);
}