/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"

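/* Derive the DMA address width, in bits, from a dma_get_mask() value */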
#define CCP_DMA_WIDTH(_mask)		\
({					\
	u64 mask = _mask + 1;		\
	(mask == 0) ? 64 : fls64(mask);	\
})

static void ccp_free_cmd_resources(struct ccp_device *ccp,
				   struct list_head *list)
{
	struct ccp_dma_cmd *cmd, *ctmp;

	list_for_each_entry_safe(cmd, ctmp, list, entry) {
		list_del(&cmd->entry);
		kmem_cache_free(ccp->dma_cmd_cache, cmd);
	}
}

static void ccp_free_desc_resources(struct ccp_device *ccp,
				    struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe(desc, dtmp, list, entry) {
		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

	spin_lock_irqsave(&chan->lock, flags);

	ccp_free_desc_resources(chan->ccp, &chan->complete);
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);
}

static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
				       struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
		if (!async_tx_test_ack(&desc->tx_desc))
			continue;

		dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

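/* Cleanup tasklet: free descriptors on the complete list once the client
 * has acknowledged them.
 */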
static void ccp_do_cleanup(unsigned long data)
{
	struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
		dma_chan_name(&chan->dma_chan));

	spin_lock_irqsave(&chan->lock, flags);

	ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

	spin_unlock_irqrestore(&chan->lock, flags);
}

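/* Move the first pending command of a descriptor to its active list and
 * hand it to the CCP for execution.
 */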
static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;
	int ret;

	cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
	list_move(&cmd->entry, &desc->active);

	dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
		desc->tx_desc.cookie, cmd);

	ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
	if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
		return 0;

	dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
		ret, desc->tx_desc.cookie, cmd);

	return ret;
}

static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;

	cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
				       entry);
	if (!cmd)
		return;

	dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
		__func__, desc->tx_desc.cookie, cmd);

	list_del(&cmd->entry);
	kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
						struct ccp_dma_desc *desc)
{
	/* Move current DMA descriptor to the complete list */
	if (desc)
		list_move(&desc->entry, &chan->complete);

	/* Get the next DMA descriptor on the active list */
	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	return desc;
}

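/* Handle completion on the current descriptor: free its finished command
 * and either return it if commands remain to be issued, or complete the
 * cookie, run callbacks and dependencies, and advance to the next active
 * descriptor.
 */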
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
						   struct ccp_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			/* Remove the DMA command from the list and free it */
			ccp_free_active_cmd(desc);

			if (!list_empty(&desc->pending)) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;

				/* Error, free remaining commands and move on */
				ccp_free_cmd_resources(desc->ccp,
						       &desc->pending);
			}

			tx_desc = &desc->tx_desc;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->lock, flags);

		if (desc) {
			if (desc->status != DMA_ERROR)
				desc->status = DMA_COMPLETE;

			dev_dbg(desc->ccp->dev,
				"%s - tx %d complete, status=%u\n", __func__,
				desc->tx_desc.cookie, desc->status);

			dma_cookie_complete(tx_desc);
		}

		desc = __ccp_next_dma_desc(chan, desc);

		spin_unlock_irqrestore(&chan->lock, flags);

		if (tx_desc) {
			if (tx_desc->callback &&
			    (tx_desc->flags & DMA_PREP_INTERRUPT))
				tx_desc->callback(tx_desc->callback_param);

			dma_run_dependencies(tx_desc);
		}
	} while (desc);

	return NULL;
}

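/* Splice the pending list onto the active list; return the first pending
 * descriptor if the channel was previously idle, otherwise NULL.
 */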
static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
	struct ccp_dma_desc *desc;

	if (list_empty(&chan->pending))
		return NULL;

	desc = list_empty(&chan->active)
		? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
		: NULL;

	list_splice_tail_init(&chan->pending, &chan->active);

	return desc;
}

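/* Completion callback for an individual CCP command.  Drives descriptor
 * completion and, unless the channel is paused, issues the next command.
 */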
static void ccp_cmd_callback(void *data, int err)
{
	struct ccp_dma_desc *desc = data;
	struct ccp_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
			    dma_chan);

	dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
		__func__, desc->tx_desc.cookie, err);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = ccp_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc || (chan->status == DMA_PAUSED))
			break;

		ret = ccp_issue_next_cmd(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}

	tasklet_schedule(&chan->cleanup_tasklet);
}

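/* dmaengine tx_submit hook: assign a cookie and queue the descriptor on
 * the channel's pending list.
 */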
static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
	struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
						 tx_desc);
	struct ccp_dma_chan *chan;
	dma_cookie_t cookie;
	unsigned long flags;

	chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx_desc);
	list_add_tail(&desc->entry, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
		__func__, cookie);

	return cookie;
}

static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
	struct ccp_dma_cmd *cmd;

	cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
	if (cmd)
		memset(cmd, 0, sizeof(*cmd));

	return cmd;
}

static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
					       unsigned long flags)
{
	struct ccp_dma_desc *desc;

	desc = kmem_cache_alloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	memset(desc, 0, sizeof(*desc));

	dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
	desc->tx_desc.flags = flags;
	desc->tx_desc.tx_submit = ccp_tx_submit;
	desc->ccp = chan->ccp;
	INIT_LIST_HEAD(&desc->pending);
	INIT_LIST_HEAD(&desc->active);
	desc->status = DMA_IN_PROGRESS;

	return desc;
}

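/* Build a DMA descriptor containing one passthrough (no-op transform)
 * CCP command for each contiguous overlap of the source and destination
 * scatterlists, then add it to the channel's pending list.
 */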
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
					    struct scatterlist *dst_sg,
					    unsigned int dst_nents,
					    struct scatterlist *src_sg,
					    unsigned int src_nents,
					    unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_device *ccp = chan->ccp;
	struct ccp_dma_desc *desc;
	struct ccp_dma_cmd *cmd;
	struct ccp_cmd *ccp_cmd;
	struct ccp_passthru_nomap_engine *ccp_pt;
	unsigned int src_offset, src_len;
	unsigned int dst_offset, dst_len;
	unsigned int len;
	unsigned long sflags;
	size_t total_len;

	if (!dst_sg || !src_sg)
		return NULL;
	if (!dst_nents || !src_nents)
		return NULL;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	total_len = 0;

	src_len = sg_dma_len(src_sg);
	src_offset = 0;
	dst_len = sg_dma_len(dst_sg);
	dst_offset = 0;

	while (true) {
		if (!src_len) {
			src_nents--;
			if (!src_nents)
				break;
			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;
			src_len = sg_dma_len(src_sg);
			src_offset = 0;
			continue;
		}

		if (!dst_len) {
			dst_nents--;
			if (!dst_nents)
				break;
			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;
			dst_len = sg_dma_len(dst_sg);
			dst_offset = 0;
			continue;
		}

		len = min(dst_len, src_len);

		cmd = ccp_alloc_dma_cmd(chan);
		if (!cmd)
			goto err;

		ccp_cmd = &cmd->ccp_cmd;
		ccp_pt = &ccp_cmd->u.passthru_nomap;
		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
		ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
		ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
		ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
		ccp_pt->src_len = len;
		ccp_pt->final = 1;
		ccp_cmd->callback = ccp_cmd_callback;
		ccp_cmd->data = desc;

		list_add_tail(&cmd->entry, &desc->pending);

		dev_dbg(ccp->dev,
			"%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
			cmd, &ccp_pt->src_dma,
			&ccp_pt->dst_dma, ccp_pt->src_len);

		total_len += len;

		src_len -= len;
		src_offset += len;
		dst_len -= len;
		dst_offset += len;
	}

	desc->len = total_len;

	if (list_empty(&desc->pending))
		goto err;

	dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

	spin_lock_irqsave(&chan->lock, sflags);
	list_add_tail(&desc->entry, &chan->pending);
	spin_unlock_irqrestore(&chan->lock, sflags);

	return desc;

err:
	ccp_free_cmd_resources(ccp, &desc->pending);
	kmem_cache_free(ccp->dma_desc_cache, desc);

	return NULL;
}

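/* Wrap the source and destination addresses in single-entry scatterlists
 * and build a descriptor for the copy.
 */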
static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
	struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	struct scatterlist dst_sg, src_sg;

	dev_dbg(chan->ccp->dev,
		"%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
		__func__, &src, &dst, len, flags);

	sg_init_table(&dst_sg, 1);
	sg_dma_address(&dst_sg) = dst;
	sg_dma_len(&dst_sg) = len;

	sg_init_table(&src_sg, 1);
	sg_dma_address(&src_sg) = src;
	sg_dma_len(&src_sg) = len;

	desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
	struct dma_chan *dma_chan, struct scatterlist *dst_sg,
	unsigned int dst_nents, struct scatterlist *src_sg,
	unsigned int src_nents, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	dev_dbg(chan->ccp->dev,
		"%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
		__func__, src_sg, src_nents, dst_sg, dst_nents, flags);

	desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
			       flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
	struct dma_chan *dma_chan, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

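/* Move all pending descriptors to the active list and, if the channel
 * was idle, kick off processing of the first one.
 */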
static void ccp_issue_pending(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);

	desc = __ccp_pending_to_active(chan);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* If there was nothing active, start processing */
	if (desc)
		ccp_cmd_callback(desc, 0);
}

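/* Report the status of a cookie, preferring the status recorded on the
 * channel's complete list if the descriptor is still there.
 */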
static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *state)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	enum dma_status ret;
	unsigned long flags;

	if (chan->status == DMA_PAUSED) {
		ret = DMA_PAUSED;
		goto out;
	}

	ret = dma_cookie_status(dma_chan, cookie, state);
	if (ret == DMA_COMPLETE) {
		spin_lock_irqsave(&chan->lock, flags);

		/* Get status from complete chain, if still there */
		list_for_each_entry(desc, &chan->complete, entry) {
			if (desc->tx_desc.cookie != cookie)
				continue;

			ret = desc->status;
			break;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

out:
	dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

	return ret;
}

static int ccp_pause(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);

	chan->status = DMA_PAUSED;

	/*TODO: Wait for active DMA to complete before returning? */

	return 0;
}

static int ccp_resume(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* Indicate the channel is running again */
	chan->status = DMA_IN_PROGRESS;

	/* If there was something active, re-start */
	if (desc)
		ccp_cmd_callback(desc, 0);

	return 0;
}

static int ccp_terminate_all(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	/*TODO: Wait for active DMA to complete before continuing */

	spin_lock_irqsave(&chan->lock, flags);

	/*TODO: Purge the complete list? */
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

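/* Register one dmaengine channel per CCP command queue, along with the
 * slab caches used for command and descriptor allocation.
 */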
int ccp_dmaengine_register(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_device *dma_dev = &ccp->dma_dev;
	struct dma_chan *dma_chan;
	char *dma_cmd_cache_name;
	char *dma_desc_cache_name;
	unsigned int i;
	int ret;

	ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
					 sizeof(*(ccp->ccp_dma_chan)),
					 GFP_KERNEL);
	if (!ccp->ccp_dma_chan)
		return -ENOMEM;

	dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					    "%s-dmaengine-cmd-cache",
					    ccp->name);
	if (!dma_cmd_cache_name)
		return -ENOMEM;

	ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
					       sizeof(struct ccp_dma_cmd),
					       sizeof(void *),
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_cmd_cache)
		return -ENOMEM;

	dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					     "%s-dmaengine-desc-cache",
					     ccp->name);
	if (!dma_desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
						sizeof(struct ccp_dma_desc),
						sizeof(void *),
						SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = ccp->dev;
	dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SG, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;

		chan->ccp = ccp;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->pending);
		INIT_LIST_HEAD(&chan->active);
		INIT_LIST_HEAD(&chan->complete);

		tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
			     (unsigned long)chan);

		dma_chan->device = dma_dev;
		dma_cookie_init(dma_chan);

		list_add_tail(&dma_chan->device_node, &dma_dev->channels);
	}

	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
	dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
	dma_dev->device_issue_pending = ccp_issue_pending;
	dma_dev->device_tx_status = ccp_tx_status;
	dma_dev->device_pause = ccp_pause;
	dma_dev->device_resume = ccp_resume;
	dma_dev->device_terminate_all = ccp_terminate_all;

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
	kmem_cache_destroy(ccp->dma_cmd_cache);

	return ret;
}

void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
	struct dma_device *dma_dev = &ccp->dma_dev;

	dma_async_device_unregister(dma_dev);

	kmem_cache_destroy(ccp->dma_desc_cache);
	kmem_cache_destroy(ccp->dma_cmd_cache);
}