/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */
#define pr_fmt(fmt) "%20s: " fmt, __func__
#define prn(num) pr_debug(#num "=%d\n", num)
#define prx(num) pr_debug(#num "=%x\n", num)
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
#define DST_MAXBURST	4
#define DMA_MIN		(DST_MAXBURST * sizeof(u32))
#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
/* OMAP TRM gives bitfields as start:end, where start is the higher bit
   number. For example 7:0 */
#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
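/*
 * Worked example (clarifying note, not from the TRM): the key-size field
 * of the control register occupies bits 4:3, so FLD_MASK(4, 3) evaluates
 * to ((1 << 2) - 1) << 3 = 0x18, and for a 128-bit key ((16 >> 3) - 1) = 1
 * gives FLD_VAL(1, 4, 3) = (1 << 3) & 0x18 = 0x08.
 */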
#define AES_REG_KEY(dd, x)		((dd)->pdata->key_ofs - \
						((x ^ 0x01) * 0x04))
#define AES_REG_IV(dd, x)		((dd)->pdata->iv_ofs + ((x) * 0x04))

#define AES_REG_CTRL(dd)		((dd)->pdata->ctrl_ofs)
#define AES_REG_CTRL_CTR_WIDTH_MASK	GENMASK(8, 7)
#define AES_REG_CTRL_CTR_WIDTH_32	0
#define AES_REG_CTRL_CTR_WIDTH_64	BIT(7)
#define AES_REG_CTRL_CTR_WIDTH_96	BIT(8)
#define AES_REG_CTRL_CTR_WIDTH_128	GENMASK(8, 7)
#define AES_REG_CTRL_CTR		BIT(6)
#define AES_REG_CTRL_CBC		BIT(5)
#define AES_REG_CTRL_KEY_SIZE		GENMASK(4, 3)
#define AES_REG_CTRL_DIRECTION		BIT(2)
#define AES_REG_CTRL_INPUT_READY	BIT(1)
#define AES_REG_CTRL_OUTPUT_READY	BIT(0)
#define AES_REG_CTRL_MASK		GENMASK(24, 2)

#define AES_REG_DATA_N(dd, x)		((dd)->pdata->data_ofs + ((x) * 0x04))
#define AES_REG_REV(dd)			((dd)->pdata->rev_ofs)

#define AES_REG_MASK(dd)		((dd)->pdata->mask_ofs)
#define AES_REG_MASK_SIDLE		BIT(6)
#define AES_REG_MASK_START		BIT(5)
#define AES_REG_MASK_DMA_OUT_EN		BIT(3)
#define AES_REG_MASK_DMA_IN_EN		BIT(2)
#define AES_REG_MASK_SOFTRESET		BIT(1)
#define AES_REG_AUTOIDLE		BIT(0)

#define AES_REG_LENGTH_N(x)		(0x54 + ((x) * 0x04))

#define AES_REG_IRQ_STATUS(dd)		((dd)->pdata->irq_status_ofs)
#define AES_REG_IRQ_ENABLE(dd)		((dd)->pdata->irq_enable_ofs)
#define AES_REG_IRQ_DATA_IN		BIT(1)
#define AES_REG_IRQ_DATA_OUT		BIT(2)
#define DEFAULT_TIMEOUT		(5 * HZ)
#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_GIV		BIT(2)
#define FLAGS_CTR		BIT(3)

#define FLAGS_INIT		BIT(4)
#define FLAGS_FAST		BIT(5)
#define FLAGS_BUSY		BIT(6)

#define AES_BLOCK_WORDS		(AES_BLOCK_SIZE >> 2)
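/*
 * The low nibble of dd->flags selects the cipher mode of the request
 * currently on the hardware; the remaining bits track device state.
 * AES_BLOCK_WORDS is the 16-byte AES block expressed as 32-bit words (4).
 */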
struct omap_aes_ctx {
	struct omap_aes_dev *dd;

	int		keylen;
	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
	unsigned long	flags;
};
struct omap_aes_reqctx {
	unsigned long mode;
};
#define OMAP_AES_QUEUE_LENGTH	1
#define OMAP_AES_CACHE_SIZE	0
struct omap_aes_algs_info {
	struct crypto_alg	*algs_list;
	unsigned int		size;
	unsigned int		registered;
};
struct omap_aes_pdata {
	struct omap_aes_algs_info	*algs_info;
	unsigned int	algs_info_size;

	void		(*trigger)(struct omap_aes_dev *dd, int length);

	u32		key_ofs;
	u32		iv_ofs;
	u32		ctrl_ofs;
	u32		data_ofs;
	u32		rev_ofs;
	u32		mask_ofs;
	u32		irq_enable_ofs;
	u32		irq_status_ofs;

	u32		dma_enable_in;
	u32		dma_enable_out;
	u32		dma_start;

	u32		major_mask;
	u32		major_shift;
	u32		minor_mask;
	u32		minor_shift;
};
struct omap_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;
	struct omap_aes_ctx	*ctx;
	struct device		*dev;
	unsigned long		flags;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct ablkcipher_request	*req;

	/*
	 * total is used by PIO mode for bookkeeping, so introduce
	 * total_save, which is needed to calculate the page order
	 */
	size_t				total;
	size_t				total_save;

	struct scatterlist		*in_sg;
	struct scatterlist		*out_sg;

	/* Buffers for copying for unaligned cases */
	struct scatterlist		in_sgl;
	struct scatterlist		out_sgl;
	struct scatterlist		*orig_out;
	int				sgs_copied;

	struct scatter_walk		in_walk;
	struct scatter_walk		out_walk;
	int				dma_in;
	struct dma_chan			*dma_lch_in;
	int				dma_out;
	struct dma_chan			*dma_lch_out;
	int				in_sg_len;
	int				out_sg_len;
	int				pio_only;
	const struct omap_aes_pdata	*pdata;
};
/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);
#ifdef DEBUG
#define omap_aes_read(dd, offset)				\
({								\
	int _read_ret;						\
	_read_ret = __raw_readl(dd->io_base + offset);		\
	pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n",	\
		 offset, _read_ret);				\
	_read_ret;						\
})
#else
static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}
#endif
#ifdef DEBUG
#define omap_aes_write(dd, offset, value)				\
	do {								\
		pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n",	\
			 offset, value);				\
		__raw_writel(value, dd->io_base + offset);		\
	} while (0)
#else
static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
				  u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}
#endif
static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
				       u32 value, u32 mask)
{
	u32 val;

	val = omap_aes_read(dd, offset);
	val &= ~mask;
	val |= value;
	omap_aes_write(dd, offset, val);
}
static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
			     u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_aes_write(dd, offset, *value);
}
static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
	if (!(dd->flags & FLAGS_INIT))
		dd->flags |= FLAGS_INIT;

	return 0;
}
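/*
 * Load the key and (for CBC/CTR) the IV, then compose the control word:
 * the key-size field in bits 4:3, the mode bits (CBC, or CTR with a
 * 128-bit counter width) and the encrypt/decrypt direction bit.
 */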
static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
	unsigned int key32;
	int i, err;
	u32 val;

	err = omap_aes_hw_init(dd);
	if (err)
		return err;

	key32 = dd->ctx->keylen / sizeof(u32);

	/* it seems a key should always be set even if it has not changed */
	for (i = 0; i < key32; i++) {
		omap_aes_write(dd, AES_REG_KEY(dd, i),
			       __le32_to_cpu(dd->ctx->key[i]));
	}

	if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
		omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);

	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
	if (dd->flags & FLAGS_CBC)
		val |= AES_REG_CTRL_CBC;
	if (dd->flags & FLAGS_CTR)
		val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;

	if (dd->flags & FLAGS_ENCRYPT)
		val |= AES_REG_CTRL_DIRECTION;

	omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);

	return 0;
}
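/*
 * Kick the engine by setting the DMA start bit together with the DMA
 * request enables for whichever channels were successfully acquired.
 */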
static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
{
	u32 mask, val;

	val = dd->pdata->dma_start;

	if (dd->dma_lch_out != NULL)
		val |= dd->pdata->dma_enable_out;
	if (dd->dma_lch_in != NULL)
		val |= dd->pdata->dma_enable_in;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
}
static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
{
	omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
	omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);

	omap_aes_dma_trigger_omap2(dd, length);
}
static void omap_aes_dma_stop(struct omap_aes_dev *dd)
{
	u32 mask;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
}
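/*
 * Bind a tfm context to an AES core. The first core on dev_list is taken
 * and cached in the context, so later requests reuse the same device.
 */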
static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
	struct omap_aes_dev *dd = NULL, *tmp;

	spin_lock_bh(&list_lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &dev_list, list) {
			/* FIXME: take first available aes core */
			dd = tmp;
			break;
		}
		ctx->dd = dd;
	} else {
		/* already found before */
		dd = ctx->dd;
	}
	spin_unlock_bh(&list_lock);

	return dd;
}
static void omap_aes_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}
static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dd->dma_lch_out = NULL;
	dd->dma_lch_in = NULL;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dd->dma_lch_in = dma_request_slave_channel_compat(mask,
							  omap_dma_filter_fn,
							  &dd->dma_in,
							  dd->dev, "rx");
	if (!dd->dma_lch_in) {
		dev_err(dd->dev, "Unable to request in DMA channel\n");
		goto err_dma_in;
	}

	dd->dma_lch_out = dma_request_slave_channel_compat(mask,
							   omap_dma_filter_fn,
							   &dd->dma_out,
							   dd->dev, "tx");
	if (!dd->dma_lch_out) {
		dev_err(dd->dev, "Unable to request out DMA channel\n");
		goto err_dma_out;
	}

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in);
err_dma_in:
	if (err)
		pr_err("error: %d\n", err);
	return err;
}
static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_out);
	dma_release_channel(dd->dma_lch_in);
}
static void sg_copy_buf(void *buf, struct scatterlist *sg,
			unsigned int start, unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_advance(&walk, start);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}
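/*
 * Queue the request on the hardware. In PIO mode only the scatter walks
 * are primed and the DATA_IN interrupt does the rest; otherwise both DMA
 * channels are configured against the single 16-byte data port and the
 * transfer is started via the pdata trigger hook.
 */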
static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
		struct scatterlist *in_sg, struct scatterlist *out_sg,
		int in_sg_len, int out_sg_len)
{
	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct omap_aes_dev *dd = ctx->dd;
	struct dma_async_tx_descriptor *tx_in, *tx_out;
	struct dma_slave_config cfg;
	int ret;

	if (dd->pio_only) {
		scatterwalk_start(&dd->in_walk, dd->in_sg);
		scatterwalk_start(&dd->out_walk, dd->out_sg);

		/* Enable DATAIN interrupt and let it take
		   care of the rest */
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
		return 0;
	}

	dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);

	memset(&cfg, 0, sizeof(cfg));

	cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = DST_MAXBURST;
	cfg.dst_maxburst = DST_MAXBURST;

	/* IN */
	ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_in) {
		dev_err(dd->dev, "IN prep_slave_sg() failed\n");
		return -EINVAL;
	}

	/* No callback necessary */
	tx_in->callback_param = dd;

	/* OUT */
	ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_out) {
		dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
		return -EINVAL;
	}

	tx_out->callback = omap_aes_dma_out_callback;
	tx_out->callback_param = dd;

	dmaengine_submit(tx_in);
	dmaengine_submit(tx_out);

	dma_async_issue_pending(dd->dma_lch_in);
	dma_async_issue_pending(dd->dma_lch_out);

	/* start DMA */
	dd->pdata->trigger(dd, dd->total);

	return 0;
}
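/*
 * Map both scatterlists for DMA (skipped in PIO mode) and hand them to
 * omap_aes_crypt_dma(), unmapping again if the start fails.
 */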
static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
					crypto_ablkcipher_reqtfm(dd->req));
	int err;

	pr_debug("total: %zu\n", dd->total);

	if (!dd->pio_only) {
		err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
				 DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
				 DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}
	}

	err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len,
				 dd->out_sg_len);
	if (err && !dd->pio_only) {
		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
			     DMA_FROM_DEVICE);
	}

	return err;
}
static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	pr_debug("err: %d\n", err);

	dd->flags &= ~FLAGS_BUSY;

	req->base.complete(&req->base, err);
}
static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
	pr_debug("total: %zu\n", dd->total);

	omap_aes_dma_stop(dd);

	dmaengine_terminate_all(dd->dma_lch_in);
	dmaengine_terminate_all(dd->dma_lch_out);

	return 0;
}
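/*
 * The DMA/PIO paths require the total to be a multiple of the AES block
 * size, every sg offset to be 32-bit aligned and every sg length to be
 * block aligned; anything else is bounced through a contiguous buffer.
 */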
static int omap_aes_check_aligned(struct scatterlist *sg, int total)
{
	int len = 0;

	if (!IS_ALIGNED(total, AES_BLOCK_SIZE))
		return -EINVAL;

	while (sg) {
		if (!IS_ALIGNED(sg->offset, 4))
			return -1;
		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return -1;

		len += sg->length;
		sg = sg_next(sg);
	}

	if (len != total)
		return -1;

	return 0;
}
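/*
 * Bounce an unaligned request: copy the input into a freshly allocated
 * page-order buffer and point in_sg/out_sg at single-entry tables, saving
 * the original destination so done_task can copy the result back.
 */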
static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
{
	void *buf_in, *buf_out;
	int pages, total;

	total = ALIGN(dd->total, AES_BLOCK_SIZE);
	pages = get_order(total);

	buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
	buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);

	if (!buf_in || !buf_out) {
		pr_err("Couldn't allocate pages for unaligned cases.\n");
		free_pages((unsigned long)buf_in, pages);
		free_pages((unsigned long)buf_out, pages);
		return -1;
	}

	dd->orig_out = dd->out_sg;

	sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);

	sg_init_table(&dd->in_sgl, 1);
	sg_set_buf(&dd->in_sgl, buf_in, total);
	dd->in_sg = &dd->in_sgl;

	sg_init_table(&dd->out_sgl, 1);
	sg_set_buf(&dd->out_sgl, buf_out, total);
	dd->out_sg = &dd->out_sgl;

	return 0;
}
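/*
 * Dequeue the next request (the queue depth is 1, so at most one request
 * is in flight), map it onto the device and start the transfer. Called
 * from process context via omap_aes_crypt() and from the tasklets.
 */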
static int omap_aes_handle_queue(struct omap_aes_dev *dd,
				 struct ablkcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_aes_ctx *ctx;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0, len;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->total_save = req->nbytes;
	dd->in_sg = req->src;
	dd->out_sg = req->dst;

	if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
	    omap_aes_check_aligned(dd->out_sg, dd->total)) {
		if (omap_aes_copy_sgs(dd))
			pr_err("Failed to copy SGs for unaligned cases\n");
		dd->sgs_copied = 1;
	} else {
		dd->sgs_copied = 0;
	}

	len = ALIGN(dd->total, AES_BLOCK_SIZE);
	dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, len);
	dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, len);
	BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->ctx = ctx;
	ctx->dd = dd;

	err = omap_aes_write_ctrl(dd);
	if (!err)
		err = omap_aes_crypt_dma_start(dd);
	if (err) {
		/* aes_task will not finish it, so do it here */
		omap_aes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret; /* return ret, which is enqueue return value */
}
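/*
 * Completion tasklet: tear down the DMA mappings, copy bounced output back
 * to the caller's scatterlist if one was substituted, complete the request
 * and start the next one.
 */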
static void omap_aes_done_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
	void *buf_in, *buf_out;
	int pages, len;

	pr_debug("enter done_task\n");

	if (!dd->pio_only) {
		dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
				       DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
			     DMA_FROM_DEVICE);
		omap_aes_crypt_dma_stop(dd);
	}

	if (dd->sgs_copied) {
		buf_in = sg_virt(&dd->in_sgl);
		buf_out = sg_virt(&dd->out_sgl);

		sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);

		len = ALIGN(dd->total_save, AES_BLOCK_SIZE);
		pages = get_order(len);
		free_pages((unsigned long)buf_in, pages);
		free_pages((unsigned long)buf_out, pages);
	}

	omap_aes_finish_req(dd, 0);
	omap_aes_handle_queue(dd, NULL);

	pr_debug("exit\n");
}
static void omap_aes_queue_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

	omap_aes_handle_queue(dd, NULL);
}
static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct omap_aes_dev *dd;

	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
		 !!(mode & FLAGS_ENCRYPT),
		 !!(mode & FLAGS_CBC));

	dd = omap_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return omap_aes_handle_queue(dd, req);
}
/* ********************** ALG API ************************************ */
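/*
 * Usage note (illustrative, not part of the driver): once registered, the
 * ablkcipher algorithms below are reached through the generic crypto API,
 * e.g. a kernel user would do roughly:
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *
 * and then submit requests via ablkcipher_request_alloc() and
 * crypto_ablkcipher_encrypt()/crypto_ablkcipher_decrypt().
 */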
static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	pr_debug("enter, keylen: %d\n", keylen);

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}
static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
}

static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CTR);
}
static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
	struct omap_aes_dev *dd = NULL;
	int err;

	/* Find AES device, currently picks the first device */
	spin_lock_bh(&list_lock);
	list_for_each_entry(dd, &dev_list, list) {
		break;
	}
	spin_unlock_bh(&list_lock);

	err = pm_runtime_get_sync(dd->dev);
	if (err < 0) {
		dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
			__func__, err);
		return err;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

	return 0;
}
static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_aes_dev *dd = NULL;

	/* Find AES device, currently picks the first device */
	spin_lock_bh(&list_lock);
	list_for_each_entry(dd, &dev_list, list) {
		break;
	}
	spin_unlock_bh(&list_lock);

	pm_runtime_put_sync(dd->dev);
}
/* ********************** ALGS ************************************ */
static struct crypto_alg algs_ecb_cbc[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-omap",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ecb_encrypt,
		.decrypt	= omap_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-omap",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_cbc_encrypt,
		.decrypt	= omap_aes_cbc_decrypt,
	}
},
};
static struct crypto_alg algs_ctr[] = {
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-omap",
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ctr_encrypt,
		.decrypt	= omap_aes_ctr_decrypt,
	}
},
};
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
};
static const struct omap_aes_pdata omap_aes_pdata_omap2 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc),
	.trigger	= omap_aes_dma_trigger_omap2,
	.key_ofs	= 0x1c,
	.iv_ofs		= 0x20,
	.ctrl_ofs	= 0x30,
	.data_ofs	= 0x34,
	.rev_ofs	= 0x44,
	.mask_ofs	= 0x48,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
	.dma_start	= BIT(5),
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
	{
		.algs_list	= algs_ctr,
		.size		= ARRAY_SIZE(algs_ctr),
	},
};
static const struct omap_aes_pdata omap_aes_pdata_omap3 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.trigger	= omap_aes_dma_trigger_omap2,
	.key_ofs	= 0x1c,
	.iv_ofs		= 0x20,
	.ctrl_ofs	= 0x30,
	.data_ofs	= 0x34,
	.rev_ofs	= 0x44,
	.mask_ofs	= 0x48,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
	.dma_start	= BIT(5),
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};
static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.trigger	= omap_aes_dma_trigger_omap4,
	.key_ofs	= 0x3c,
	.iv_ofs		= 0x40,
	.ctrl_ofs	= 0x50,
	.data_ofs	= 0x60,
	.rev_ofs	= 0x80,
	.mask_ofs	= 0x84,
	.irq_status_ofs = 0x8c,
	.irq_enable_ofs = 0x90,
	.dma_enable_in	= BIT(5),
	.dma_enable_out	= BIT(6),
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};
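/*
 * PIO-mode interrupt handler: on DATA_IN, feed the next 16-byte block from
 * the input scatterlist into the four data registers and arm DATA_OUT; on
 * DATA_OUT, drain the result, account for it against dd->total and either
 * schedule completion or re-arm DATA_IN for the next block.
 */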
static irqreturn_t omap_aes_irq(int irq, void *dev_id)
{
	struct omap_aes_dev *dd = dev_id;
	u32 status, i;
	u32 *src, *dst;

	status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
	if (status & AES_REG_IRQ_DATA_IN) {
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

		BUG_ON(!dd->in_sg);

		BUG_ON(_calc_walked(in) > dd->in_sg->length);

		src = sg_virt(dd->in_sg) + _calc_walked(in);

		for (i = 0; i < AES_BLOCK_WORDS; i++) {
			omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);

			scatterwalk_advance(&dd->in_walk, 4);
			if (dd->in_sg->length == _calc_walked(in)) {
				dd->in_sg = sg_next(dd->in_sg);
				if (dd->in_sg) {
					scatterwalk_start(&dd->in_walk,
							  dd->in_sg);
					src = sg_virt(dd->in_sg) +
					      _calc_walked(in);
				}
			} else {
				src++;
			}
		}

		/* Clear IRQ status */
		status &= ~AES_REG_IRQ_DATA_IN;
		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

		/* Enable DATA_OUT interrupt */
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);

	} else if (status & AES_REG_IRQ_DATA_OUT) {
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

		BUG_ON(!dd->out_sg);

		BUG_ON(_calc_walked(out) > dd->out_sg->length);

		dst = sg_virt(dd->out_sg) + _calc_walked(out);

		for (i = 0; i < AES_BLOCK_WORDS; i++) {
			*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
			scatterwalk_advance(&dd->out_walk, 4);
			if (dd->out_sg->length == _calc_walked(out)) {
				dd->out_sg = sg_next(dd->out_sg);
				if (dd->out_sg) {
					scatterwalk_start(&dd->out_walk,
							  dd->out_sg);
					dst = sg_virt(dd->out_sg) +
					      _calc_walked(out);
				}
			} else {
				dst++;
			}
		}

		dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);

		/* Clear IRQ status */
		status &= ~AES_REG_IRQ_DATA_OUT;
		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

		if (!dd->total)
			/* All bytes read! */
			tasklet_schedule(&dd->done_task);
		else
			/* Enable DATA_IN interrupt for next block */
			omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
	}

	return IRQ_HANDLED;
}
#ifdef CONFIG_OF
static const struct of_device_id omap_aes_of_match[] = {
	{
		.compatible	= "ti,omap2-aes",
		.data		= &omap_aes_pdata_omap2,
	},
	{
		.compatible	= "ti,omap3-aes",
		.data		= &omap_aes_pdata_omap3,
	},
	{
		.compatible	= "ti,omap4-aes",
		.data		= &omap_aes_pdata_omap4,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_aes_of_match);
static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	int err = 0;

	match = of_match_device(of_match_ptr(omap_aes_of_match), dev);
	if (!match) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

	dd->dma_out = -1; /* Dummy value that's unused */
	dd->dma_in = -1; /* Dummy value that's unused */

	dd->pdata = match->data;

err:
	return err;
}
#else
static const struct of_device_id omap_aes_of_match[] = {
	{},
};

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif
static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the DMA out channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!r) {
		dev_err(dev, "no DMA out resource info\n");
		err = -ENODEV;
		goto err;
	}
	dd->dma_out = r->start;

	/* Get the DMA in channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!r) {
		dev_err(dev, "no DMA in resource info\n");
		err = -ENODEV;
		goto err;
	}
	dd->dma_in = r->start;

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_aes_pdata_omap2;

err:
	return err;
}
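/*
 * Probe: map the register window, read the revision register under runtime
 * PM, set up the tasklets, and fall back to PIO over the IRQ when no DMA
 * channels are available, before registering the algorithms.
 */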
static int omap_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_aes_dev *dd;
	struct crypto_alg *algp;
	struct resource res;
	int err = -ENOMEM, i, j, irq = -1;
	u32 reg;

	dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		goto err_data;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	spin_lock_init(&dd->lock);
	crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
			       omap_aes_get_res_pdev(dd, pdev, &res);
	if (err)
		goto err_res;

	dd->io_base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dd->io_base)) {
		err = PTR_ERR(dd->io_base);
		goto err_res;
	}
	dd->phys_base = res.start;

	pm_runtime_enable(dev);
	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		dev_err(dev, "%s: failed to get_sync(%d)\n",
			__func__, err);
		goto err_res;
	}

	omap_aes_dma_stop(dd);

	reg = omap_aes_read(dd, AES_REG_REV(dd));

	pm_runtime_put_sync(dev);

	dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
		 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
		 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
	tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);

	err = omap_aes_dma_init(dd);
	if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
		dd->pio_only = 1;

		irq = platform_get_irq(pdev, 0);
		if (irq < 0) {
			dev_err(dev, "can't get IRQ resource\n");
			goto err_irq;
		}

		err = devm_request_irq(dev, irq, omap_aes_irq, 0,
				       dev_name(dev), dd);
		if (err) {
			dev_err(dev, "Unable to grab omap-aes IRQ\n");
			goto err_irq;
		}
	}

	INIT_LIST_HEAD(&dd->list);
	spin_lock(&list_lock);
	list_add_tail(&dd->list, &dev_list);
	spin_unlock(&list_lock);

	for (i = 0; i < dd->pdata->algs_info_size; i++) {
		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
			algp = &dd->pdata->algs_info[i].algs_list[j];

			pr_debug("reg alg: %s\n", algp->cra_name);
			INIT_LIST_HEAD(&algp->cra_list);

			err = crypto_register_alg(algp);
			if (err)
				goto err_algs;

			dd->pdata->algs_info[i].registered++;
		}
	}

	return 0;
err_algs:
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_alg(
					&dd->pdata->algs_info[i].algs_list[j]);
	if (!dd->pio_only)
		omap_aes_dma_cleanup(dd);
err_irq:
	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	pm_runtime_disable(dev);
err_res:
	dd = NULL;
err_data:
	dev_err(dev, "initialization failed.\n");
	return err;
}
static int omap_aes_remove(struct platform_device *pdev)
{
	struct omap_aes_dev *dd = platform_get_drvdata(pdev);
	int i, j;

	if (!dd)
		return -ENODEV;

	spin_lock(&list_lock);
	list_del(&dd->list);
	spin_unlock(&list_lock);

	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_alg(
					&dd->pdata->algs_info[i].algs_list[j]);

	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	omap_aes_dma_cleanup(dd);
	pm_runtime_disable(dd->dev);
	dd = NULL;

	return 0;
}
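/*
 * System sleep reuses runtime PM: suspend drops the device's usage count
 * and resume takes it back, so the normal runtime PM path powers the
 * module down and up around suspend.
 */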
#ifdef CONFIG_PM_SLEEP
static int omap_aes_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

static int omap_aes_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume);
static struct platform_driver omap_aes_driver = {
	.probe	= omap_aes_probe,
	.remove	= omap_aes_remove,
	.driver	= {
		.name	= "omap-aes",
		.owner	= THIS_MODULE,
		.pm	= &omap_aes_pm_ops,
		.of_match_table	= omap_aes_of_match,
	},
};

module_platform_driver(omap_aes_driver);
MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");