/*
 * Support for ATMEL SHA1/SHA256 HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from omap-sham.c drivers.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
#include "atmel-sha-regs.h"

/* SHA flags */
#define SHA_FLAGS_BUSY			BIT(0)
#define SHA_FLAGS_FINAL			BIT(1)
#define SHA_FLAGS_DMA_ACTIVE		BIT(2)
#define SHA_FLAGS_OUTPUT_READY		BIT(3)
#define SHA_FLAGS_INIT			BIT(4)
#define SHA_FLAGS_CPU			BIT(5)
#define SHA_FLAGS_DMA_READY		BIT(6)

#define SHA_FLAGS_FINUP			BIT(16)
#define SHA_FLAGS_SG			BIT(17)
#define SHA_FLAGS_ALGO_MASK		GENMASK(22, 18)
#define SHA_FLAGS_SHA1			BIT(18)
#define SHA_FLAGS_SHA224		BIT(19)
#define SHA_FLAGS_SHA256		BIT(20)
#define SHA_FLAGS_SHA384		BIT(21)
#define SHA_FLAGS_SHA512		BIT(22)
#define SHA_FLAGS_ERROR			BIT(23)
#define SHA_FLAGS_PAD			BIT(24)
#define SHA_FLAGS_RESTORE		BIT(25)

#define SHA_OP_UPDATE	1
#define SHA_OP_FINAL	2

#define SHA_BUFFER_LEN		(PAGE_SIZE / 16)

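/*
 * Requests with fewer pending bytes than this threshold are fed to the
 * hardware by CPU writes rather than by DMA (see atmel_sha_update()).
 */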
#define ATMEL_SHA_DMA_THRESHOLD		56

struct atmel_sha_caps {
	bool	has_dma;
	bool	has_dualbuff;
	bool	has_sha224;
	bool	has_sha_384_512;
	bool	has_uihv;
};

struct atmel_sha_dev;

/*
 * .statesize = sizeof(struct atmel_sha_state) must be <= PAGE_SIZE / 8 as
 * tested by the ahash_prepare_alg() function.
 */
struct atmel_sha_state {
	u8	digest[SHA512_DIGEST_SIZE];
	u8	buffer[SHA_BUFFER_LEN];
	u64	digcnt[2];
	size_t	bufcnt;
};

struct atmel_sha_reqctx {
	struct atmel_sha_dev	*dd;
	unsigned long	flags;
	unsigned long	op;

	u8	digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
	u64	digcnt[2];
	size_t	bufcnt;
	size_t	buflen;
	dma_addr_t	dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	unsigned int	offset;	/* offset in current sg */
	unsigned int	total;	/* total request */

	size_t	block_size;

	u8 buffer[0] __aligned(sizeof(u32));
};

struct atmel_sha_ctx {
	struct atmel_sha_dev	*dd;

	unsigned long		flags;
};

#define ATMEL_SHA_QUEUE_LENGTH	50

struct atmel_sha_dma {
	struct dma_chan			*chan;
	struct dma_slave_config		dma_conf;
};

struct atmel_sha_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	struct clk		*iclk;
	int			irq;
	void __iomem		*io_base;

	spinlock_t		lock;
	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	struct atmel_sha_dma	dma_lch_in;

	struct atmel_sha_caps	caps;

	u32	hw_version;
};

struct atmel_sha_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_sha_drv atmel_sha = {
	.dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
};

static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_sha_write(struct atmel_sha_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
{
	size_t count;

	while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
		count = min(ctx->sg->length - ctx->offset, ctx->total);
		count = min(count, ctx->buflen - ctx->bufcnt);

		if (count <= 0) {
			/*
			 * Check if count <= 0 because the buffer is full or
			 * because the sg length is 0. In the latter case,
			 * check if there is another sg in the list, a 0 length
			 * sg doesn't necessarily mean the end of the sg list.
			 */
			if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
				ctx->sg = sg_next(ctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
			ctx->offset, count, 0);

		ctx->bufcnt += count;
		ctx->offset += count;
		ctx->total -= count;

		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message followed by
 * "padlen-1" zero bits. Then a 64 bits block (SHA1/SHA224/SHA256) or
 * 128 bits block (SHA384/SHA512) equal to the message length in bits
 * is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 *  - if message length < 56 bytes then padlen = 56 - message length
 *  - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 *  - if message length < 112 bytes then padlen = 112 - message length
 *  - else padlen = 128 + 112 - message length
 */
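/*
 * Worked example (SHA1): with 3 residual bytes in the buffer and no extra
 * data, index = 3 and padlen = 56 - 3 = 53, so the function appends 0x80,
 * then 52 zero bytes, then the 8-byte big-endian total bit length; the
 * padded data end up 64 bytes long, i.e. exactly one SHA1 block.
 */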
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
	unsigned int index, padlen;
	u64 bits[2];
	u64 size[2];

	size[0] = ctx->digcnt[0];
	size[1] = ctx->digcnt[1];

	size[0] += ctx->bufcnt;
	if (size[0] < ctx->bufcnt)
		size[1]++;

	size[0] += length;
	if (size[0] < length)
		size[1]++;

	bits[1] = cpu_to_be64(size[0] << 3);
	bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);

	if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
		index = ctx->bufcnt & 0x7f;
		padlen = (index < 112) ? (112 - index) : ((128+112) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
		ctx->bufcnt += padlen + 16;
		ctx->flags |= SHA_FLAGS_PAD;
	} else {
		index = ctx->bufcnt & 0x3f;
		padlen = (index < 56) ? (56 - index) : ((64+56) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
		ctx->bufcnt += padlen + 8;
		ctx->flags |= SHA_FLAGS_PAD;
	}
}

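/*
 * atmel_sha_init() binds the request to a SHA device, selects the algorithm
 * from the requested digest size and resets the per-request state (flags,
 * counters, buffer length).
 */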
static int atmel_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = NULL;
	struct atmel_sha_dev *tmp;

	spin_lock_bh(&atmel_sha.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&atmel_sha.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA1;
		ctx->block_size = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA224;
		ctx->block_size = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA256;
		ctx->block_size = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA384;
		ctx->block_size = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA512;
		ctx->block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->digcnt[0] = 0;
	ctx->digcnt[1] = 0;
	ctx->buflen = SHA_BUFFER_LEN;

	return 0;
}

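/*
 * atmel_sha_write_ctrl() programs the mode register for the next transfer
 * (PDC or CPU-fed "auto" mode), unmasks the matching interrupt and, when
 * the UIHV feature is available, reloads the partial digest saved by the
 * previous update so that independent requests can be interleaved.
 */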
static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 valmr = SHA_MR_MODE_AUTO;
	unsigned int i, hashsize = 0;

	if (dma) {
		if (!dd->caps.has_dma)
			atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
		valmr = SHA_MR_MODE_PDC;
		if (dd->caps.has_dualbuff)
			valmr |= SHA_MR_DUALBUFF;
	} else {
		atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
	}

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		valmr |= SHA_MR_ALGO_SHA1;
		hashsize = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
		valmr |= SHA_MR_ALGO_SHA224;
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA256:
		valmr |= SHA_MR_ALGO_SHA256;
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
		valmr |= SHA_MR_ALGO_SHA384;
		hashsize = SHA512_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA512:
		valmr |= SHA_MR_ALGO_SHA512;
		hashsize = SHA512_DIGEST_SIZE;
		break;

	default:
		break;
	}

	/* Setting CR_FIRST only for the first iteration */
	if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
	} else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
		const u32 *hash = (const u32 *)ctx->digest;

		/*
		 * Restore the hardware context: update the User Initialize
		 * Hash Value (UIHV) with the value saved when the latest
		 * 'update' operation completed on this very same crypto
		 * request.
		 */
		ctx->flags &= ~SHA_FLAGS_RESTORE;
		atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
		for (i = 0; i < hashsize / sizeof(u32); ++i)
			atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
		valmr |= SHA_MR_UIHV;
	}
	/*
	 * WARNING: If the UIHV feature is not available, the hardware CANNOT
	 * process concurrent requests: the internal registers used to store
	 * the hash/digest are still set to the partial digest output values
	 * computed during the latest round.
	 */

	atmel_sha_write(dd, SHA_MR, valmr);
}

static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length, final);

	atmel_sha_write_ctrl(dd, 0);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length;
	if (ctx->digcnt[0] < length)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dd->flags |= SHA_FLAGS_CPU;

	for (count = 0; count < len32; count++)
		atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}

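/*
 * atmel_sha_xmit_pdc() queues two buffers on the built-in PDC: the first
 * one in TPR/TCR and the second one in the "next" registers TNPR/TNCR,
 * then enables transmission to start the transfer.
 */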
static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int len32;

	dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length1, final);

	len32 = DIV_ROUND_UP(length1, sizeof(u32));
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
	atmel_sha_write(dd, SHA_TPR, dma_addr1);
	atmel_sha_write(dd, SHA_TCR, len32);

	len32 = DIV_ROUND_UP(length2, sizeof(u32));
	atmel_sha_write(dd, SHA_TNPR, dma_addr2);
	atmel_sha_write(dd, SHA_TNCR, len32);

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length1;
	if (ctx->digcnt[0] < length1)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);

	return -EINPROGRESS;
}

static void atmel_sha_dma_callback(void *data)
{
	struct atmel_sha_dev *dd = data;

	/* dma_lch_in - completed - wait DATRDY */
	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
}

static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor	*in_desc;
	struct scatterlist sg[2];

	dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length1, final);

	dd->dma_lch_in.dma_conf.src_maxburst = 16;
	dd->dma_lch_in.dma_conf.dst_maxburst = 16;

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);

	if (length2) {
		sg_init_table(sg, 2);
		sg_dma_address(&sg[0]) = dma_addr1;
		sg_dma_len(&sg[0]) = length1;
		sg_dma_address(&sg[1]) = dma_addr2;
		sg_dma_len(&sg[1]) = length2;
		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	} else {
		sg_init_table(sg, 1);
		sg_dma_address(&sg[0]) = dma_addr1;
		sg_dma_len(&sg[0]) = length1;
		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}
	if (!in_desc)
		return -EINVAL;

	in_desc->callback = atmel_sha_dma_callback;
	in_desc->callback_param = dd;

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length1;
	if (ctx->digcnt[0] < length1)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return -EINPROGRESS;
}

static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	if (dd->caps.has_dma)
		return atmel_sha_xmit_dma(dd, dma_addr1, length1,
				dma_addr2, length2, final);
	else
		return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
				dma_addr2, length2, final);
}

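/*
 * atmel_sha_update_cpu() drains the remaining input into the internal
 * buffer, pads it and pushes the whole padded block through CPU writes.
 */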
static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	atmel_sha_append_sg(ctx);
	atmel_sha_fill_padding(ctx, 0);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}

static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
					struct atmel_sha_reqctx *ctx,
					size_t length, int final)
{
	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen +
				ctx->block_size);
		return -EINVAL;
	}

	ctx->flags &= ~SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
}

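/*
 * Slow DMA path: input bytes are first copied into the aligned internal
 * buffer and DMA'd from there, so it works for any scatterlist layout.
 */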
static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	atmel_sha_append_sg(ctx);

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: 0x%llx 0x%llx, final: %d\n",
		 ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);

	if (final)
		atmel_sha_fill_padding(ctx, 0);

	if (final || (ctx->bufcnt == ctx->buflen)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return atmel_sha_xmit_dma_map(dd, ctx, count, final);
	}

	return 0;
}

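/*
 * Fast DMA path: when the current scatterlist entry is word aligned and a
 * multiple of the block size, it is mapped and fed to the engine directly;
 * any misaligned tail (and the final padding) goes through the slow path.
 */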
static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;
	unsigned int count;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return atmel_sha_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %zu, total: %u\n",
		ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);

	sg = ctx->sg;

	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
		return atmel_sha_update_dma_slow(dd);

	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
		/* size is not ctx->block_size aligned */
		return atmel_sha_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
			/* not last sg must be ctx->block_size aligned */
			tail = length & (ctx->block_size - 1);
			length -= tail;
		}
	}

	ctx->total -= length;
	ctx->offset = length; /* offset where to start slow */

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	/* Add padding */
	if (final) {
		tail = length & (ctx->block_size - 1);
		length -= tail;
		ctx->total += tail;
		ctx->offset = length; /* offset where to start slow */

		sg = ctx->sg;
		atmel_sha_append_sg(ctx);

		atmel_sha_fill_padding(ctx, length);

		ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
			ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
			dev_err(dd->dev, "dma %zu bytes error\n",
				ctx->buflen + ctx->block_size);
			return -EINVAL;
		}

		if (length == 0) {
			ctx->flags &= ~SHA_FLAGS_SG;
			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
					0, final);
		} else {
			ctx->sg = sg;
			if (!dma_map_sg(dd->dev, ctx->sg, 1,
				DMA_TO_DEVICE)) {
				dev_err(dd->dev, "dma_map_sg error\n");
				return -EINVAL;
			}

			ctx->flags |= SHA_FLAGS_SG;

			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
					length, ctx->dma_addr, count, final);
		}
	}

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->flags |= SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
			0, final);
}

static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);

	if (ctx->flags & SHA_FLAGS_SG) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
		if (ctx->flags & SHA_FLAGS_PAD) {
			dma_unmap_single(dd->dev, ctx->dma_addr,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
						ctx->block_size, DMA_TO_DEVICE);
	}

	return 0;
}

static int atmel_sha_update_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
		ctx->total, ctx->digcnt[1], ctx->digcnt[0]);

	if (ctx->flags & SHA_FLAGS_CPU)
		err = atmel_sha_update_cpu(dd);
	else
		err = atmel_sha_update_dma_start(dd);

	/* wait for dma completion before can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n",
			err, ctx->digcnt[1], ctx->digcnt[0]);

	return err;
}

static int atmel_sha_final_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err = 0;
	int count;

	if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
	}
	/* faster to handle last block with cpu */
	else {
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
	}

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}

static void atmel_sha_copy_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	unsigned int i, hashsize;

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
	case SHA_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
	case SHA_FLAGS_SHA512:
		hashsize = SHA512_DIGEST_SIZE;
		break;

	default:
		/* Should not happen... */
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); ++i)
		hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
	ctx->flags |= SHA_FLAGS_RESTORE;
}

static void atmel_sha_copy_ready_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return;

	if (ctx->flags & SHA_FLAGS_SHA1)
		memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
	else if (ctx->flags & SHA_FLAGS_SHA224)
		memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
	else if (ctx->flags & SHA_FLAGS_SHA256)
		memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
	else if (ctx->flags & SHA_FLAGS_SHA384)
		memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
	else
		memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
}

static int atmel_sha_finish(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;

	if (ctx->digcnt[0] || ctx->digcnt[1])
		atmel_sha_copy_ready_hash(req);

	dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %zd\n", ctx->digcnt[1],
		ctx->digcnt[0], ctx->bufcnt);

	return 0;
}

static void atmel_sha_finish_req(struct ahash_request *req, int err)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;

	if (!err) {
		atmel_sha_copy_hash(req);
		if (SHA_FLAGS_FINAL & dd->flags)
			err = atmel_sha_finish(req);
	} else {
		ctx->flags |= SHA_FLAGS_ERROR;
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
			SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);

	clk_disable(dd->iclk);

	if (req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->queue_task);
}

static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
{
	int err;

	err = clk_enable(dd->iclk);
	if (err)
		return err;

	if (!(SHA_FLAGS_INIT & dd->flags)) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
		dd->flags |= SHA_FLAGS_INIT;
	}

	return 0;
}

static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
{
	return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
}

static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
{
	atmel_sha_hw_init(dd);

	dd->hw_version = atmel_sha_get_version(dd);

	dev_info(dd->dev,
			"version: 0x%x\n", dd->hw_version);

	clk_disable(dd->iclk);
}

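/*
 * atmel_sha_handle_queue() enqueues the request (if any) and, when the
 * engine is idle, dequeues and starts the next one. Passing a NULL request
 * simply kicks the queue again, e.g. from the queue tasklet.
 */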
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_sha_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);

	if (SHA_FLAGS_BUSY & dd->flags) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= SHA_FLAGS_BUSY;

	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = atmel_sha_hw_init(dd);
	if (err)
		goto err1;

	if (ctx->op == SHA_OP_UPDATE) {
		err = atmel_sha_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
			/* no final() after finup() */
			err = atmel_sha_final_req(dd);
	} else if (ctx->op == SHA_OP_FINAL) {
		err = atmel_sha_final_req(dd);
	}

err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return ret;
}

static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct atmel_sha_dev *dd = tctx->dd;

	ctx->op = op;

	return atmel_sha_handle_queue(dd, req);
}

static int atmel_sha_update(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & SHA_FLAGS_FINUP) {
		if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
			/* faster to use CPU for short transfers */
			ctx->flags |= SHA_FLAGS_CPU;
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		atmel_sha_append_sg(ctx);
		return 0;
	}
	return atmel_sha_enqueue(req, SHA_OP_UPDATE);
}

static int atmel_sha_final(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct atmel_sha_dev *dd = tctx->dd;
	int err = 0;

	ctx->flags |= SHA_FLAGS_FINUP;

	if (ctx->flags & SHA_FLAGS_ERROR)
		return 0; /* uncompleted hash is not needed */

	if (ctx->bufcnt) {
		return atmel_sha_enqueue(req, SHA_OP_FINAL);
	} else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */
		err = atmel_sha_hw_init(dd);
		if (err)
			goto err1;

		dd->flags |= SHA_FLAGS_BUSY;
		err = atmel_sha_final_req(dd);
	} else {
		/* copy ready hash (+ finalize hmac) */
		return atmel_sha_finish(req);
	}

err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	return err;
}

static int atmel_sha_finup(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = atmel_sha_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = atmel_sha_final(req);

	return err1 ?: err2;
}

static int atmel_sha_digest(struct ahash_request *req)
{
	return atmel_sha_init(req) ?: atmel_sha_finup(req);
}

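/*
 * export/import serialize the whole request state through a snapshot on
 * the stack, so the caller's buffers may be unaligned and an in-progress
 * hash can be suspended and resumed later.
 */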
static int atmel_sha_export(struct ahash_request *req, void *out)
{
	const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_state state;

	memcpy(state.digest, ctx->digest, SHA512_DIGEST_SIZE);
	memcpy(state.buffer, ctx->buffer, ctx->bufcnt);
	state.bufcnt = ctx->bufcnt;
	state.digcnt[0] = ctx->digcnt[0];
	state.digcnt[1] = ctx->digcnt[1];

	/* out might be unaligned. */
	memcpy(out, &state, sizeof(state));
	return 0;
}

static int atmel_sha_import(struct ahash_request *req, const void *in)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_state state;

	/* in might be unaligned. */
	memcpy(&state, in, sizeof(state));

	memcpy(ctx->digest, state.digest, SHA512_DIGEST_SIZE);
	memcpy(ctx->buffer, state.buffer, state.bufcnt);
	ctx->bufcnt = state.bufcnt;
	ctx->digcnt[0] = state.digcnt[0];
	ctx->digcnt[1] = state.digcnt[1];
	return 0;
}

static int atmel_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct atmel_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA512_BLOCK_SIZE);

	return 0;
}

static struct ahash_alg sha_1_256_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_state),
		.base	= {
			.cra_name		= "sha1",
			.cra_driver_name	= "atmel-sha1",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_state),
		.base	= {
			.cra_name		= "sha256",
			.cra_driver_name	= "atmel-sha256",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};

static struct ahash_alg sha_224_alg = {
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA224_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_state),
		.base	= {
			.cra_name		= "sha224",
			.cra_driver_name	= "atmel-sha224",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA224_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
};

static struct ahash_alg sha_384_512_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA384_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_state),
		.base	= {
			.cra_name		= "sha384",
			.cra_driver_name	= "atmel-sha384",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA384_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_state),
		.base	= {
			.cra_name		= "sha512",
			.cra_driver_name	= "atmel-sha512",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};

static void atmel_sha_queue_task(unsigned long data)
{
	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;

	atmel_sha_handle_queue(dd, NULL);
}

static void atmel_sha_done_task(unsigned long data)
{
	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
	int err = 0;

	if (SHA_FLAGS_CPU & dd->flags) {
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (SHA_FLAGS_DMA_READY & dd->flags) {
		if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
			dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
			atmel_sha_update_dma_stop(dd);
		}
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			/* hash or semi-hash ready */
			dd->flags &= ~(SHA_FLAGS_DMA_READY |
						SHA_FLAGS_OUTPUT_READY);
			err = atmel_sha_update_dma_start(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	}
	return;

finish:
	/* finish current request */
	atmel_sha_finish_req(dd->req, err);
}

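/*
 * The interrupt handler only acknowledges and masks the event; the actual
 * completion work is deferred to the done tasklet.
 */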
static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
{
	struct atmel_sha_dev *sha_dd = dev_id;
	u32 reg;

	reg = atmel_sha_read(sha_dd, SHA_ISR);
	if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
		atmel_sha_write(sha_dd, SHA_IDR, reg);
		if (SHA_FLAGS_BUSY & sha_dd->flags) {
			sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
			if (!(SHA_FLAGS_CPU & sha_dd->flags))
				sha_dd->flags |= SHA_FLAGS_DMA_READY;
			tasklet_schedule(&sha_dd->done_task);
		} else {
			dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
		crypto_unregister_ahash(&sha_1_256_algs[i]);

	if (dd->caps.has_sha224)
		crypto_unregister_ahash(&sha_224_alg);

	if (dd->caps.has_sha_384_512) {
		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
			crypto_unregister_ahash(&sha_384_512_algs[i]);
	}
}

static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
		err = crypto_register_ahash(&sha_1_256_algs[i]);
		if (err)
			goto err_sha_1_256_algs;
	}

	if (dd->caps.has_sha224) {
		err = crypto_register_ahash(&sha_224_alg);
		if (err)
			goto err_sha_224_algs;
	}

	if (dd->caps.has_sha_384_512) {
		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
			err = crypto_register_ahash(&sha_384_512_algs[i]);
			if (err)
				goto err_sha_384_512_algs;
		}
	}

	return 0;

err_sha_384_512_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_384_512_algs[j]);
	crypto_unregister_ahash(&sha_224_alg);
err_sha_224_algs:
	i = ARRAY_SIZE(sha_1_256_algs);
err_sha_1_256_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_1_256_algs[j]);

	return err;
}

static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *sl = slave;

	if (sl && sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}

static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
				struct crypto_platform_data *pdata)
{
	dma_cap_mask_t mask_in;

	/* Try to grab DMA channel */
	dma_cap_zero(mask_in);
	dma_cap_set(DMA_SLAVE, mask_in);

	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
			atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
	if (!dd->dma_lch_in.chan) {
		dev_warn(dd->dev, "no DMA channel available\n");
		return -ENODEV;
	}

	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		SHA_REG_DIN(0);
	dd->dma_lch_in.dma_conf.src_maxburst = 1;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	return 0;
}

static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
}

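/*
 * Capabilities are derived from the major revision of the IP block: newer
 * revisions add DMA, SHA224, SHA384/SHA512 and the UIHV context
 * save/restore feature on top of the base SHA1/SHA256 engine.
 */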
static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
{
	dd->caps.has_dma = 0;
	dd->caps.has_dualbuff = 0;
	dd->caps.has_sha224 = 0;
	dd->caps.has_sha_384_512 = 0;
	dd->caps.has_uihv = 0;

	/* keep only major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x510:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		dd->caps.has_uihv = 1;
		break;
	case 0x420:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		dd->caps.has_uihv = 1;
		break;
	case 0x410:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		break;
	case 0x400:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		break;
	case 0x320:
		break;
	default:
		dev_warn(dd->dev,
				"Unmanaged sha version, set minimum capabilities\n");
		break;
	}
}

#if defined(CONFIG_OF)
static const struct of_device_id atmel_sha_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-sha" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);

static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	pdata->dma_slave = devm_kzalloc(&pdev->dev,
					sizeof(*(pdata->dma_slave)),
					GFP_KERNEL);
	if (!pdata->dma_slave) {
		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
		return ERR_PTR(-ENOMEM);
	}

	return pdata;
}
#else /* CONFIG_OF */
static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
{
	return ERR_PTR(-EINVAL);
}
#endif

static int atmel_sha_probe(struct platform_device *pdev)
{
	struct atmel_sha_dev *sha_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *sha_res;
	int err;

	sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
	if (sha_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto sha_dd_err;
	}

	sha_dd->dev = dev;

	platform_set_drvdata(pdev, sha_dd);

	INIT_LIST_HEAD(&sha_dd->list);
	spin_lock_init(&sha_dd->lock);

	tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
					(unsigned long)sha_dd);
	tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
					(unsigned long)sha_dd);

	crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);

	/* Get the base address */
	sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!sha_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	sha_dd->phys_base = sha_res->start;

	/* Get the IRQ */
	sha_dd->irq = platform_get_irq(pdev, 0);
	if (sha_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = sha_dd->irq;
		goto res_err;
	}

	err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
			       IRQF_SHARED, "atmel-sha", sha_dd);
	if (err) {
		dev_err(dev, "unable to request sha irq.\n");
		goto res_err;
	}

	/* Initializing the clock */
	sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
	if (IS_ERR(sha_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(sha_dd->iclk);
		goto res_err;
	}

	sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
	if (IS_ERR(sha_dd->io_base)) {
		/* devm_ioremap_resource() returns an ERR_PTR, never NULL */
		dev_err(dev, "can't ioremap\n");
		err = PTR_ERR(sha_dd->io_base);
		goto res_err;
	}

	err = clk_prepare(sha_dd->iclk);
	if (err)
		goto res_err;

	atmel_sha_hw_version_init(sha_dd);

	atmel_sha_get_cap(sha_dd);

	if (sha_dd->caps.has_dma) {
		pdata = pdev->dev.platform_data;
		if (!pdata) {
			pdata = atmel_sha_of_init(pdev);
			if (IS_ERR(pdata)) {
				dev_err(&pdev->dev, "platform data not available\n");
				err = PTR_ERR(pdata);
				goto iclk_unprepare;
			}
		}
		if (!pdata->dma_slave) {
			err = -ENXIO;
			goto iclk_unprepare;
		}
		err = atmel_sha_dma_init(sha_dd, pdata);
		if (err)
			goto err_sha_dma;

		dev_info(dev, "using %s for DMA transfers\n",
				dma_chan_name(sha_dd->dma_lch_in.chan));
	}

	spin_lock(&atmel_sha.lock);
	list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
	spin_unlock(&atmel_sha.lock);

	err = atmel_sha_register_algs(sha_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
			sha_dd->caps.has_sha224 ? "/SHA224" : "",
			sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");

	return 0;

err_algs:
	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);
	if (sha_dd->caps.has_dma)
		atmel_sha_dma_cleanup(sha_dd);
err_sha_dma:
iclk_unprepare:
	clk_unprepare(sha_dd->iclk);
res_err:
	tasklet_kill(&sha_dd->queue_task);
	tasklet_kill(&sha_dd->done_task);
sha_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int atmel_sha_remove(struct platform_device *pdev)
{
	struct atmel_sha_dev *sha_dd;

	sha_dd = platform_get_drvdata(pdev);
	if (!sha_dd)
		return -ENODEV;

	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);

	atmel_sha_unregister_algs(sha_dd);

	tasklet_kill(&sha_dd->queue_task);
	tasklet_kill(&sha_dd->done_task);

	if (sha_dd->caps.has_dma)
		atmel_sha_dma_cleanup(sha_dd);

	clk_unprepare(sha_dd->iclk);

	return 0;
}

static struct platform_driver atmel_sha_driver = {
	.probe		= atmel_sha_probe,
	.remove		= atmel_sha_remove,
	.driver		= {
		.name	= "atmel_sha",
		.of_match_table	= of_match_ptr(atmel_sha_dt_ids),
	},
};

module_platform_driver(atmel_sha_driver);

MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");