/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"
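/*
 * Iterator state used while building a TDMA descriptor chain for a hash
 * request: "base" tracks overall progress through the request payload,
 * while "src" walks the source scatterlist one SRAM-sized operation at
 * a time.
 */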
struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};
static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}
static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}
static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				    &req->cache_dma);
	if (!req->cache)
		return -ENOMEM;

	return 0;
}
static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->cache)
		return;

	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
		      req->cache_dma);
}
static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}
static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}
static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}
static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->base);
}
static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}
static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}
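/*
 * MD5/SHA-style padding: the message is extended with a 0x80 marker
 * byte and zero fill so that its length becomes congruent to 56 mod 64,
 * leaving room for the 64-bit bit-length field that completes the final
 * 64 byte block. For example, a 20 byte message needs 36 bytes of
 * marker + zero padding, followed by the 8 byte length (160 bits), to
 * fill exactly one block.
 */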
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}
static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int index, padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}
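/*
 * Standard (non-DMA) processing: the CPU copies each chunk of source
 * data into the engine SRAM, patches the operation descriptor with the
 * right fragment mode and length, and kicks the accelerator, one
 * CESA_SA_SRAM_PAYLOAD_SIZE chunk at a time.
 */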
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	if (!sreq->offset) {
		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
		for (i = 0; i < digsize / 4; i++)
			writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
	}

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
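/*
 * Returns -EINPROGRESS while more source data remains to be fed to the
 * engine, 0 once the whole request has been consumed.
 */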
static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}
static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}
static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	sreq->offset = 0;
}
static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_ahash_std_step(ahashreq);
}
static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}
static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
		__le32 *data = NULL;

		/*
		 * Result is already in the correct endianness when the SA is
		 * used.
		 */
		data = creq->base.chain.last->op->ctx.hash.hash;
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = cpu_to_le32(data[i]);

		memcpy(ahashreq->result, data, digsize);
	} else {
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = readl_relaxed(engine->regs +
						       CESA_IVDIG(i));
		if (creq->last_req) {
			/*
			 * Hardware's MD5 digest is in little endian format, but
			 * SHA in big endian format
			 */
			if (creq->algo_le) {
				__le32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_le32(creq->state[i]);
			} else {
				__be32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_be32(creq->state[i]);
			}
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}
static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);
}
static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);
}
static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};
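/*
 * Common initialization for all hash variants: start from a zeroed
 * request context, configure the template operation as a "first
 * fragment" MAC-only operation, and record whether the algorithm
 * produces its digest in little endian (MD5) or big endian (SHA)
 * order.
 */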
static void mv_cesa_ahash_init(struct ahash_request *req,
			       struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;
}
static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));

	return 0;
}
static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;

	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
	    !creq->last_req) {
		cached = true;

		if (!req->nbytes)
			return cached;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return cached;
}
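/*
 * Add one operation block (followed by the dummy descriptor that
 * launches it) covering frag_len bytes. Once the first fragment has
 * been emitted, the template is flipped to "mid fragment" so that
 * subsequent operations chain correctly.
 */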
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}
static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	int ret;

	if (!creq->cache_ptr)
		return 0;

	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
	if (ret)
		return ret;

	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}
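/*
 * Emit the final operation of a request. Either the engine can compute
 * the padding itself (total length no greater than
 * CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX), in which case the result is
 * fetched back by DMA, or the padding is generated by software and fed
 * to the engine as ordinary "mid" fragment data.
 */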
static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
						CESA_SA_DESC_CFG_NOT_FRAG :
						CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		ret = mv_cesa_dma_add_result_op(chain,
						CESA_SA_CFG_SRAM_OFFSET,
						CESA_SA_DATA_SRAM_OFFSET,
						CESA_TDMA_SRC_IN_SRAM, flags);
		if (ret)
			return ERR_PTR(-ENOMEM);

		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						    CESA_SA_DATA_SRAM_OFFSET +
						    frag_len,
						    ahashdreq->padding_dma,
						    len, CESA_TDMA_DST_IN_SRAM,
						    flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
					  flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}
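/*
 * Build the complete TDMA descriptor chain for a request: map the
 * source scatterlist, replay any cached left-over bytes, queue one
 * operation per SRAM block of new data, and finish with the
 * appropriate last/mid fragment handling.
 */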
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	int ret;
	u32 type;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&basereq->chain,
						  &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation. Queue up the final
	 * operation, which depends whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	/*
	 * If results are copied via DMA, this means that this
	 * request can be directly processed by the engine,
	 * without partial updates. So we can chain it at the
	 * DMA level with other requests.
	 */
	type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

	if (op && type != CESA_TDMA_RESULT) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	if (type != CESA_TDMA_RESULT)
		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}
static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}

	*cached = mv_cesa_ahash_cache_req(req);

	if (*cached)
		return 0;

	if (cesa_dev->caps->has_tdma)
		return mv_cesa_ahash_dma_req_init(req);
	else
		return 0;
}
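/*
 * Common entry point for update/final/finup: requests small enough to
 * fit in the cache are absorbed without touching the hardware;
 * everything else is prepared (DMA or standard mode) and queued on the
 * engine returned by mv_cesa_select_engine().
 */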
static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_engine *engine;
	bool cached = false;
	int ret;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ahash_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}
static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->len += req->nbytes;

	return mv_cesa_ahash_queue_req(req);
}
static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	return mv_cesa_ahash_queue_req(req);
}
static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	return mv_cesa_ahash_queue_req(req);
}
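/*
 * export/import expose the internal state (partial digest, total
 * length and cached bytes) so that a hash can be suspended and resumed
 * later, as required by the generic ahash API.
 */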
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}
static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}
static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

	mv_cesa_ahash_init(req, &tmpl, true);

	creq->state[0] = MD5_H0;
	creq->state[1] = MD5_H1;
	creq->state[2] = MD5_H2;
	creq->state[3] = MD5_H3;

	return 0;
}
static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}
static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}
static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA1_H0;
	creq->state[1] = SHA1_H1;
	creq->state[2] = SHA1_H2;
	creq->state[3] = SHA1_H3;
	creq->state[4] = SHA1_H4;

	return 0;
}
static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}
static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}
static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA256_H0;
	creq->state[1] = SHA256_H1;
	creq->state[2] = SHA256_H2;
	creq->state[3] = SHA256_H3;
	creq->state[4] = SHA256_H4;
	creq->state[5] = SHA256_H5;
	creq->state[6] = SHA256_H6;
	creq->state[7] = SHA256_H7;

	return 0;
}
static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}
static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}
struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
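/*
 * HMAC key setup below runs synchronously: requests are issued through
 * the asynchronous ahash API and a completion is used to wait for the
 * result.
 */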
struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};
static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   mv_cesa_hmac_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	return 0;
}
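/*
 * Derive the HMAC inner/outer pads from the key, per RFC 2104: keys
 * longer than a block are first digested, shorter keys are zero
 * padded, then each pad byte is XORed with 0x36 (ipad) or 0x5c (opad).
 */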
static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Set the memory region to 0 to avoid any leak. */
		memset(keydup, 0, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}

	return 0;
}
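/*
 * Compute the intermediate hash states of ipad and opad with the named
 * base algorithm (the driver's own "mv-*" implementations), so that
 * keyed requests can start directly from those saved states instead of
 * rehashing the pads every time.
 */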
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}
static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));

	return 0;
}
static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}
static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = be32_to_cpu(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

	return 0;
}
static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}
static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}
static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}
static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}
static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};