/*
 * Multi buffer SHA1 algorithm Glue Code
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Contact Information:
 *	Tim Chen <tim.c.chen@linux.intel.com>
 *
 * BSD LICENSE
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <asm/byteorder.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <linux/hardirq.h>
#include <asm/fpu-internal.h>
#include "sha_mb_ctx.h"

#define FLUSH_INTERVAL 1000 /* in usec */
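
/*
 * A job can sit in a partially filled multi-buffer lane indefinitely if
 * no further requests arrive.  The per-cpu flusher armed from
 * sha1_mb_add_list() below forces such jobs through the job manager once
 * they have waited more than FLUSH_INTERVAL microseconds.
 */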

static struct mcryptd_alg_state sha1_mb_alg_state;

struct sha1_mb_ctx {
	struct mcryptd_ahash *mcryptd_tfm;
};

static inline struct mcryptd_hash_request_ctx *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
{
	struct shash_desc *desc;

	desc = container_of((void *) hash_ctx, struct shash_desc, __ctx);
	return container_of(desc, struct mcryptd_hash_request_ctx, desc);
}

static inline struct ahash_request *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
	return container_of((void *) ctx, struct ahash_request, __ctx);
}
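
/*
 * The two casts above rely on how mcryptd lays out a request: the
 * sha1_hash_ctx lives in the __ctx[] area of the shash_desc embedded in
 * struct mcryptd_hash_request_ctx, which itself lives in the __ctx[]
 * area of the ahash_request.  container_of() walks those embeddings
 * back out.
 */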

static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
			 struct shash_desc *desc)
{
	rctx->flag = HASH_UPDATE;
}

static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)(struct sha1_mb_mgr *state,
							  struct job_sha1 *job);
static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)(struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)(struct sha1_mb_mgr *state);

static inline void sha1_init_digest(uint32_t *digest)
{
	static const uint32_t initial_digest[SHA1_DIGEST_LENGTH] = {SHA1_H0,
				SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };

	memcpy(digest, initial_digest, sizeof(initial_digest));
}

static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
				uint32_t total_len)
{
	uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1);

	memset(&padblock[i], 0, SHA1_BLOCK_SIZE);
	padblock[i] = 0x80;

	i += ((SHA1_BLOCK_SIZE - 1) &
	      (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1)))
	     + 1 + SHA1_PADLENGTHFIELD_SIZE;

#if SHA1_PADLENGTHFIELD_SIZE == 16
	*((uint64_t *) &padblock[i - 16]) = 0;
#endif

	*((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

	/* Number of extra blocks to hash */
	return i >> SHA1_LOG2_BLOCK_SIZE;
}
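
/*
 * Worked example, assuming SHA1_BLOCK_SIZE == 64 and
 * SHA1_PADLENGTHFIELD_SIZE == 8: for total_len == 100, i starts at
 * 100 & 63 == 36, so padblock[36] becomes the 0x80 terminator and i
 * advances to 64.  Bytes 37..55 stay zero, bytes 56..63 hold
 * cpu_to_be64(100 << 3) == 800 (the message length in bits), and
 * 64 >> SHA1_LOG2_BLOCK_SIZE == 1 extra block is returned for hashing.
 */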

static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr,
						   struct sha1_hash_ctx *ctx)
{
	while (ctx) {
		if (ctx->status & HASH_CTX_STS_COMPLETE) {
			/* Clear PROCESSING bit */
			ctx->status = HASH_CTX_STS_COMPLETE;
			return ctx;
		}

		/*
		 * If the extra blocks are empty, begin hashing what remains
		 * in the user's buffer.
		 */
		if (ctx->partial_block_buffer_length == 0 &&
		    ctx->incoming_buffer_length) {
			const void *buffer = ctx->incoming_buffer;
			uint32_t len = ctx->incoming_buffer_length;
			uint32_t copy_len;

			/*
			 * Only entire blocks can be hashed.
			 * Copy remainder to extra blocks buffer.
			 */
			copy_len = len & (SHA1_BLOCK_SIZE - 1);
			if (copy_len) {
				len -= copy_len;
				memcpy(ctx->partial_block_buffer,
				       ((const char *) buffer + len),
				       copy_len);
				ctx->partial_block_buffer_length = copy_len;
			}

			ctx->incoming_buffer_length = 0;

			/* len should be a multiple of the block size now */
			assert((len % SHA1_BLOCK_SIZE) == 0);

			/* Set len to the number of blocks to be hashed */
			len >>= SHA1_LOG2_BLOCK_SIZE;

			if (len) {
				ctx->job.buffer = (uint8_t *) buffer;
				ctx->job.len = len;
				ctx = (struct sha1_hash_ctx *)
					sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
				continue;
			}
		}

		/*
		 * If the extra blocks are not empty, then we are
		 * either on the last block(s) or we need more
		 * user input before continuing.
		 */
		if (ctx->status & HASH_CTX_STS_LAST) {
			uint8_t *buf = ctx->partial_block_buffer;
			uint32_t n_extra_blocks = sha1_pad(buf, ctx->total_length);

			ctx->status = (HASH_CTX_STS_PROCESSING |
				       HASH_CTX_STS_COMPLETE);
			ctx->job.buffer = buf;
			ctx->job.len = (uint32_t) n_extra_blocks;
			ctx = (struct sha1_hash_ctx *)
				sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
			continue;
		}

		ctx->status = HASH_CTX_STS_IDLE;
		return ctx;
	}

	return NULL;
}
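
/*
 * Note that sha1_job_mgr_submit() may hand back the context of a
 * *different* job than the one just submitted, or NULL when no job has
 * completed yet: the AVX2 job manager keeps several lanes in flight at
 * once and only returns a job when it finishes.  That is why the helper
 * above loops until it finds a context that is safe to give back to the
 * caller.
 */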

static struct sha1_hash_ctx *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
{
	/*
	 * If get_comp_job returns NULL, there are no jobs complete.
	 * If get_comp_job returns a job, verify that it is safe to return
	 * to the user.  If it is not ready, resubmit the job to finish
	 * processing.
	 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be
	 * returned.  Otherwise, all jobs currently being managed by the
	 * hash_ctx_mgr still need processing.
	 */
	struct sha1_hash_ctx *ctx;

	ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr);
	return sha1_ctx_mgr_resubmit(mgr, ctx);
}

static void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr)
{
	sha1_job_mgr_init(&mgr->mgr);
}

static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
						 struct sha1_hash_ctx *ctx,
						 const void *buffer,
						 uint32_t len,
						 int flags)
{
	if (flags & (~HASH_ENTIRE)) {
		/*
		 * User should not pass anything other than FIRST, UPDATE, or
		 * LAST.
		 */
		ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
		return ctx;
	}

	if (ctx->status & HASH_CTX_STS_PROCESSING) {
		/* Cannot submit to a currently processing job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
		return ctx;
	}

	if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
		/* Cannot update a finished job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
		return ctx;
	}

	if (flags & HASH_FIRST) {
		/* Init digest */
		sha1_init_digest(ctx->job.result_digest);

		/* Reset byte counter */
		ctx->total_length = 0;

		/* Clear extra blocks */
		ctx->partial_block_buffer_length = 0;
	}

	/* If we made it here, there were no errors during this call to submit */
	ctx->error = HASH_CTX_ERROR_NONE;

	/* Store buffer ptr info from user */
	ctx->incoming_buffer = buffer;
	ctx->incoming_buffer_length = len;

	/*
	 * Store the user's request flags and mark this ctx as currently
	 * being processed.
	 */
	ctx->status = (flags & HASH_LAST) ?
			(HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
			HASH_CTX_STS_PROCESSING;

	/* Advance byte counter */
	ctx->total_length += len;

	/*
	 * If there is anything currently buffered in the extra blocks,
	 * append to it until it contains a whole block.
	 * Or if the user's buffer contains less than a whole block,
	 * append as much as possible to the extra block.
	 */
	if ((ctx->partial_block_buffer_length) | (len < SHA1_BLOCK_SIZE)) {
		/*
		 * Compute how many bytes to copy from the user buffer into
		 * the extra block.
		 */
		uint32_t copy_len = SHA1_BLOCK_SIZE -
					ctx->partial_block_buffer_length;
		if (len < copy_len)
			copy_len = len;

		if (copy_len) {
			/* Copy and update relevant pointers and counters */
			memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
				buffer, copy_len);

			ctx->partial_block_buffer_length += copy_len;
			ctx->incoming_buffer = (const void *)
					((const char *)buffer + copy_len);
			ctx->incoming_buffer_length = len - copy_len;
		}

		/* The extra block should never contain more than 1 block here */
		assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE);

		/*
		 * If the extra block buffer contains exactly 1 block, it can
		 * be hashed.
		 */
		if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) {
			ctx->partial_block_buffer_length = 0;

			ctx->job.buffer = ctx->partial_block_buffer;
			ctx->job.len = 1;
			ctx = (struct sha1_hash_ctx *)
				sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
		}
	}

	return sha1_ctx_mgr_resubmit(mgr, ctx);
}
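
/*
 * A sketch of how the ctx manager API above is driven (FIRST/UPDATE/LAST
 * are the only valid flag values, and the FPU must be owned around every
 * call because the job manager runs AVX2 code):
 *
 *	kernel_fpu_begin();
 *	sha_ctx = sha1_ctx_mgr_submit(mgr, sha_ctx, data, len, HASH_FIRST);
 *	...
 *	sha_ctx = sha1_ctx_mgr_submit(mgr, sha_ctx, tail, tlen, HASH_LAST);
 *	while ((sha_ctx = sha1_ctx_mgr_flush(mgr)))
 *		complete_job(sha_ctx);	(hypothetical completion hook)
 *	kernel_fpu_end();
 */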

static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
{
	struct sha1_hash_ctx *ctx;

	while (1) {
		ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr);

		/* If flush returned 0, there are no more jobs in flight. */
		if (!ctx)
			return NULL;

		/*
		 * If flush returned a job, resubmit the job to finish
		 * processing.
		 */
		ctx = sha1_ctx_mgr_resubmit(mgr, ctx);

		/*
		 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be
		 * returned.  Otherwise, all jobs currently being managed by
		 * the sha1_ctx_mgr still need processing.  Loop.
		 */
		if (ctx)
			return ctx;
	}
}

static int sha1_mb_init(struct shash_desc *desc)
{
	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);

	hash_ctx_init(sctx);
	sctx->job.result_digest[0] = SHA1_H0;
	sctx->job.result_digest[1] = SHA1_H1;
	sctx->job.result_digest[2] = SHA1_H2;
	sctx->job.result_digest[3] = SHA1_H3;
	sctx->job.result_digest[4] = SHA1_H4;
	sctx->total_length = 0;
	sctx->partial_block_buffer_length = 0;
	sctx->status = HASH_CTX_STS_IDLE;

	return 0;
}

static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
	int i;
	struct sha1_hash_ctx *sctx = shash_desc_ctx(&rctx->desc);
	__be32 *dst = (__be32 *) rctx->out;

	for (i = 0; i < 5; ++i)
		dst[i] = cpu_to_be32(sctx->job.result_digest[i]);

	return 0;
}

static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
			   struct mcryptd_alg_cstate *cstate, bool flush)
{
	int flag = HASH_UPDATE;
	int nbytes, err = 0;
	struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
	struct sha1_hash_ctx *sha_ctx;

	/* more work ? */
	while (!(rctx->flag & HASH_DONE)) {
		nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
		if (nbytes < 0) {
			err = nbytes;
			goto out;
		}
		/* check if the walk is done */
		if (crypto_ahash_walk_last(&rctx->walk)) {
			rctx->flag |= HASH_DONE;
			if (rctx->flag & HASH_FINAL)
				flag |= HASH_LAST;
		}
		sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(&rctx->desc);
		kernel_fpu_begin();
		sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
					      rctx->walk.data, nbytes, flag);
		if (!sha_ctx) {
			if (flush)
				sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
		}
		kernel_fpu_end();
		if (sha_ctx)
			rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		else {
			rctx = NULL;
			goto out;
		}
	}

	/* copy the results */
	if (rctx->flag & HASH_FINAL)
		sha1_mb_set_results(rctx);

out:
	*ret_rctx = rctx;
	return err;
}

static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
			    struct mcryptd_alg_cstate *cstate,
			    int err)
{
	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	struct mcryptd_hash_request_ctx *req_ctx;
	int ret;

	/* remove from work list */
	spin_lock(&cstate->work_lock);
	list_del(&rctx->waiter);
	spin_unlock(&cstate->work_lock);

	if (irqs_disabled())
		rctx->complete(&req->base, err);
	else {
		local_bh_disable();
		rctx->complete(&req->base, err);
		local_bh_enable();
	}

	/* check to see if there are other jobs that are done */
	sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
	while (sha_ctx) {
		req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		ret = sha_finish_walk(&req_ctx, cstate, false);
		if (req_ctx) {
			spin_lock(&cstate->work_lock);
			list_del(&req_ctx->waiter);
			spin_unlock(&cstate->work_lock);

			req = cast_mcryptd_ctx_to_req(req_ctx);
			/* complete the job that actually finished */
			if (irqs_disabled())
				req_ctx->complete(&req->base, ret);
			else {
				local_bh_disable();
				req_ctx->complete(&req->base, ret);
				local_bh_enable();
			}
		}
		sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
	}

	return 0;
}

static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
			     struct mcryptd_alg_cstate *cstate)
{
	unsigned long next_flush;
	unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);

	rctx->tag.arrival = jiffies;	/* tag the arrival time */
	rctx->tag.seq_num = cstate->next_seq_num++;
	next_flush = rctx->tag.arrival + delay;
	rctx->tag.expire = next_flush;

	spin_lock(&cstate->work_lock);
	list_add_tail(&rctx->waiter, &cstate->work_list);
	spin_unlock(&cstate->work_lock);

	mcryptd_arm_flusher(cstate, delay);
}
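
/*
 * For a sense of scale: with HZ == 1000 (an illustrative configuration),
 * usecs_to_jiffies(FLUSH_INTERVAL) == 1, so a request tagged at jiffy J
 * expires at J + 1 and sha1_mb_flusher() below will force it through the
 * job manager on its next run after that point.
 */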

static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(desc, struct mcryptd_hash_request_ctx, desc);
	struct mcryptd_alg_cstate *cstate =
		this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	int ret = 0, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, desc);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk))
		rctx->flag |= HASH_DONE;

	/* submit */
	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
	sha1_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
				      rctx->walk.data, nbytes, HASH_UPDATE);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);

	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
			 unsigned int len, u8 *out)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(desc, struct mcryptd_hash_request_ctx, desc);
	struct mcryptd_alg_cstate *cstate =
		this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	int ret = 0, flag = HASH_UPDATE, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, desc);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk)) {
		rctx->flag |= HASH_DONE;
		flag = HASH_LAST;
	}
	rctx->out = out;

	/* submit */
	rctx->flag |= HASH_FINAL;
	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
	sha1_mb_add_list(rctx, cstate);

	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
				      rctx->walk.data, nbytes, flag);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha1_mb_final(struct shash_desc *desc, u8 *out)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(desc, struct mcryptd_hash_request_ctx, desc);
	struct mcryptd_alg_cstate *cstate =
		this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct sha1_hash_ctx *sha_ctx;
	int ret = 0;
	u8 data;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, desc);

	rctx->out = out;
	rctx->flag |= HASH_DONE | HASH_FINAL;

	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
	/* flag HASH_FINAL and 0 data size */
	sha1_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
				      HASH_LAST);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha1_mb_export(struct shash_desc *desc, void *out)
{
	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int sha1_mb_import(struct shash_desc *desc, const void *in)
{
	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

static struct shash_alg sha1_mb_shash_alg = {
	.digestsize	= SHA1_DIGEST_SIZE,
	.init		= sha1_mb_init,
	.update		= sha1_mb_update,
	.final		= sha1_mb_final,
	.finup		= sha1_mb_finup,
	.export		= sha1_mb_export,
	.import		= sha1_mb_import,
	.descsize	= sizeof(struct sha1_hash_ctx),
	.statesize	= sizeof(struct sha1_hash_ctx),
	.base		= {
		.cra_name	 = "__sha1-mb",
		.cra_driver_name = "__intel_sha1-mb",
		.cra_priority	 = 100,
		/*
		 * use ASYNC flag as some buffers in multi-buffer
		 * algo may not have completed before hashing thread sleep
		 */
		.cra_flags	 = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_ASYNC,
		.cra_blocksize	 = SHA1_BLOCK_SIZE,
		.cra_module	 = THIS_MODULE,
		.cra_list	 = LIST_HEAD_INIT(sha1_mb_shash_alg.base.cra_list),
	}
};

static int sha1_mb_async_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_init(mcryptd_req);
}

static int sha1_mb_async_update(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_update(mcryptd_req);
}

static int sha1_mb_async_finup(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_finup(mcryptd_req);
}

static int sha1_mb_async_final(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_final(mcryptd_req);
}

static int sha1_mb_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_digest(mcryptd_req);
}

static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_ahash *mcryptd_tfm;
	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mcryptd_hash_ctx *mctx;

	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb", 0, 0);
	if (IS_ERR(mcryptd_tfm))
		return PTR_ERR(mcryptd_tfm);
	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
	mctx->alg_state = &sha1_mb_alg_state;
	ctx->mcryptd_tfm = mcryptd_tfm;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_request) +
				 crypto_ahash_reqsize(&mcryptd_tfm->base));

	return 0;
}

static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static struct ahash_alg sha1_mb_async_alg = {
	.init		= sha1_mb_async_init,
	.update		= sha1_mb_async_update,
	.final		= sha1_mb_async_final,
	.finup		= sha1_mb_async_finup,
	.digest		= sha1_mb_async_digest,
	.halg = {
		.digestsize	= SHA1_DIGEST_SIZE,
		.base = {
			.cra_name	 = "sha1",
			.cra_driver_name = "sha1_mb",
			.cra_priority	 = 200,
			.cra_flags	 = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
			.cra_blocksize	 = SHA1_BLOCK_SIZE,
			.cra_type	 = &crypto_ahash_type,
			.cra_module	 = THIS_MODULE,
			.cra_list	 = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
			.cra_init	 = sha1_mb_async_init_tfm,
			.cra_exit	 = sha1_mb_async_exit_tfm,
			.cra_ctxsize	 = sizeof(struct sha1_mb_ctx),
		},
	},
};
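
/*
 * Two algorithm registrations cooperate here: the outer "sha1_mb" ahash
 * above is the one users allocate, and each of its handlers simply
 * replays the request against the internal mcryptd tfm, which in turn
 * drives the "__intel_sha1-mb" shash registered earlier from mcryptd's
 * work queue context.
 */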

static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate)
{
	struct mcryptd_hash_request_ctx *rctx;
	unsigned long cur_time;
	unsigned long next_flush = 0;
	struct sha1_hash_ctx *sha_ctx;

	cur_time = jiffies;

	while (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		if (time_before(cur_time, rctx->tag.expire))
			break;
		kernel_fpu_begin();
		sha_ctx = (struct sha1_hash_ctx *) sha1_ctx_mgr_flush(cstate->mgr);
		kernel_fpu_end();
		if (!sha_ctx) {
			pr_err("sha1_mb error: nothing got flushed for non-empty list\n");
			break;
		}
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		sha_finish_walk(&rctx, cstate, true);
		sha_complete_job(rctx, cstate, 0);
	}

	if (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		/* get the hash context and then flush time */
		next_flush = rctx->tag.expire;
		mcryptd_arm_flusher(cstate, get_delay(next_flush));
	}
	return next_flush;
}

static int __init sha1_mb_mod_init(void)
{
	int cpu;
	int err;
	struct mcryptd_alg_cstate *cpu_state;

	/* check for dependent cpu features */
	if (!boot_cpu_has(X86_FEATURE_AVX2) ||
	    !boot_cpu_has(X86_FEATURE_BMI2))
		return -ENODEV;

	/* initialize multibuffer structures */
	sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate);

	sha1_job_mgr_init = sha1_mb_mgr_init_avx2;
	sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2;
	sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2;
	sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2;

	if (!sha1_mb_alg_state.alg_cstate)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		cpu_state->next_flush = 0;
		cpu_state->next_seq_num = 0;
		cpu_state->flusher_engaged = false;
		INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
		cpu_state->cpu = cpu;
		cpu_state->alg_state = &sha1_mb_alg_state;
		cpu_state->mgr = kzalloc(sizeof(struct sha1_ctx_mgr),
					 GFP_KERNEL);
		if (!cpu_state->mgr)
			goto err2;
		sha1_ctx_mgr_init(cpu_state->mgr);
		INIT_LIST_HEAD(&cpu_state->work_list);
		spin_lock_init(&cpu_state->work_lock);
	}
	sha1_mb_alg_state.flusher = &sha1_mb_flusher;

	err = crypto_register_shash(&sha1_mb_shash_alg);
	if (err)
		goto err2;
	err = crypto_register_ahash(&sha1_mb_async_alg);
	if (err)
		goto err1;

	return 0;
err1:
	crypto_unregister_shash(&sha1_mb_shash_alg);
err2:
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha1_mb_alg_state.alg_cstate);
	return -ENODEV;
}

static void __exit sha1_mb_mod_fini(void)
{
	int cpu;
	struct mcryptd_alg_cstate *cpu_state;

	crypto_unregister_ahash(&sha1_mb_async_alg);
	crypto_unregister_shash(&sha1_mb_shash_alg);
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha1_mb_alg_state.alg_cstate);
}

module_init(sha1_mb_mod_init);
module_exit(sha1_mb_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");

MODULE_ALIAS("sha1");