/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10
/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	struct ahash_request *hreq;
	struct ablkcipher_request *breq;
	unsigned long flags;
	bool was_busy = false;
	int ret, rtype;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(&engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(&engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			pr_err("failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	engine->cur_req = async_req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
	/* At this point we hold a request that needs processing */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			pr_err("failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	switch (rtype) {
	case CRYPTO_ALG_TYPE_AHASH:
		hreq = ahash_request_cast(engine->cur_req);
		if (engine->prepare_hash_request) {
			ret = engine->prepare_hash_request(engine, hreq);
			if (ret) {
				pr_err("failed to prepare request: %d\n", ret);
				goto req_err;
			}
			engine->cur_req_prepared = true;
		}
		ret = engine->hash_one_request(engine, hreq);
		if (ret) {
			pr_err("failed to hash one request from queue\n");
			goto req_err;
		}
		return;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		breq = ablkcipher_request_cast(engine->cur_req);
		if (engine->prepare_cipher_request) {
			ret = engine->prepare_cipher_request(engine, breq);
			if (ret) {
				pr_err("failed to prepare request: %d\n", ret);
				goto req_err;
			}
			engine->cur_req_prepared = true;
		}
		ret = engine->cipher_one_request(engine, breq);
		if (ret) {
			pr_err("failed to cipher one request from queue\n");
			goto req_err;
		}
		return;
	default:
		pr_err("failed to prepare request of unknown type\n");
		return;
	}

req_err:
	switch (rtype) {
	case CRYPTO_ALG_TYPE_AHASH:
		hreq = ahash_request_cast(engine->cur_req);
		crypto_finalize_hash_request(engine, hreq, ret);
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		breq = ablkcipher_request_cast(engine->cur_req);
		crypto_finalize_cipher_request(engine, breq, ret);
		break;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}
static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}
/**
 * crypto_transfer_cipher_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 * @need_pump: if true, wake the request pump when the engine is not busy
 */
int crypto_transfer_cipher_request(struct crypto_engine *engine,
				   struct ablkcipher_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ablkcipher_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(&engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);
/**
 * crypto_transfer_cipher_request_to_engine - transfer one request to the
 * engine queue and wake the request pump
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
					     struct ablkcipher_request *req)
{
	return crypto_transfer_cipher_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);
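
/*
 * Usage sketch (illustrative only, not part of this file): a driver's
 * ablkcipher .encrypt entry point would typically just hand the request
 * over to the engine and propagate the enqueue result (-EINPROGRESS or
 * -EBUSY). "my_dev" and my_dev_from_request() are hypothetical names:
 *
 *	static int my_aes_encrypt(struct ablkcipher_request *req)
 *	{
 *		struct my_dev *dd = my_dev_from_request(req);
 *
 *		return crypto_transfer_cipher_request_to_engine(dd->engine,
 *								req);
 *	}
 *
 * The engine later invokes the driver's cipher_one_request() hook from the
 * pump thread once the hardware has been prepared.
 */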
/**
 * crypto_transfer_hash_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 * @need_pump: if true, wake the request pump when the engine is not busy
 */
int crypto_transfer_hash_request(struct crypto_engine *engine,
				 struct ahash_request *req, bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ahash_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(&engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);
/**
 * crypto_transfer_hash_request_to_engine - transfer one request to the
 * engine queue and wake the request pump
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_hash_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
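
/*
 * Usage sketch (illustrative only): the ahash side mirrors the cipher side;
 * a driver's .digest handler forwards the request and returns the enqueue
 * result to the caller. "my_dev" and my_dev_from_ahash_request() are
 * hypothetical names:
 *
 *	static int my_sha_digest(struct ahash_request *req)
 *	{
 *		struct my_dev *dd = my_dev_from_ahash_request(req);
 *
 *		return crypto_transfer_hash_request_to_engine(dd->engine,
 *							      req);
 *	}
 */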
/**
 * crypto_finalize_cipher_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_cipher_request(struct crypto_engine *engine,
				    struct ablkcipher_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == &req->base)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared &&
		    engine->unprepare_cipher_request) {
			ret = engine->unprepare_cipher_request(engine, req);
			if (ret)
				pr_err("failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	kthread_queue_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
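
/*
 * Usage sketch (illustrative only): a driver typically calls this from its
 * completion path, e.g. an interrupt handler or tasklet, once the hardware
 * has finished the transfer started by cipher_one_request(). "my_dev",
 * dd->req and my_read_status() are hypothetical names for the driver's own
 * bookkeeping:
 *
 *	static irqreturn_t my_crypt_irq(int irq, void *data)
 *	{
 *		struct my_dev *dd = data;
 *
 *		crypto_finalize_cipher_request(dd->engine, dd->req,
 *					       my_read_status(dd));
 *		return IRQ_HANDLED;
 *	}
 *
 * Finalizing also queues the pump work again, so the next queued request is
 * picked up without further action from the driver.
 */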
/**
 * crypto_finalize_hash_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == &req->base)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared &&
		    engine->unprepare_hash_request) {
			ret = engine->unprepare_hash_request(engine, req);
			if (ret)
				pr_err("failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	kthread_queue_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else an error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(&engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);
/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else an error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, wait for
	 * a while so that the pump thread can drain the queue.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		pr_warn("could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);
/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it
 * @dev: the device attached with one hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from a context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	kthread_init_worker(&engine->kworker);
	engine->kworker_task = kthread_run(kthread_worker_fn,
					   &engine->kworker, "%s",
					   engine->name);
	if (IS_ERR(engine->kworker_task)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
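
/*
 * Usage sketch (illustrative only): a driver's probe() would typically
 * allocate the engine, install its hooks and start it. "dd" and the
 * my_*() hook names are hypothetical:
 *
 *	dd->engine = crypto_engine_alloc_init(dev, true);
 *	if (!dd->engine)
 *		return -ENOMEM;
 *
 *	dd->engine->prepare_cipher_request = my_prepare_cipher_request;
 *	dd->engine->cipher_one_request = my_cipher_one_request;
 *
 *	ret = crypto_engine_start(dd->engine);
 *	if (ret)
 *		return ret;
 */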
/**
 * crypto_engine_exit - free the resources of hardware engine when exiting
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_flush_worker(&engine->kworker);
	kthread_stop(engine->kworker_task);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
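
/*
 * Usage sketch (illustrative only): the counterpart in a driver's remove()
 * path, with "dd" being the hypothetical driver structure from the probe()
 * sketch above:
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd = platform_get_drvdata(pdev);
 *
 *		crypto_engine_exit(dd->engine);
 *		return 0;
 *	}
 */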
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");