/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");

static unsigned int aes_disable;
module_param(aes_disable, uint, 0444);
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");

static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");

/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);

/* For any tfm, requests for that tfm on the same CPU must be returned
 * in the order received.  With multiple queues available, the CCP can
 * process more than one cmd at a time.  Therefore we must maintain
 * a cmd list to ensure the proper ordering of requests on a given
 * tfm/cpu combination.
 */
struct ccp_crypto_cpu_queue {
        struct list_head cmds;
        struct list_head *backlog;
        unsigned int cmd_count;
};
#define CCP_CRYPTO_MAX_QLEN     50

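/* One command queue per possible CPU, wrapped in a structure so the set
 * of queues can be allocated with alloc_percpu() at module init.
 */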
struct ccp_crypto_percpu_queue {
        struct ccp_crypto_cpu_queue __percpu *cpu_queue;
};
static struct ccp_crypto_percpu_queue req_queue;

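/* Per-request tracking structure: ties a ccp_cmd to the async crypto
 * request that generated it and records the CPU queue used to enforce
 * per-tfm ordering.
 */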
struct ccp_crypto_cmd {
        struct list_head entry;

        struct ccp_cmd *cmd;

        /* Save the crypto_tfm and crypto_async_request addresses
         * separately to avoid any reference to a possibly invalid
         * crypto_async_request structure after invoking the request
         * callback
         */
        struct crypto_async_request *req;
        struct crypto_tfm *tfm;

        /* Used for held command processing to determine state */
        int ret;

        int cpu;
};

struct ccp_crypto_cpu {
        struct work_struct work;
        struct completion completion;
        struct ccp_crypto_cmd *crypto_cmd;
        int err;
};

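/* -EINPROGRESS and -EBUSY indicate that a cmd was accepted (queued or
 * backlogged) by the CCP driver, so they are treated as success here.
 */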
static inline bool ccp_crypto_success(int err)
{
        if (err && (err != -EINPROGRESS) && (err != -EBUSY))
                return false;

        return true;
}

/*
 * ccp_crypto_cmd_complete must be called while running on the appropriate
 * cpu and the caller must have done a get_cpu to disable preemption
 */
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
        struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
        struct ccp_crypto_cpu_queue *cpu_queue;
        struct ccp_crypto_cmd *held = NULL, *tmp;

        *backlog = NULL;

        cpu_queue = this_cpu_ptr(req_queue.cpu_queue);

        /* Held cmds will be after the current cmd in the queue so start
         * searching for a cmd with a matching tfm for submission.
         */
        tmp = crypto_cmd;
        list_for_each_entry_continue(tmp, &cpu_queue->cmds, entry) {
                if (crypto_cmd->tfm != tmp->tfm)
                        continue;
                held = tmp;
                break;
        }

        /* Process the backlog:
         *   Because cmds can be executed from any point in the cmd list
         *   special precautions have to be taken when handling the backlog.
         */
        if (cpu_queue->backlog != &cpu_queue->cmds) {
                /* Skip over this cmd if it is the next backlog cmd */
                if (cpu_queue->backlog == &crypto_cmd->entry)
                        cpu_queue->backlog = crypto_cmd->entry.next;

                *backlog = container_of(cpu_queue->backlog,
                                        struct ccp_crypto_cmd, entry);
                cpu_queue->backlog = cpu_queue->backlog->next;

                /* Skip over this cmd if it is now the next backlog cmd */
                if (cpu_queue->backlog == &crypto_cmd->entry)
                        cpu_queue->backlog = crypto_cmd->entry.next;
        }

        /* Remove the cmd entry from the list of cmds */
        cpu_queue->cmd_count--;
        list_del(&crypto_cmd->entry);

        return held;
}

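/* Invoked via schedule_work_on() on the CPU where the request was
 * originally queued, so completions for a given tfm/cpu are delivered
 * in submission order.
 */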
static void ccp_crypto_complete_on_cpu(struct work_struct *work)
{
        struct ccp_crypto_cpu *cpu_work =
                container_of(work, struct ccp_crypto_cpu, work);
        struct ccp_crypto_cmd *crypto_cmd = cpu_work->crypto_cmd;
        struct ccp_crypto_cmd *held, *next, *backlog;
        struct crypto_async_request *req = crypto_cmd->req;
        struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
        int ret;

        get_cpu();

        if (cpu_work->err == -EINPROGRESS) {
                /* Only propagate the -EINPROGRESS if necessary */
                if (crypto_cmd->ret == -EBUSY) {
                        crypto_cmd->ret = -EINPROGRESS;
                        req->complete(req, -EINPROGRESS);
                }

                goto e_cpu;
        }

        /* Operation has completed - update the queue before invoking
         * the completion callbacks and retrieve the next cmd (cmd with
         * a matching tfm) that can be submitted to the CCP.
         */
        held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
        if (backlog) {
                backlog->ret = -EINPROGRESS;
                backlog->req->complete(backlog->req, -EINPROGRESS);
        }

        /* Transition the state from -EBUSY to -EINPROGRESS first */
        if (crypto_cmd->ret == -EBUSY)
                req->complete(req, -EINPROGRESS);

        /* Completion callbacks */
        ret = cpu_work->err;
        if (ctx->complete)
                ret = ctx->complete(req, ret);
        req->complete(req, ret);

        /* Submit the next cmd */
        while (held) {
                ret = ccp_enqueue_cmd(held->cmd);
                if (ccp_crypto_success(ret))
                        break;

                /* Error occurred, report it and get the next entry */
                held->req->complete(held->req, ret);

                next = ccp_crypto_cmd_complete(held, &backlog);
                if (backlog) {
                        backlog->ret = -EINPROGRESS;
                        backlog->req->complete(backlog->req, -EINPROGRESS);
                }

                kfree(held);
                held = next;
        }

        kfree(crypto_cmd);

e_cpu:
        put_cpu();

        complete(&cpu_work->completion);
}

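/* Callback attached to every ccp_cmd.  The CCP driver invokes it when the
 * cmd changes state (e.g. starts processing or completes); the real work
 * is pushed to the submitting CPU and waited on so the call remains
 * synchronous with respect to the caller.
 */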
static void ccp_crypto_complete(void *data, int err)
{
        struct ccp_crypto_cmd *crypto_cmd = data;
        struct ccp_crypto_cpu cpu_work;

        INIT_WORK(&cpu_work.work, ccp_crypto_complete_on_cpu);
        init_completion(&cpu_work.completion);
        cpu_work.crypto_cmd = crypto_cmd;
        cpu_work.err = err;

        schedule_work_on(crypto_cmd->cpu, &cpu_work.work);

        /* Keep the completion call synchronous */
        wait_for_completion(&cpu_work.completion);
}

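/* Queue a cmd on the current CPU's queue.  The cmd is only passed to
 * ccp_enqueue_cmd() if no other cmd for the same tfm is already pending
 * on this CPU; otherwise it is held until that cmd completes.
 */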
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
        struct ccp_crypto_cpu_queue *cpu_queue;
        struct ccp_crypto_cmd *active = NULL, *tmp;
        int cpu, ret;

        cpu = get_cpu();
        crypto_cmd->cpu = cpu;

        cpu_queue = this_cpu_ptr(req_queue.cpu_queue);

        /* Check if the cmd can/should be queued */
        if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
                ret = -EBUSY;
                if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
                        goto e_cpu;
        }

        /* Look for an entry with the same tfm.  If there is a cmd
         * with the same tfm in the list for this cpu then the current
         * cmd cannot be submitted to the CCP yet.
         */
        list_for_each_entry(tmp, &cpu_queue->cmds, entry) {
                if (crypto_cmd->tfm != tmp->tfm)
                        continue;
                active = tmp;
                break;
        }

        ret = -EINPROGRESS;
        if (!active) {
                ret = ccp_enqueue_cmd(crypto_cmd->cmd);
                if (!ccp_crypto_success(ret))
                        goto e_cpu;
        }

        if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
                ret = -EBUSY;
                if (cpu_queue->backlog == &cpu_queue->cmds)
                        cpu_queue->backlog = &crypto_cmd->entry;
        }
        crypto_cmd->ret = ret;

        cpu_queue->cmd_count++;
        list_add_tail(&crypto_cmd->entry, &cpu_queue->cmds);

e_cpu:
        put_cpu();

        return ret;
}

/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *                              by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
                               struct ccp_cmd *cmd)
{
        struct ccp_crypto_cmd *crypto_cmd;
        gfp_t gfp;
        int ret;

        gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

        crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
        if (!crypto_cmd)
                return -ENOMEM;

        /* The tfm pointer must be saved and not referenced from the
         * crypto_async_request (req) pointer because it is used after
         * completion callback for the request and the req pointer
         * might not be valid anymore.
         */
        crypto_cmd->cmd = cmd;
        crypto_cmd->req = req;
        crypto_cmd->tfm = req->tfm;

        cmd->callback = ccp_crypto_complete;
        cmd->data = crypto_cmd;

        if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
                cmd->flags |= CCP_CMD_MAY_BACKLOG;
        else
                cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

        ret = ccp_crypto_enqueue_cmd(crypto_cmd);
        if (!ccp_crypto_success(ret))
                kfree(crypto_cmd);

        return ret;
}

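/* Append the entries of sg_add to the first unused slots of table->sgl,
 * returning the last scatterlist entry that was filled in.
 */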
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
                                            struct scatterlist *sg_add)
{
        struct scatterlist *sg, *sg_last = NULL;

        /* Find the first unused entry in the table */
        for (sg = table->sgl; sg; sg = sg_next(sg))
                if (!sg_page(sg))
                        break;
        BUG_ON(!sg);

        /* Copy the sg_add entries into the unused slots */
        for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
                sg_set_page(sg, sg_page(sg_add), sg_add->length,
                            sg_add->offset);
                sg_last = sg;
        }
        BUG_ON(sg_add);

        return sg_last;
}

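/* Register the supported algorithms, honoring the aes_disable and
 * sha_disable module parameters.
 */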
static int ccp_register_algs(void)
{
        int ret;

        if (!aes_disable) {
                ret = ccp_register_aes_algs(&cipher_algs);
                if (ret)
                        return ret;

                ret = ccp_register_aes_cmac_algs(&hash_algs);
                if (ret)
                        return ret;

                ret = ccp_register_aes_xts_algs(&cipher_algs);
                if (ret)
                        return ret;
        }

        if (!sha_disable) {
                ret = ccp_register_sha_algs(&hash_algs);
                if (ret)
                        return ret;
        }

        return 0;
}

static void ccp_unregister_algs(void)
{
        struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
        struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;

        list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
                crypto_unregister_ahash(&ahash_alg->alg);
                list_del(&ahash_alg->entry);
                kfree(ahash_alg);
        }

        list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
                crypto_unregister_alg(&ablk_alg->alg);
                list_del(&ablk_alg->entry);
                kfree(ablk_alg);
        }
}

static int ccp_init_queues(void)
{
        struct ccp_crypto_cpu_queue *cpu_queue;
        int cpu;

        req_queue.cpu_queue = alloc_percpu(struct ccp_crypto_cpu_queue);
        if (!req_queue.cpu_queue)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
                INIT_LIST_HEAD(&cpu_queue->cmds);
                cpu_queue->backlog = &cpu_queue->cmds;
                cpu_queue->cmd_count = 0;
        }

        return 0;
}

static void ccp_fini_queue(void)
{
        struct ccp_crypto_cpu_queue *cpu_queue;
        int cpu;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
                BUG_ON(!list_empty(&cpu_queue->cmds));
        }
        free_percpu(req_queue.cpu_queue);
}

static int ccp_crypto_init(void)
{
        int ret;

        ret = ccp_init_queues();
        if (ret)
                return ret;

        ret = ccp_register_algs();
        if (ret) {
                ccp_unregister_algs();
                ccp_fini_queue();
        }

        return ret;
}

static void ccp_crypto_exit(void)
{
        ccp_unregister_algs();
        ccp_fini_queue();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);