2 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
4 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
6 * This file add support for AES cipher with 128,192,256 bits
7 * keysize in CBC and ECB mode.
8 * Add support also for DES and 3DES in CBC and ECB mode.
10 * You could find the datasheet in Documentation/arm/sunxi/README
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
/*
 * sun4i_ss_opti_poll() - optimized CPU-driven transfer path.
 * Pushes the whole request through the SS RX FIFO and drains the TX FIFO
 * one 32-bit word at a time. Only usable when every source/destination
 * scatterlist entry has a length that is a multiple of 4 (the caller,
 * sun4i_ss_cipher_poll(), checks this before dispatching here).
 *
 * NOTE(review): this extract is missing lines (error returns, closing
 * braces, the main transfer-loop header, declarations of i/v/todo/
 * spaces/tx_cnt/mode) — verify control flow against the complete file.
 */
19 static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
21 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
22 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
23 struct sun4i_ss_ctx *ss = op->ss;
24 unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
25 struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
27 /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
28 u32 rx_cnt = SS_RX_DEFAULT;
/* ileft/oleft start in bytes; converted to 32-bit words further down. */
33 unsigned int ileft = areq->nbytes;
34 unsigned int oleft = areq->nbytes;
36 struct sg_mapping_iter mi, mo;
37 unsigned int oi, oo; /* offset for in and out */
/* Reject degenerate requests before touching the hardware. */
39 if (areq->nbytes == 0)
43 dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
47 if (!areq->src || !areq->dst) {
48 dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
/* Serialize access to the engine registers for the whole transfer. */
52 spin_lock_bh(&ss->slock);
/* Program the cached key words into SS_KEY0.. */
54 for (i = 0; i < op->keylen; i += 4)
55 writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
/* Program the IV (at most 4 words) into SS_IV0.. */
58 for (i = 0; i < 4 && i < ivsize / 4; i++) {
59 v = *(u32 *)(areq->info + i * 4);
60 writel(v, ss->base + SS_IV0 + i * 4);
/* Kick the engine with the control word prepared by the caller. */
63 writel(mode, ss->base + SS_CTL);
65 sg_miter_start(&mi, areq->src, sg_nents(areq->src),
66 SG_MITER_FROM_SG | SG_MITER_ATOMIC);
67 sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
68 SG_MITER_TO_SG | SG_MITER_ATOMIC);
71 if (!mi.addr || !mo.addr) {
72 dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
/* From here on, ileft/oleft count 32-bit words, not bytes. */
77 ileft = areq->nbytes / 4;
78 oleft = areq->nbytes / 4;
/* Write as many words as FIFO space and the current SG entry allow. */
82 todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
85 writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
/* Current input SG entry exhausted: advance the mapping iterator. */
88 if (oi == mi.length) {
/* Re-read FIFO occupancy to size the next write/read burst. */
93 spaces = readl(ss->base + SS_FCSR);
94 rx_cnt = SS_RXFIFO_SPACES(spaces);
95 tx_cnt = SS_TXFIFO_SPACES(spaces);
/* Drain whatever output words are ready into the destination SG. */
97 todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
100 readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
103 if (oo == mo.length) {
/* Copy the engine's final IV back to areq->info (presumably for CBC
 * chaining across requests — confirm against the crypto API contract). */
110 for (i = 0; i < 4 && i < ivsize / 4; i++) {
111 v = readl(ss->base + SS_IV0 + i * 4);
112 *(u32 *)(areq->info + i * 4) = v;
/* Stop the engine and release it. */
119 writel(0, ss->base + SS_CTL);
120 spin_unlock_bh(&ss->slock);
124 /* Generic function that support SG with size not multiple of 4 */
/*
 * sun4i_ss_cipher_poll() - generic CPU-driven transfer path.
 * Like sun4i_ss_opti_poll() but tolerates scatterlist entries whose
 * length is not a multiple of 4 by staging partial words through the
 * on-stack buf (input) and bufo (output) linearization buffers.
 * If every SG entry IS 4-byte aligned, it short-circuits to the
 * optimized routine instead.
 *
 * NOTE(review): this extract is missing lines (loop headers, braces,
 * error paths, declarations of i/v/todo/spaces/tx_cnt/no_chunk) —
 * verify control flow against the complete file.
 */
125 static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
127 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
128 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
129 struct sun4i_ss_ctx *ss = op->ss;
131 struct scatterlist *in_sg = areq->src;
132 struct scatterlist *out_sg = areq->dst;
133 unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
134 struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
135 u32 mode = ctx->mode;
136 /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
137 u32 rx_cnt = SS_RX_DEFAULT;
142 unsigned int ileft = areq->nbytes;
143 unsigned int oleft = areq->nbytes;
145 struct sg_mapping_iter mi, mo;
146 unsigned int oi, oo; /* offset for in and out */
147 char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
148 char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
149 unsigned int ob = 0; /* offset in buf */
150 unsigned int obo = 0; /* offset in bufo*/
151 unsigned int obl = 0; /* length of data in bufo */
/* Same sanity checks as the optimized path. */
153 if (areq->nbytes == 0)
157 dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
161 if (!areq->src || !areq->dst) {
162 dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
167 * if we have only SGs with size multiple of 4,
168 * we can use the SS optimized function
170 while (in_sg && no_chunk == 1) {
171 if ((in_sg->length % 4) != 0)
173 in_sg = sg_next(in_sg);
175 while (out_sg && no_chunk == 1) {
176 if ((out_sg->length % 4) != 0)
178 out_sg = sg_next(out_sg);
/* All SG entries word-aligned: take the fast path instead. */
182 return sun4i_ss_opti_poll(areq);
184 spin_lock_bh(&ss->slock);
/* Program key, IV and control word exactly as in the optimized path. */
186 for (i = 0; i < op->keylen; i += 4)
187 writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
190 for (i = 0; i < 4 && i < ivsize / 4; i++) {
191 v = *(u32 *)(areq->info + i * 4);
192 writel(v, ss->base + SS_IV0 + i * 4);
195 writel(mode, ss->base + SS_CTL);
197 sg_miter_start(&mi, areq->src, sg_nents(areq->src),
198 SG_MITER_FROM_SG | SG_MITER_ATOMIC);
199 sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
200 SG_MITER_TO_SG | SG_MITER_ATOMIC);
203 if (!mi.addr || !mo.addr) {
204 dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
/* Unlike the optimized path, ileft/oleft stay in BYTES here. */
208 ileft = areq->nbytes;
209 oleft = areq->nbytes;
216 * todo is the number of consecutive 4byte word that we
217 * can read from current SG
219 todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
/* Whole words available and no partial word pending: write directly. */
220 if (todo > 0 && ob == 0) {
221 writesl(ss->base + SS_RXFIFO, mi.addr + oi,
227 * not enough consecutive bytes, so we need to
228 * linearize in buf. todo is in bytes
229 * After that copy, if we have a multiple of 4
230 * we need to be able to write all buf in one
231 * pass, so it is why we min() with rx_cnt
233 todo = min3(rx_cnt * 4 - ob, ileft,
235 memcpy(buf + ob, mi.addr + oi, todo);
/* Staging buffer holds complete words: flush it to the FIFO. */
240 writesl(ss->base + SS_RXFIFO, buf,
245 if (oi == mi.length) {
251 spaces = readl(ss->base + SS_FCSR);
252 rx_cnt = SS_RXFIFO_SPACES(spaces);
253 tx_cnt = SS_TXFIFO_SPACES(spaces);
254 dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u %u\n",
256 oi, mi.length, ileft, areq->nbytes, rx_cnt,
257 oo, mo.length, oleft, areq->nbytes, tx_cnt,
262 /* todo in 4bytes word */
263 todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
265 readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
268 if (oo == mo.length) {
274 * read obl bytes in bufo, we read at maximum for
275 * emptying the device
277 readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
282 * how many bytes we can copy ?
283 * no more than remaining SG size
284 * no more than remaining buffer
285 * no need to test against oleft
287 todo = min(mo.length - oo, obl - obo);
288 memcpy(mo.addr + oo, bufo + obo, todo);
292 if (oo == mo.length) {
297 /* bufo must be fully used here */
/* Write back the engine's final IV (presumably for CBC chaining). */
301 for (i = 0; i < 4 && i < ivsize / 4; i++) {
302 v = readl(ss->base + SS_IV0 + i * 4);
303 *(u32 *)(areq->info + i * 4) = v;
310 writel(0, ss->base + SS_CTL);
311 spin_unlock_bh(&ss->slock);
/*
 * CBC(AES) encryption entry point: record the engine control word in the
 * per-request context, then run the generic polling transfer.
 * NOTE(review): the mode assignment ends with '|' — this extract looks
 * truncated (upstream ORs in op->keymode); verify against the full file.
 */
317 int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq)
319 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
320 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
321 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
323 rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
325 return sun4i_ss_cipher_poll(areq);
/*
 * CBC(AES) decryption entry point: set the control word, then dispatch.
 * NOTE(review): the mode assignment ends with '|' — this extract looks
 * truncated (upstream ORs in op->keymode); verify against the full file.
 */
328 int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq)
330 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
331 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
332 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
334 rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
336 return sun4i_ss_cipher_poll(areq);
/*
 * ECB(AES) encryption entry point: set the control word, then dispatch.
 * NOTE(review): the mode assignment ends with '|' — this extract looks
 * truncated (upstream ORs in op->keymode); verify against the full file.
 */
342 int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq)
342 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
343 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
344 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
346 rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
348 return sun4i_ss_cipher_poll(areq);
/*
 * ECB(AES) decryption entry point: set the control word, then dispatch.
 * NOTE(review): the mode assignment ends with '|' — this extract looks
 * truncated (upstream ORs in op->keymode); verify against the full file.
 */
351 int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq)
353 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
354 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
355 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
357 rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
359 return sun4i_ss_cipher_poll(areq);
/*
 * CBC(DES) encryption entry point: set the control word, then dispatch.
 * NOTE(review): the mode assignment ends with '|' — this extract looks
 * truncated (upstream ORs in op->keymode); verify against the full file.
 */
365 int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq)
365 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
366 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
367 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
369 rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
371 return sun4i_ss_cipher_poll(areq);
/*
 * CBC(DES) decryption entry point: set the control word, then dispatch.
 * NOTE(review): the mode assignment ends with '|' — this extract looks
 * truncated (upstream ORs in op->keymode); verify against the full file.
 */
374 int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq)
376 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
377 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
378 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
380 rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
382 return sun4i_ss_cipher_poll(areq);
/*
 * ECB(DES) encryption entry point: set the control word, then dispatch.
 * NOTE(review): the mode assignment ends with '|' — this extract looks
 * truncated (upstream ORs in op->keymode); verify against the full file.
 */
386 int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq)
388 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
389 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
390 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
392 rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
394 return sun4i_ss_cipher_poll(areq);
/*
 * ECB(DES) decryption entry point: set the control word, then dispatch.
 * NOTE(review): the mode assignment ends with '|' — this extract looks
 * truncated (upstream ORs in op->keymode); verify against the full file.
 */
397 int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq)
399 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
400 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
401 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
403 rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
405 return sun4i_ss_cipher_poll(areq);
/*
 * CBC(3DES) encryption entry point: set the control word, then dispatch.
 * NOTE(review): the mode assignment ends with '|' — this extract looks
 * truncated (upstream ORs in op->keymode); verify against the full file.
 */
409 int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq)
411 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
412 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
413 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
415 rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
417 return sun4i_ss_cipher_poll(areq);
/*
 * CBC(3DES) decryption entry point: set the control word, then dispatch.
 * NOTE(review): the mode assignment ends with '|' — this extract looks
 * truncated (upstream ORs in op->keymode); verify against the full file.
 */
420 int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq)
422 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
423 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
424 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
426 rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
428 return sun4i_ss_cipher_poll(areq);
/*
 * ECB(3DES) encryption entry point: set the control word, then dispatch.
 * NOTE(review): the mode assignment ends with '|' — this extract looks
 * truncated (upstream ORs in op->keymode); verify against the full file.
 */
432 int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq)
434 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
435 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
436 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
438 rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
440 return sun4i_ss_cipher_poll(areq);
/*
 * ECB(3DES) decryption entry point: set the control word, then dispatch.
 * NOTE(review): the mode assignment ends with '|' — this extract looks
 * truncated (upstream ORs in op->keymode); verify against the full file.
 */
443 int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq)
445 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
446 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
447 struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
449 rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
451 return sun4i_ss_cipher_poll(areq);
/*
 * Per-transform initialization: zero the tfm context, recover the driver
 * template that wraps this crypto_alg, and size the per-request context.
 * NOTE(review): this extract is missing lines (presumably op->ss = algt->ss
 * and the return statement) — confirm against the full file.
 */
454 int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
456 struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
457 struct crypto_alg *alg = tfm->__crt_alg;
458 struct sun4i_ss_alg_template *algt;
460 memset(op, 0, sizeof(struct sun4i_tfm_ctx));
/* Recover the enclosing driver template from the generic crypto_alg. */
462 algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
/* Reserve room for a sun4i_cipher_req_ctx in every ablkcipher request. */
465 tfm->crt_ablkcipher.reqsize = sizeof(struct sun4i_cipher_req_ctx);
470 /* check and set the AES key, prepare the mode to be used */
471 int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
474 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
475 struct sun4i_ss_ctx *ss = op->ss;
/* Map keylen to the hardware key-size field; anything else is rejected
 * with CRYPTO_TFM_RES_BAD_KEY_LEN.
 * NOTE(review): the switch header and case labels are missing from this
 * extract — confirm the 16/24/32-byte mapping against the full file. */
479 op->keymode = SS_AES_128BITS;
482 op->keymode = SS_AES_192BITS;
485 op->keymode = SS_AES_256BITS;
488 dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
489 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
/* Cache the key; it is written into SS_KEY0.. at request time. */
493 memcpy(op->key, key, keylen);
497 /* check and set the DES key, prepare the mode to be used */
498 int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
501 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
502 struct sun4i_ss_ctx *ss = op->ss;
/* Scratch space for the software DES key expansion (weak-key check). */
504 u32 tmp[DES_EXPKEY_WORDS];
/* DES accepts exactly one key size. */
507 if (unlikely(keylen != DES_KEY_SIZE)) {
508 dev_err(ss->dev, "Invalid keylen %u\n", keylen);
509 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
513 flags = crypto_ablkcipher_get_flags(tfm);
/* des_ekey() returning 0 marks a weak key; reject it only when the
 * caller asked for weak-key screening via CRYPTO_TFM_REQ_WEAK_KEY. */
515 ret = des_ekey(tmp, key);
516 if (unlikely(ret == 0) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
517 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
518 dev_dbg(ss->dev, "Weak key %u\n", keylen);
/* Cache the key for programming into SS_KEY0.. at request time. */
523 memcpy(op->key, key, keylen);
527 /* check and set the 3DES key, prepare the mode to be used */
528 int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
531 struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
532 struct sun4i_ss_ctx *ss = op->ss;
/* 3DES keys are exactly three single-DES keys long. */
534 if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
535 dev_err(ss->dev, "Invalid keylen %u\n", keylen);
536 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
/* Cache the key for programming into SS_KEY0.. at request time.
 * NOTE(review): the function's tail (return statement, closing brace)
 * is beyond this extract — confirm against the full file. */
540 memcpy(op->key, key, keylen);