2 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/platform_device.h>
20 #include <crypto/algapi.h>
21 #include <crypto/internal/skcipher.h>
22 #include <crypto/internal/hash.h>
23 #include <crypto/internal/aead.h>
24 #include <crypto/sha.h>
25 #include <crypto/ctr.h>
26 #include <crypto/authenc.h>
27 #include <crypto/aes.h>
28 #include <crypto/des.h>
29 #include <linux/rtnetlink.h>
30 #include <linux/version.h>
31 #include "ssi_config.h"
32 #include "ssi_driver.h"
33 #include "ssi_buffer_mgr.h"
35 #include "ssi_request_mgr.h"
37 #include "ssi_sysfs.h"
38 #include "ssi_sram_mgr.h"
39 #include "ssi_fips_local.h"
41 #define template_aead template_u.aead
43 #define MAX_AEAD_SETKEY_SEQ 12
44 #define MAX_AEAD_PROCESS_SEQ 23
46 #define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
47 #define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
49 #define AES_CCM_RFC4309_NONCE_SIZE 3
50 #define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
53 /* Value of each ICV_CMP byte (of 8) in case of success */
54 #define ICV_VERIF_OK 0x01
/* Driver-global AEAD state: SRAM workspace plus the list of registered algs. */
56 struct ssi_aead_handle {
/* SRAM address of the scratch workspace used by AEAD flows */
57 ssi_sram_addr_t sram_workspace_addr;
/* list of registered AEAD algorithm instances */
58 struct list_head aead_list;
/* NOTE(review): this excerpt is missing lines here; the fields below belong
 * to the per-transform context (presumably struct ssi_aead_ctx) — confirm
 * against the full file.
 */
62 struct ssi_drvdata *drvdata;
/* nonce material for CTR (RFC3686) IV construction and AES-CCM */
63 uint8_t ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
/* DMA address of the cipher-key buffer (allocated in ssi_aead_init) */
65 dma_addr_t enckey_dma_addr;
/* HMAC state: user key padded to the hash block size, and derived pads */
68 uint8_t *padded_authkey;
69 uint8_t *ipad_opad; /* IPAD, OPAD*/
70 dma_addr_t padded_authkey_dma_addr;
71 dma_addr_t ipad_opad_dma_addr;
/* XCBC-MAC state: the three derived subkeys, contiguous in one buffer */
74 uint8_t *xcbc_keys; /* K1,K2,K3 */
75 dma_addr_t xcbc_keys_dma_addr;
/* key/auth sizing and the modes copied from the registered algorithm */
78 unsigned int enc_keylen;
79 unsigned int auth_keylen;
80 unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */
81 enum drv_cipher_mode cipher_mode;
82 enum FlowMode flow_mode;
83 enum drv_hash_mode auth_mode;
/* Check that the request's associated-data length is one of the two
 * supported values (16 or 20 bytes) — presumably used by the RFC4309/CCM
 * wrappers; confirm callers in the full file.
 */
86 static inline bool valid_assoclen(struct aead_request *req)
88 return ((req->assoclen == 16) || (req->assoclen == 20));
/* crypto_aead exit (destructor) callback: free the per-tfm DMA-coherent
 * buffers allocated in ssi_aead_init() — the cipher key, and either the
 * XCBC subkeys or the HMAC ipad/opad + padded authkey, depending on
 * auth_mode. Pointers/addresses are zeroed after freeing.
 */
91 static void ssi_aead_exit(struct crypto_aead *tfm)
93 struct device *dev = NULL;
94 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
96 SSI_LOG_DEBUG("Clearing context @%p for %s\n",
97 crypto_aead_ctx(tfm), crypto_tfm_alg_name(&(tfm->base)));
99 dev = &ctx->drvdata->plat_dev->dev;
100 /* Unmap enckey buffer */
101 if (ctx->enckey != NULL) {
/* undo the 48-bit address tagging before handing the addr back to the DMA API */
102 SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->enckey_dma_addr);
103 dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey, ctx->enckey_dma_addr);
104 SSI_LOG_DEBUG("Freed enckey DMA buffer enckey_dma_addr=0x%llX\n",
105 (unsigned long long)ctx->enckey_dma_addr);
106 ctx->enckey_dma_addr = 0;
110 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
111 if (ctx->auth_state.xcbc.xcbc_keys != NULL) {
112 SSI_RESTORE_DMA_ADDR_TO_48BIT(
113 ctx->auth_state.xcbc.xcbc_keys_dma_addr);
/* one buffer holds all three derived subkeys K1..K3 */
114 dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
115 ctx->auth_state.xcbc.xcbc_keys,
116 ctx->auth_state.xcbc.xcbc_keys_dma_addr);
118 SSI_LOG_DEBUG("Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=0x%llX\n",
119 (unsigned long long)ctx->auth_state.xcbc.xcbc_keys_dma_addr);
120 ctx->auth_state.xcbc.xcbc_keys_dma_addr = 0;
121 ctx->auth_state.xcbc.xcbc_keys = NULL;
122 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
123 if (ctx->auth_state.hmac.ipad_opad != NULL) {
124 SSI_RESTORE_DMA_ADDR_TO_48BIT(
125 ctx->auth_state.hmac.ipad_opad_dma_addr);
126 dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
127 ctx->auth_state.hmac.ipad_opad,
128 ctx->auth_state.hmac.ipad_opad_dma_addr);
129 SSI_LOG_DEBUG("Freed ipad_opad DMA buffer ipad_opad_dma_addr=0x%llX\n",
130 (unsigned long long)ctx->auth_state.hmac.ipad_opad_dma_addr);
131 ctx->auth_state.hmac.ipad_opad_dma_addr = 0;
132 ctx->auth_state.hmac.ipad_opad = NULL;
134 if (ctx->auth_state.hmac.padded_authkey != NULL) {
135 SSI_RESTORE_DMA_ADDR_TO_48BIT(
136 ctx->auth_state.hmac.padded_authkey_dma_addr);
137 dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
138 ctx->auth_state.hmac.padded_authkey,
139 ctx->auth_state.hmac.padded_authkey_dma_addr);
140 SSI_LOG_DEBUG("Freed padded_authkey DMA buffer padded_authkey_dma_addr=0x%llX\n",
141 (unsigned long long)ctx->auth_state.hmac.padded_authkey_dma_addr);
142 ctx->auth_state.hmac.padded_authkey_dma_addr = 0;
143 ctx->auth_state.hmac.padded_authkey = NULL;
/* crypto_aead init (constructor) callback: copy the cipher/flow/auth modes
 * from the registering algorithm into the per-tfm context and allocate the
 * DMA-coherent key buffers that ssi_aead_exit() later frees. Error paths
 * (allocation failures) are partially elided in this excerpt.
 */
148 static int ssi_aead_init(struct crypto_aead *tfm)
151 struct aead_alg *alg = crypto_aead_alg(tfm);
152 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
/* recover the driver's wrapper alg to read its mode fields */
153 struct ssi_crypto_alg *ssi_alg =
154 container_of(alg, struct ssi_crypto_alg, aead_alg);
155 SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx, crypto_tfm_alg_name(&(tfm->base)));
157 CHECK_AND_RETURN_UPON_FIPS_ERROR();
159 /* Initialize modes in instance */
160 ctx->cipher_mode = ssi_alg->cipher_mode;
161 ctx->flow_mode = ssi_alg->flow_mode;
162 ctx->auth_mode = ssi_alg->auth_mode;
163 ctx->drvdata = ssi_alg->drvdata;
164 dev = &ctx->drvdata->plat_dev->dev;
/* per-request context lives in the aead_request tail */
165 crypto_aead_set_reqsize(tfm,sizeof(struct aead_req_ctx));
167 /* Allocate key buffer, cache line aligned */
168 ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
169 &ctx->enckey_dma_addr, GFP_KERNEL);
170 if (ctx->enckey == NULL) {
171 SSI_LOG_ERR("Failed allocating key buffer\n");
174 SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->enckey_dma_addr, AES_MAX_KEY_SIZE);
175 SSI_LOG_DEBUG("Allocated enckey buffer in context ctx->enckey=@%p\n", ctx->enckey);
177 /* Set default authlen value */
179 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
180 /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
181 /* (and temporary for user key - up to 256b) */
182 ctx->auth_state.xcbc.xcbc_keys = dma_alloc_coherent(dev,
183 CC_AES_128_BIT_KEY_SIZE * 3,
184 &ctx->auth_state.xcbc.xcbc_keys_dma_addr, GFP_KERNEL);
185 if (ctx->auth_state.xcbc.xcbc_keys == NULL) {
186 SSI_LOG_ERR("Failed allocating buffer for XCBC keys\n");
189 SSI_UPDATE_DMA_ADDR_TO_48BIT(
190 ctx->auth_state.xcbc.xcbc_keys_dma_addr,
191 CC_AES_128_BIT_KEY_SIZE * 3);
192 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
193 /* Allocate dma-coherent buffer for IPAD + OPAD */
194 ctx->auth_state.hmac.ipad_opad = dma_alloc_coherent(dev,
195 2 * MAX_HMAC_DIGEST_SIZE,
196 &ctx->auth_state.hmac.ipad_opad_dma_addr, GFP_KERNEL);
197 if (ctx->auth_state.hmac.ipad_opad == NULL) {
198 SSI_LOG_ERR("Failed allocating IPAD/OPAD buffer\n");
201 SSI_UPDATE_DMA_ADDR_TO_48BIT(
202 ctx->auth_state.hmac.ipad_opad_dma_addr,
203 2 * MAX_HMAC_DIGEST_SIZE);
204 SSI_LOG_DEBUG("Allocated authkey buffer in context ctx->authkey=@%p\n",
205 ctx->auth_state.hmac.ipad_opad);
/* holds the user auth key zero-padded to the hash block size */
207 ctx->auth_state.hmac.padded_authkey = dma_alloc_coherent(dev,
209 &ctx->auth_state.hmac.padded_authkey_dma_addr, GFP_KERNEL);
210 if (ctx->auth_state.hmac.padded_authkey == NULL) {
211 SSI_LOG_ERR("failed to allocate padded_authkey\n");
214 SSI_UPDATE_DMA_ADDR_TO_48BIT(
215 ctx->auth_state.hmac.padded_authkey_dma_addr,
216 MAX_HMAC_BLOCK_SIZE);
/* DRV_HASH_NULL (e.g. CCM): no auth-key buffers needed */
218 ctx->auth_state.hmac.ipad_opad = NULL;
219 ctx->auth_state.hmac.padded_authkey = NULL;
/* Completion callback for an AEAD request: unmap DMA buffers, restore the
 * caller's IV pointer, verify the ICV on decrypt (zeroing the plaintext on
 * authentication failure so it is never revealed), copy back a generated IV
 * if one was requested, then complete the request toward the crypto API.
 */
230 static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
232 struct aead_request *areq = (struct aead_request *)ssi_req;
233 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
234 struct crypto_aead *tfm = crypto_aead_reqtfm(ssi_req);
235 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
237 DECL_CYCLE_COUNT_RESOURCES;
241 ssi_buffer_mgr_unmap_aead_request(dev, areq);
243 /* Restore ordinary iv pointer */
244 areq->iv = areq_ctx->backup_iv;
246 if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
/* compare computed MAC against the ICV that came with the ciphertext */
247 if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
248 ctx->authsize) != 0) {
249 SSI_LOG_DEBUG("Payload authentication failure, "
250 "(auth-size=%d, cipher=%d).\n",
251 ctx->authsize, ctx->cipher_mode);
252 /* In case of payload authentication failure, MUST NOT
253 reveal the decrypted message --> zero its memory. */
254 ssi_buffer_mgr_zero_sgl(areq->dst, areq_ctx->cryptlen);
/* encrypt side: if the ICV landed fragmented across SG entries,
 * scatter mac_buf back into the destination SG list */
258 if (unlikely(areq_ctx->is_icv_fragmented == true))
259 ssi_buffer_mgr_copy_scatterlist_portion(
260 areq_ctx->mac_buf, areq_ctx->dstSgl, areq->cryptlen+areq_ctx->dstOffset,
261 areq->cryptlen+areq_ctx->dstOffset + ctx->authsize, SSI_SG_FROM_BUF);
263 /* If an IV was generated, copy it back to the user provided buffer. */
264 if (areq_ctx->backup_giv != NULL) {
265 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
/* skip the nonce prefix; only the RFC3686 IV part goes back */
266 memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_IV_SIZE);
267 } else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
268 memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
273 END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_4);
274 aead_request_complete(areq, err);
/* Build the HW descriptor sequence that derives the three AES-XCBC-MAC
 * subkeys: load the user key into the AES engine, then ECB-encrypt the
 * constants 0x01.., 0x02.., 0x03.. and write K1/K2/K3 back into the
 * xcbc_keys buffer (overwriting the user key, which is no longer needed).
 */
277 static int xcbc_setkey(HwDesc_s *desc, struct ssi_aead_ctx *ctx)
279 /* Load the AES key */
280 HW_DESC_INIT(&desc[0]);
281 /* We are using for the source/user key the same buffer as for the output keys,
282 because after this key loading it is not needed anymore */
283 HW_DESC_SET_DIN_TYPE(&desc[0], DMA_DLLI, ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen, NS_BIT);
284 HW_DESC_SET_CIPHER_MODE(&desc[0], DRV_CIPHER_ECB);
285 HW_DESC_SET_CIPHER_CONFIG0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
286 HW_DESC_SET_KEY_SIZE_AES(&desc[0], ctx->auth_keylen);
287 HW_DESC_SET_FLOW_MODE(&desc[0], S_DIN_to_AES);
288 HW_DESC_SET_SETUP_MODE(&desc[0], SETUP_LOAD_KEY0);
/* K1 = E(K, 0x01^16) written to offset 0 */
290 HW_DESC_INIT(&desc[1]);
291 HW_DESC_SET_DIN_CONST(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
292 HW_DESC_SET_FLOW_MODE(&desc[1], DIN_AES_DOUT);
293 HW_DESC_SET_DOUT_DLLI(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr, AES_KEYSIZE_128, NS_BIT, 0);
/* K2 = E(K, 0x02^16) written to offset AES_KEYSIZE_128 */
295 HW_DESC_INIT(&desc[2]);
296 HW_DESC_SET_DIN_CONST(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
297 HW_DESC_SET_FLOW_MODE(&desc[2], DIN_AES_DOUT);
298 HW_DESC_SET_DOUT_DLLI(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
300 AES_KEYSIZE_128, NS_BIT, 0);
/* K3 = E(K, 0x03^16) written to offset 2*AES_KEYSIZE_128 */
302 HW_DESC_INIT(&desc[3]);
303 HW_DESC_SET_DIN_CONST(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
304 HW_DESC_SET_FLOW_MODE(&desc[3], DIN_AES_DOUT);
305 HW_DESC_SET_DOUT_DLLI(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
306 + 2 * AES_KEYSIZE_128),
307 AES_KEYSIZE_128, NS_BIT, 0);
/* Build the HW descriptor sequence that derives the HMAC intermediate
 * digests: for each of IPAD and OPAD, load the hash initial state and a
 * zero length, XOR the padded auth key with the pad constant, hash one
 * block, and store the resulting digest into ipad_opad (IPAD first, then
 * OPAD at digest_ofs). SHA-1 vs SHA-256 is selected from auth_mode.
 */
312 static int hmac_setkey(HwDesc_s *desc, struct ssi_aead_ctx *ctx)
314 unsigned int hmacPadConst[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
315 unsigned int digest_ofs = 0;
316 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
317 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
318 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
319 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
324 /* calc derived HMAC key */
325 for (i = 0; i < 2; i++) {
326 /* Load hash initial state */
327 HW_DESC_INIT(&desc[idx]);
328 HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
/* initial (larval) digest is pre-loaded in SRAM by the hash driver */
329 HW_DESC_SET_DIN_SRAM(&desc[idx],
330 ssi_ahash_get_larval_digest_sram_addr(
331 ctx->drvdata, ctx->auth_mode),
333 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
334 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
337 /* Load the hash current length*/
338 HW_DESC_INIT(&desc[idx]);
339 HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
340 HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
341 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
342 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
345 /* Prepare ipad key */
/* XOR the pad constant (ipad on pass 0, opad on pass 1) into the data path */
346 HW_DESC_INIT(&desc[idx]);
347 HW_DESC_SET_XOR_VAL(&desc[idx], hmacPadConst[i]);
348 HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
349 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
350 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
353 /* Perform HASH update */
354 HW_DESC_INIT(&desc[idx]);
355 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
356 ctx->auth_state.hmac.padded_authkey_dma_addr,
357 SHA256_BLOCK_SIZE, NS_BIT);
358 HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
359 HW_DESC_SET_XOR_ACTIVE(&desc[idx]);
360 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
/* write the intermediate digest out to ipad_opad[digest_ofs] */
364 HW_DESC_INIT(&desc[idx]);
365 HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
366 HW_DESC_SET_DOUT_DLLI(&desc[idx],
367 (ctx->auth_state.hmac.ipad_opad_dma_addr +
369 digest_size, NS_BIT, 0);
370 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
371 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
372 HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
375 digest_ofs += digest_size;
/* Sanity-check the auth-key and cipher-key lengths recorded in the context:
 * XCBC accepts AES-128/192/256 key sizes, DRV_HASH_NULL requires no auth
 * key, 3DES flows require DES3_EDE_KEY_SIZE, and AES flows require one of
 * the three standard AES key sizes. Returns 0 when all checks pass
 * (failure returns are elided in this excerpt).
 */
381 static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
383 SSI_LOG_DEBUG("enc_keylen=%u authkeylen=%u\n",
384 ctx->enc_keylen, ctx->auth_keylen);
386 switch (ctx->auth_mode) {
388 case DRV_HASH_SHA256:
390 case DRV_HASH_XCBC_MAC:
391 if ((ctx->auth_keylen != AES_KEYSIZE_128) &&
392 (ctx->auth_keylen != AES_KEYSIZE_192) &&
393 (ctx->auth_keylen != AES_KEYSIZE_256))
396 case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key) */
397 if (ctx->auth_keylen > 0)
401 SSI_LOG_ERR("Invalid auth_mode=%d\n", ctx->auth_mode);
404 /* Check cipher key size */
405 if (unlikely(ctx->flow_mode == S_DIN_to_DES)) {
406 if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
407 SSI_LOG_ERR("Invalid cipher(3DES) key size: %u\n",
411 } else { /* Default assumed to be AES ciphers */
412 if ((ctx->enc_keylen != AES_KEYSIZE_128) &&
413 (ctx->enc_keylen != AES_KEYSIZE_192) &&
414 (ctx->enc_keylen != AES_KEYSIZE_256)) {
415 SSI_LOG_ERR("Invalid cipher(AES) key size: %u\n",
421 return 0; /* All tests of keys sizes passed */
423 /* This function prepares the user key so it can be passed to the HMAC processing
424 (copy to internal buffer, or hash it first if the key is longer than the block size) */
/* Prepare the user-supplied HMAC key into padded_authkey: map the key for
 * DMA, then either (a) hash it first when it exceeds the hash block size,
 * writing digest + zero padding, or (b) copy it and zero-pad to the block
 * size. Builds the descriptor sequence and submits it via send_request();
 * finally unmaps the key. SHA-1 vs SHA-256 parameters come from auth_mode.
 */
426 ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
428 dma_addr_t key_dma_addr = 0;
429 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
430 struct device *dev = &ctx->drvdata->plat_dev->dev;
431 uint32_t larval_addr = ssi_ahash_get_larval_digest_sram_addr(
432 ctx->drvdata, ctx->auth_mode);
433 struct ssi_crypto_req ssi_req = {};
434 unsigned int blocksize;
435 unsigned int digestsize;
436 unsigned int hashmode;
437 unsigned int idx = 0;
439 HwDesc_s desc[MAX_AEAD_SETKEY_SEQ];
440 dma_addr_t padded_authkey_dma_addr =
441 ctx->auth_state.hmac.padded_authkey_dma_addr;
443 switch (ctx->auth_mode) { /* auth_key required and >0 */
445 blocksize = SHA1_BLOCK_SIZE;
446 digestsize = SHA1_DIGEST_SIZE;
447 hashmode = DRV_HASH_HW_SHA1;
449 case DRV_HASH_SHA256:
451 blocksize = SHA256_BLOCK_SIZE;
452 digestsize = SHA256_DIGEST_SIZE;
453 hashmode = DRV_HASH_HW_SHA256;
456 if (likely(keylen != 0)) {
457 key_dma_addr = dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
458 if (unlikely(dma_mapping_error(dev, key_dma_addr))) {
459 SSI_LOG_ERR("Mapping key va=0x%p len=%u for"
460 " DMA failed\n", key, keylen);
463 SSI_UPDATE_DMA_ADDR_TO_48BIT(key_dma_addr, keylen);
464 if (keylen > blocksize) {
/* key longer than a block: padded_authkey = HASH(key) || zeros */
465 /* Load hash initial state */
466 HW_DESC_INIT(&desc[idx]);
467 HW_DESC_SET_CIPHER_MODE(&desc[idx], hashmode);
468 HW_DESC_SET_DIN_SRAM(&desc[idx], larval_addr, digestsize);
469 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
470 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
473 /* Load the hash current length*/
474 HW_DESC_INIT(&desc[idx]);
475 HW_DESC_SET_CIPHER_MODE(&desc[idx], hashmode);
476 HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
477 HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
478 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
479 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
/* hash the full user key */
482 HW_DESC_INIT(&desc[idx]);
483 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
486 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
/* write the digest to the start of padded_authkey */
490 HW_DESC_INIT(&desc[idx]);
491 HW_DESC_SET_CIPHER_MODE(&desc[idx], hashmode);
492 HW_DESC_SET_DOUT_DLLI(&desc[idx],
493 padded_authkey_dma_addr,
496 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
497 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
498 HW_DESC_SET_CIPHER_CONFIG1(&desc[idx],
499 HASH_PADDING_DISABLED);
500 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx],
501 HASH_DIGEST_RESULT_LITTLE_ENDIAN);
/* zero the remainder of the block after the digest */
504 HW_DESC_INIT(&desc[idx]);
505 HW_DESC_SET_DIN_CONST(&desc[idx], 0, (blocksize - digestsize));
506 HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
507 HW_DESC_SET_DOUT_DLLI(&desc[idx],
508 (padded_authkey_dma_addr + digestsize),
509 (blocksize - digestsize),
/* key fits in a block: DMA-copy it, then zero-pad the tail */
513 HW_DESC_INIT(&desc[idx]);
514 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
517 HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
518 HW_DESC_SET_DOUT_DLLI(&desc[idx],
519 (padded_authkey_dma_addr),
523 if ((blocksize - keylen) != 0) {
524 HW_DESC_INIT(&desc[idx]);
525 HW_DESC_SET_DIN_CONST(&desc[idx], 0,
526 (blocksize - keylen));
527 HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
528 HW_DESC_SET_DOUT_DLLI(&desc[idx],
529 (padded_authkey_dma_addr + keylen),
530 (blocksize - keylen),
/* keylen == 0: the whole padded_authkey buffer is zero-filled */
536 HW_DESC_INIT(&desc[idx]);
537 HW_DESC_SET_DIN_CONST(&desc[idx], 0,
538 (blocksize - keylen));
539 HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
540 HW_DESC_SET_DOUT_DLLI(&desc[idx],
541 padded_authkey_dma_addr,
547 #ifdef ENABLE_CYCLE_COUNT
548 ssi_req.op_type = STAT_OP_TYPE_SETKEY;
/* synchronous submission (is_dout=0): blocks until HW completes */
551 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
552 if (unlikely(rc != 0))
553 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
555 if (likely(key_dma_addr != 0)) {
556 SSI_RESTORE_DMA_ADDR_TO_48BIT(key_dma_addr);
557 dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
/* AEAD setkey entry point. For authenc() algorithms the key blob is the
 * rtattr-encoded crypto_authenc_key_param format: parse enckeylen, split
 * the blob into auth key || enc key, and for RFC3686-CTR strip the trailing
 * nonce into ctx->ctr_nonce. Then copy key material into the context,
 * derive HMAC pads or XCBC subkeys as needed, and submit the derivation
 * sequence to HW. On bad keys, sets CRYPTO_TFM_RES_BAD_KEY_LEN.
 */
565 ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
567 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
568 struct rtattr *rta = (struct rtattr *)key;
569 struct ssi_crypto_req ssi_req = {};
570 struct crypto_authenc_key_param *param;
571 HwDesc_s desc[MAX_AEAD_SETKEY_SEQ];
572 int seq_len = 0, rc = -EINVAL;
573 DECL_CYCLE_COUNT_RESOURCES;
575 SSI_LOG_DEBUG("Setting key in context @%p for %s. key=%p keylen=%u\n",
576 ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
578 CHECK_AND_RETURN_UPON_FIPS_ERROR();
579 /* STAT_PHASE_0: Init and sanity checks */
582 if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
/* validate the rtattr header before touching the payload */
583 if (!RTA_OK(rta, keylen))
585 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
587 if (RTA_PAYLOAD(rta) < sizeof(*param))
589 param = RTA_DATA(rta);
590 ctx->enc_keylen = be32_to_cpu(param->enckeylen);
591 key += RTA_ALIGN(rta->rta_len);
592 keylen -= RTA_ALIGN(rta->rta_len);
593 if (keylen < ctx->enc_keylen)
595 ctx->auth_keylen = keylen - ctx->enc_keylen;
597 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
598 /* the nonce is stored in bytes at end of key */
599 if (ctx->enc_keylen <
600 (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
602 /* Copy nonce from last 4 bytes in CTR key to
603 * first 4 bytes in CTR IV */
604 memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + ctx->enc_keylen -
605 CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
606 /* Set CTR key size */
607 ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
609 } else { /* non-authenc - has just one key */
610 ctx->enc_keylen = keylen;
611 ctx->auth_keylen = 0;
614 rc = validate_keys_sizes(ctx);
615 if (unlikely(rc != 0))
618 END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_0);
619 /* STAT_PHASE_1: Copy key to ctx */
622 /* Get key material */
623 memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
/* AES-192 keys are zero-extended to the HW's max AES key size */
624 if (ctx->enc_keylen == 24)
625 memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
626 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
627 memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
628 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
629 rc = ssi_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
634 END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_1);
636 /* STAT_PHASE_2: Create sequence */
639 switch (ctx->auth_mode) {
641 case DRV_HASH_SHA256:
642 seq_len = hmac_setkey(desc, ctx);
644 case DRV_HASH_XCBC_MAC:
645 seq_len = xcbc_setkey(desc, ctx);
647 case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
648 break; /* No auth. key setup */
650 SSI_LOG_ERR("Unsupported authenc (%d)\n", ctx->auth_mode);
655 END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_2);
657 /* STAT_PHASE_3: Submit sequence to HW */
660 if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
661 #ifdef ENABLE_CYCLE_COUNT
662 ssi_req.op_type = STAT_OP_TYPE_SETKEY;
664 rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 0);
665 if (unlikely(rc != 0)) {
666 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
671 /* Update STAT_PHASE_3 */
672 END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_3);
/* error path: report bad key length to the crypto API */
676 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
682 #if SSI_CC_HAS_AES_CCM
/* RFC4309 (CCM with implicit nonce) setkey: the last 3 bytes of the blob
 * are the salt/nonce — save them in ctx->ctr_nonce, then run the regular
 * AEAD setkey on the remaining key bytes.
 */
683 static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
685 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
/* keylen has presumably already been reduced by 3 before this copy —
 * confirm against the elided lines in the full file */
692 memcpy(ctx->ctr_nonce, key + keylen, 3);
694 rc = ssi_aead_setkey(tfm, key, keylen);
698 #endif /*SSI_CC_HAS_AES_CCM*/
/* Record the requested ICV size in the context after rejecting zero or
 * anything above the transform's maximum authsize.
 */
700 static int ssi_aead_setauthsize(
701 struct crypto_aead *authenc,
702 unsigned int authsize)
704 struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
706 CHECK_AND_RETURN_UPON_FIPS_ERROR();
707 /* Unsupported auth. sizes */
708 if ((authsize == 0) ||
709 (authsize >crypto_aead_maxauthsize(authenc))) {
713 ctx->authsize = authsize;
714 SSI_LOG_DEBUG("authlen=%d\n", ctx->authsize);
719 #if SSI_CC_HAS_AES_CCM
/* RFC4309-CCM setauthsize wrapper: the RFC4309-specific authsize
 * validation lines are elided in this excerpt; delegates to the common
 * ssi_aead_setauthsize().
 */
720 static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
721 unsigned int authsize)
732 return ssi_aead_setauthsize(authenc, authsize);
/* Plain-CCM setauthsize wrapper: the CCM-legal-size check (even values in
 * the allowed range) is elided in this excerpt; delegates to the common
 * ssi_aead_setauthsize().
 */
735 static int ssi_ccm_setauthsize(struct crypto_aead *authenc,
736 unsigned int authsize)
751 return ssi_aead_setauthsize(authenc, authsize);
753 #endif /*SSI_CC_HAS_AES_CCM*/
/* Append one descriptor that streams the associated data into the given
 * flow (hash/MAC engine), choosing DLLI or MLLI addressing based on how
 * the buffer manager mapped the assoc buffer. For XCBC with payload to
 * follow, mark the DIN as not-last so the MAC is not finalized yet.
 */
756 ssi_aead_create_assoc_desc(
757 struct aead_request *areq,
758 unsigned int flow_mode,
760 unsigned int *seq_size)
762 struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
763 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
764 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
765 enum ssi_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
766 unsigned int idx = *seq_size;
768 switch (assoc_dma_type) {
769 case SSI_DMA_BUF_DLLI:
770 SSI_LOG_DEBUG("ASSOC buffer type DLLI\n");
771 HW_DESC_INIT(&desc[idx]);
772 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
773 sg_dma_address(areq->src),
774 areq->assoclen, NS_BIT);
775 HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
776 if (ctx->auth_mode == DRV_HASH_XCBC_MAC && (areq_ctx->cryptlen > 0) )
777 HW_DESC_SET_DIN_NOT_LAST_INDICATION(&desc[idx]);
779 case SSI_DMA_BUF_MLLI:
780 SSI_LOG_DEBUG("ASSOC buffer type MLLI\n");
781 HW_DESC_INIT(&desc[idx]);
782 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_MLLI,
783 areq_ctx->assoc.sram_addr,
784 areq_ctx->assoc.mlli_nents,
786 HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
787 if (ctx->auth_mode == DRV_HASH_XCBC_MAC && (areq_ctx->cryptlen > 0) )
788 HW_DESC_SET_DIN_NOT_LAST_INDICATION(&desc[idx]);
790 case SSI_DMA_BUF_NULL:
792 SSI_LOG_ERR("Invalid ASSOC buffer type\n");
/* Append one descriptor that feeds the to-be-authenticated data into the
 * given flow. For DLLI: pick dst on encrypt (ciphertext is authenticated
 * after encryption) or src on decrypt. For MLLI: default to the combined
 * assoc+iv+data table (double-pass flow); in single-pass mode use the
 * dst/src MLLI table directly.
 */
799 ssi_aead_process_authenc_data_desc(
800 struct aead_request *areq,
801 unsigned int flow_mode,
803 unsigned int *seq_size,
806 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
807 enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
808 unsigned int idx = *seq_size;
810 switch (data_dma_type) {
811 case SSI_DMA_BUF_DLLI:
813 struct scatterlist *cipher =
814 (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
815 areq_ctx->dstSgl : areq_ctx->srcSgl;
817 unsigned int offset =
818 (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
819 areq_ctx->dstOffset : areq_ctx->srcOffset;
820 SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type DLLI\n");
821 HW_DESC_INIT(&desc[idx]);
822 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
823 (sg_dma_address(cipher)+ offset), areq_ctx->cryptlen,
825 HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
828 case SSI_DMA_BUF_MLLI:
830 /* DOUBLE-PASS flow (as default)
831 * assoc. + iv + data -compact in one table
832 * if assoclen is ZERO only IV perform */
833 ssi_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
834 uint32_t mlli_nents = areq_ctx->assoc.mlli_nents;
836 if (likely(areq_ctx->is_single_pass == true)) {
837 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT){
838 mlli_addr = areq_ctx->dst.sram_addr;
839 mlli_nents = areq_ctx->dst.mlli_nents;
841 mlli_addr = areq_ctx->src.sram_addr;
842 mlli_nents = areq_ctx->src.mlli_nents;
846 SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type MLLI\n");
847 HW_DESC_INIT(&desc[idx]);
848 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_MLLI,
849 mlli_addr, mlli_nents, NS_BIT);
850 HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
853 case SSI_DMA_BUF_NULL:
855 SSI_LOG_ERR("AUTHENC: Invalid SRC/DST buffer type\n");
/* Append one descriptor that runs the payload through the cipher engine:
 * DIN from src, DOUT to dst, via DLLI or MLLI addressing as mapped by the
 * buffer manager. A zero-length payload appends nothing.
 */
862 ssi_aead_process_cipher_data_desc(
863 struct aead_request *areq,
864 unsigned int flow_mode,
866 unsigned int *seq_size)
868 unsigned int idx = *seq_size;
869 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
870 enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
872 if (areq_ctx->cryptlen == 0)
873 return; /*null processing*/
875 switch (data_dma_type) {
876 case SSI_DMA_BUF_DLLI:
877 SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type DLLI\n");
878 HW_DESC_INIT(&desc[idx]);
879 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
880 (sg_dma_address(areq_ctx->srcSgl)+areq_ctx->srcOffset),
881 areq_ctx->cryptlen, NS_BIT);
882 HW_DESC_SET_DOUT_DLLI(&desc[idx],
883 (sg_dma_address(areq_ctx->dstSgl)+areq_ctx->dstOffset),
884 areq_ctx->cryptlen, NS_BIT, 0);
885 HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
887 case SSI_DMA_BUF_MLLI:
888 SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type MLLI\n");
889 HW_DESC_INIT(&desc[idx]);
890 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_MLLI,
891 areq_ctx->src.sram_addr,
892 areq_ctx->src.mlli_nents, NS_BIT);
893 HW_DESC_SET_DOUT_MLLI(&desc[idx],
894 areq_ctx->dst.sram_addr,
895 areq_ctx->dst.mlli_nents, NS_BIT, 0);
896 HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
898 case SSI_DMA_BUF_NULL:
900 SSI_LOG_ERR("CIPHER: Invalid SRC/DST buffer type\n");
/* Append the final descriptor(s) that extract the computed ICV from the
 * hash/MAC engine. On encrypt, write it straight to the destination ICV
 * location (icv_dma_addr); on decrypt, write it to mac_buf so that
 * ssi_aead_complete() can memcmp it against the received ICV. Engine-mode
 * setup differs between XCBC-MAC and the SHA-based HMAC modes.
 */
906 static inline void ssi_aead_process_digest_result_desc(
907 struct aead_request *req,
909 unsigned int *seq_size)
911 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
912 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
913 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
914 unsigned int idx = *seq_size;
915 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
916 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
917 int direct = req_ctx->gen_ctx.op_type;
919 /* Get final ICV result */
920 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
921 HW_DESC_INIT(&desc[idx]);
922 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
923 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
/* axi_sec=1 + queue-last: this DOUT completes the request */
924 HW_DESC_SET_DOUT_DLLI(&desc[idx], req_ctx->icv_dma_addr,
925 ctx->authsize, NS_BIT, 1);
926 HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
927 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
928 HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
929 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
931 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx],
932 HASH_DIGEST_RESULT_LITTLE_ENDIAN);
933 HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
936 /* Get ICV out from hardware */
937 HW_DESC_INIT(&desc[idx]);
938 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
939 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
/* decrypt: stash the computed MAC for software comparison */
940 HW_DESC_SET_DOUT_DLLI(&desc[idx], req_ctx->mac_buf_dma_addr,
941 ctx->authsize, NS_BIT, 1);
942 HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
943 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
944 HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
945 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
946 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
947 HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
949 HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
/* Append the cipher-setup descriptors: load the IV/counter state (STATE1
 * for CTR, STATE0 otherwise) and then the cipher key. For AES flows a
 * 192-bit key is loaded at the zero-extended CC_AES_KEY_SIZE_MAX length;
 * DES flows load the key at its exact length.
 */
956 static inline void ssi_aead_setup_cipher_desc(
957 struct aead_request *req,
959 unsigned int *seq_size)
961 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
962 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
963 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
964 unsigned int hw_iv_size = req_ctx->hw_iv_size;
965 unsigned int idx = *seq_size;
966 int direct = req_ctx->gen_ctx.op_type;
968 /* Setup cipher state */
969 HW_DESC_INIT(&desc[idx]);
970 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direct);
971 HW_DESC_SET_FLOW_MODE(&desc[idx], ctx->flow_mode);
972 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
973 req_ctx->gen_ctx.iv_dma_addr, hw_iv_size, NS_BIT);
974 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
975 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
977 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
979 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->cipher_mode);
/* load the cipher key */
983 HW_DESC_INIT(&desc[idx]);
984 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direct);
985 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
986 HW_DESC_SET_FLOW_MODE(&desc[idx], ctx->flow_mode);
987 if (ctx->flow_mode == S_DIN_to_AES) {
988 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
989 ((ctx->enc_keylen == 24) ?
990 CC_AES_KEY_SIZE_MAX : ctx->enc_keylen), NS_BIT);
991 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
993 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
994 ctx->enc_keylen, NS_BIT);
995 HW_DESC_SET_KEY_SIZE_DES(&desc[idx], ctx->enc_keylen);
997 HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->cipher_mode);
/* Build the cipher portion of the sequence: state/key setup followed by
 * the payload data descriptor. On encrypt, append a no-DMA barrier
 * descriptor so the MAC pass does not start before all ciphertext has been
 * written out. No-op for zero-length payloads.
 */
1003 static inline void ssi_aead_process_cipher(
1004 struct aead_request *req,
1006 unsigned int *seq_size,
1007 unsigned int data_flow_mode)
1009 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1010 int direct = req_ctx->gen_ctx.op_type;
1011 unsigned int idx = *seq_size;
1013 if (req_ctx->cryptlen == 0)
1014 return; /*null processing*/
1016 ssi_aead_setup_cipher_desc(req, desc, &idx);
1017 ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, &idx);
1018 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1019 /* We must wait for DMA to write all cipher */
1020 HW_DESC_INIT(&desc[idx]);
1021 HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
1022 HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
/* Append the HMAC digest-setup descriptors: load the precomputed
 * ipad-derived hash state (from ipad_opad) and the initial digest length
 * kept in SRAM. SHA-1 vs SHA-256 is selected from auth_mode.
 */
1029 static inline void ssi_aead_hmac_setup_digest_desc(
1030 struct aead_request *req,
1032 unsigned int *seq_size)
1034 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1035 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1036 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
1037 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
1038 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
1039 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
1040 unsigned int idx = *seq_size;
1042 /* Loading hash ipad xor key state */
1043 HW_DESC_INIT(&desc[idx]);
1044 HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
1045 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1046 ctx->auth_state.hmac.ipad_opad_dma_addr,
1047 digest_size, NS_BIT);
1048 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1049 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
1052 /* Load init. digest len (64 bytes) */
1053 HW_DESC_INIT(&desc[idx]);
1054 HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
1055 HW_DESC_SET_DIN_SRAM(&desc[idx],
1056 ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, hash_mode),
1058 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1059 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
/* Append the XCBC-MAC digest-setup descriptors: zero the MAC state, then
 * load the three derived subkeys — K1 as the AES key (LOAD_KEY0), K2 and
 * K3 as STATE1/STATE2 — all from the contiguous xcbc_keys buffer.
 */
1065 static inline void ssi_aead_xcbc_setup_digest_desc(
1066 struct aead_request *req,
1068 unsigned int *seq_size)
1070 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1071 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1072 unsigned int idx = *seq_size;
1074 /* Loading MAC state */
1075 HW_DESC_INIT(&desc[idx]);
1076 HW_DESC_SET_DIN_CONST(&desc[idx], 0, CC_AES_BLOCK_SIZE);
1077 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
1078 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
1079 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1080 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1081 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1082 HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
1085 /* Setup XCBC MAC K1 */
1086 HW_DESC_INIT(&desc[idx]);
1087 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1088 ctx->auth_state.xcbc.xcbc_keys_dma_addr,
1089 AES_KEYSIZE_128, NS_BIT);
1090 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1091 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
1092 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1093 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1094 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1095 HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
1098 /* Setup XCBC MAC K2 */
1099 HW_DESC_INIT(&desc[idx]);
1100 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1101 (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1103 AES_KEYSIZE_128, NS_BIT);
1104 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
1105 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
1106 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1107 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1108 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1109 HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
1112 /* Setup XCBC MAC K3 */
1113 HW_DESC_INIT(&desc[idx]);
1114 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1115 (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1116 2 * AES_KEYSIZE_128),
1117 AES_KEYSIZE_128, NS_BIT);
1118 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE2);
1119 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
1120 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1121 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1122 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1123 HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
/*
 * Feed the associated data into the hash engine (DIN_HASH flow), but
 * only when there is any (assoclen > 0).
 */
1129 static inline void ssi_aead_process_digest_header_desc(
1130 struct aead_request *req,
1132 unsigned int *seq_size)
1134 unsigned int idx = *seq_size;
1135 /* Hash associated data */
1136 if (req->assoclen > 0)
1137 ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
/*
 * Finalize the HMAC outer pass: pad and dump the inner digest to the
 * SRAM workspace, load the opad state (stored digest_size bytes after
 * ipad in ipad_opad_dma_addr) plus the initial digest length, then hash
 * the inner result from SRAM to produce HMAC(K, data).
 */
1143 static inline void ssi_aead_process_digest_scheme_desc(
1144 struct aead_request *req,
1146 unsigned int *seq_size)
1148 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1149 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1150 struct ssi_aead_handle *aead_handle = ctx->drvdata->aead_handle;
1151 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
1152 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
1153 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
1154 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
1155 unsigned int idx = *seq_size;
1157 HW_DESC_INIT(&desc[idx]);
1158 HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
1159 HW_DESC_SET_DOUT_SRAM(&desc[idx], aead_handle->sram_workspace_addr,
1161 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
1162 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1);
1163 HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD);
1166 /* Get final ICV result */
1167 HW_DESC_INIT(&desc[idx]);
1168 HW_DESC_SET_DOUT_SRAM(&desc[idx], aead_handle->sram_workspace_addr,
1170 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
1171 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
1172 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1173 HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
1176 /* Loading hash opad xor key state */
1177 HW_DESC_INIT(&desc[idx]);
1178 HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
1179 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1180 (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
1181 digest_size, NS_BIT);
1182 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1183 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
1186 /* Load init. digest len (64 bytes) */
1187 HW_DESC_INIT(&desc[idx]);
1188 HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
1189 HW_DESC_SET_DIN_SRAM(&desc[idx],
1190 ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, hash_mode),
1192 HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
1193 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1194 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1197 /* Perform HASH update on the inner digest held in SRAM */
1198 HW_DESC_INIT(&desc[idx]);
1199 HW_DESC_SET_DIN_SRAM(&desc[idx], aead_handle->sram_workspace_addr,
1201 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
/*
 * If this request uses an MLLI (scatter) table - i.e. either buffer is
 * SSI_DMA_BUF_MLLI or the flow is not single-pass - emit a BYPASS
 * descriptor that copies the MLLI table from host memory into its
 * reserved SRAM slot before the data descriptors reference it.
 */
1207 static inline void ssi_aead_load_mlli_to_sram(
1208 struct aead_request *req,
1210 unsigned int *seq_size)
1212 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1213 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1214 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1217 (req_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
1218 (req_ctx->data_buff_type == SSI_DMA_BUF_MLLI) ||
1219 (req_ctx->is_single_pass == false))) {
1220 SSI_LOG_DEBUG("Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
1221 (unsigned int)ctx->drvdata->mlli_sram_addr,
1222 req_ctx->mlli_params.mlli_len);
1223 /* Copy MLLI table host-to-sram */
1224 HW_DESC_INIT(&desc[*seq_size]);
1225 HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
1226 req_ctx->mlli_params.mlli_dma_addr,
1227 req_ctx->mlli_params.mlli_len, NS_BIT);
1228 HW_DESC_SET_DOUT_SRAM(&desc[*seq_size],
1229 ctx->drvdata->mlli_sram_addr,
1230 req_ctx->mlli_params.mlli_len);
1231 HW_DESC_SET_FLOW_MODE(&desc[*seq_size], BYPASS);
/*
 * Map (direction, cipher engine, single-pass?) to the HW data flow mode.
 * Encrypt: cipher output feeds the hash (X_to_HASH_and_DOUT);
 * Decrypt: cipher and hash consume the input in parallel (X_and_HASH).
 * Non-single-pass requests use a plain DIN_x_DOUT flow (hash is done
 * in a separate pass).
 */
1236 static inline enum FlowMode ssi_aead_get_data_flow_mode(
1237 enum drv_crypto_direction direct,
1238 enum FlowMode setup_flow_mode,
1239 bool is_single_pass)
1241 enum FlowMode data_flow_mode;
1243 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1244 if (setup_flow_mode == S_DIN_to_AES)
1245 data_flow_mode = likely(is_single_pass) ?
1246 AES_to_HASH_and_DOUT : DIN_AES_DOUT;
1248 data_flow_mode = likely(is_single_pass) ?
1249 DES_to_HASH_and_DOUT : DIN_DES_DOUT;
1250 } else { /* Decrypt */
1251 if (setup_flow_mode == S_DIN_to_AES)
1252 data_flow_mode = likely(is_single_pass) ?
1253 AES_and_HASH : DIN_AES_DOUT;
1255 data_flow_mode = likely(is_single_pass) ?
1256 DES_and_HASH : DIN_DES_DOUT;
1259 return data_flow_mode;
/*
 * Build the full descriptor sequence for HMAC-based authenc.
 * Single-pass: one combined cipher+hash flow. Otherwise fall back to
 * two passes - encrypt then authenticate for ENCRYPT, authenticate
 * then decrypt for DECRYPT - so the ICV read (with completion bit)
 * always follows the cipher operation.
 */
1262 static inline void ssi_aead_hmac_authenc(
1263 struct aead_request *req,
1265 unsigned int *seq_size)
1267 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1268 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1269 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1270 int direct = req_ctx->gen_ctx.op_type;
1271 unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
1272 direct, ctx->flow_mode, req_ctx->is_single_pass);
1274 if (req_ctx->is_single_pass == true) {
1278 ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
1279 ssi_aead_setup_cipher_desc(req, desc, seq_size);
1280 ssi_aead_process_digest_header_desc(req, desc, seq_size);
1281 ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, seq_size);
1282 ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
1283 ssi_aead_process_digest_result_desc(req, desc, seq_size);
1289 * Fallback for unsupported single-pass modes,
1290 * i.e. using assoc. data of non-word-multiple */
1291 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1292 /* encrypt first.. */
1293 ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
1294 /* authenc after..*/
1295 ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
1296 ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
1297 ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
1298 ssi_aead_process_digest_result_desc(req, desc, seq_size);
1300 } else { /*DECRYPT*/
1301 /* authenc first..*/
1302 ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
1303 ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
1304 ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
1305 /* decrypt after.. */
1306 ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
1307 /* read the digest result with setting the completion bit
1308 must be after the cipher operation */
1309 ssi_aead_process_digest_result_desc(req, desc, seq_size);
/*
 * XCBC-MAC variant of ssi_aead_hmac_authenc(): same single-pass /
 * two-pass structure, but no digest-scheme (outer HMAC) step since
 * XCBC-MAC has no outer key pass.
 */
1314 ssi_aead_xcbc_authenc(
1315 struct aead_request *req,
1317 unsigned int *seq_size)
1319 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1320 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1321 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1322 int direct = req_ctx->gen_ctx.op_type;
1323 unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
1324 direct, ctx->flow_mode, req_ctx->is_single_pass);
1326 if (req_ctx->is_single_pass == true) {
1330 ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
1331 ssi_aead_setup_cipher_desc(req, desc, seq_size);
1332 ssi_aead_process_digest_header_desc(req, desc, seq_size);
1333 ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, seq_size);
1334 ssi_aead_process_digest_result_desc(req, desc, seq_size);
1340 * Fallback for unsupported single-pass modes,
1341 * i.e. using assoc. data of non-word-multiple */
1342 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1343 /* encrypt first.. */
1344 ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
1345 /* authenc after.. */
1346 ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
1347 ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
1348 ssi_aead_process_digest_result_desc(req, desc, seq_size);
1349 } else { /*DECRYPT*/
1350 /* authenc first.. */
1351 ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
1352 ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
1353 /* decrypt after..*/
1354 ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
1355 /* read the digest result with setting the completion bit
1356 must be after the cipher operation */
1357 ssi_aead_process_digest_result_desc(req, desc, seq_size);
/*
 * Validate crypt/assoc lengths for the configured mode and decide
 * whether the fast single-pass flow can be used; requests whose
 * assoc/cipher lengths are not suitably aligned fall back to the
 * two-pass flow (is_single_pass = false).
 * On decrypt, cryptlen includes the ICV, so it must be >= authsize.
 */
1361 static int validate_data_size(struct ssi_aead_ctx *ctx,
1362 enum drv_crypto_direction direct, struct aead_request *req)
1364 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1365 unsigned int assoclen = req->assoclen;
1366 unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1367 (req->cryptlen - ctx->authsize) : req->cryptlen;
1369 if (unlikely((direct == DRV_CRYPTO_DIRECTION_DECRYPT) &&
1370 (req->cryptlen < ctx->authsize)))
1373 areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
1375 switch (ctx->flow_mode) {
1377 if (unlikely((ctx->cipher_mode == DRV_CIPHER_CBC) &&
1378 !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE)))
1380 if (ctx->cipher_mode == DRV_CIPHER_CCM)
1382 if (ctx->cipher_mode == DRV_CIPHER_GCTR)
1384 if (areq_ctx->plaintext_authenticate_only == true)
1385 areq_ctx->is_single_pass = false;
1389 if (!IS_ALIGNED(assoclen, sizeof(uint32_t)))
1390 areq_ctx->is_single_pass = false;
1392 if ((ctx->cipher_mode == DRV_CIPHER_CTR) &&
1393 !IS_ALIGNED(cipherlen, sizeof(uint32_t)))
1394 areq_ctx->is_single_pass = false;
1398 if (unlikely(!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE)))
1400 if (unlikely(!IS_ALIGNED(assoclen, DES_BLOCK_SIZE)))
1401 areq_ctx->is_single_pass = false;
1404 SSI_LOG_ERR("Unexpected flow mode (%d)\n", ctx->flow_mode);
1414 #if SSI_CC_HAS_AES_CCM
/*
 * Encode the CCM associated-data length field (a0) per RFC 3610 /
 * NIST SP 800-38C: 0 bytes when there is no header, a 2-byte encoding
 * for lengths below 2^16-2^8, otherwise the extended 6-byte form.
 * Returns the number of bytes written (elided returns - confirm).
 */
1415 static unsigned int format_ccm_a0(uint8_t *pA0Buff, uint32_t headerSize)
1417 unsigned int len = 0;
1418 if ( headerSize == 0 ) {
1421 if ( headerSize < ((1UL << 16) - (1UL << 8) )) {
1424 pA0Buff[0] = (headerSize >> 8) & 0xFF;
1425 pA0Buff[1] = headerSize & 0xFF;
1431 pA0Buff[2] = (headerSize >> 24) & 0xFF;
1432 pA0Buff[3] = (headerSize >> 16) & 0xFF;
1433 pA0Buff[4] = (headerSize >> 8) & 0xFF;
1434 pA0Buff[5] = headerSize & 0xFF;
/*
 * Write msglen big-endian into the last csize bytes of a csize-wide
 * field ending at `block` (taken from crypto/ccm.c). Fails when the
 * length does not fit in csize bytes.
 */
1440 static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
1444 memset(block, 0, csize);
1449 else if (msglen > (1 << (8 * csize)))
1452 data = cpu_to_be32(msglen);
1453 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
/*
 * Build the complete AES-CCM descriptor sequence:
 * load AES-CTR key+IV and CBC-MAC key+state, hash the A0/assoc-data
 * block, run the payload through cipher+MAC, read the temporal MAC,
 * then encrypt the MAC with CTR(iv0) to form the final tag, written to
 * mac_buf (decrypt, for later compare) or icv (encrypt).
 */
1458 static inline int ssi_aead_ccm(
1459 struct aead_request *req,
1461 unsigned int *seq_size)
1463 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1464 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1465 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1466 unsigned int idx = *seq_size;
1467 unsigned int cipher_flow_mode;
1468 dma_addr_t mac_result;
1471 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1472 cipher_flow_mode = AES_to_HASH_and_DOUT;
1473 mac_result = req_ctx->mac_buf_dma_addr;
1474 } else { /* Encrypt */
1475 cipher_flow_mode = AES_and_HASH;
1476 mac_result = req_ctx->icv_dma_addr;
1480 HW_DESC_INIT(&desc[idx]);
1481 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);
1482 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1483 ((ctx->enc_keylen == 24) ?
1484 CC_AES_KEY_SIZE_MAX : ctx->enc_keylen),
1486 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
1487 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1488 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1489 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1492 /* load ctr state */
1493 HW_DESC_INIT(&desc[idx]);
1494 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);
1495 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
1496 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1497 req_ctx->gen_ctx.iv_dma_addr,
1498 AES_BLOCK_SIZE, NS_BIT);
1499 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1500 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
1501 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1505 HW_DESC_INIT(&desc[idx]);
1506 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
1507 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1508 ((ctx->enc_keylen == 24) ?
1509 CC_AES_KEY_SIZE_MAX : ctx->enc_keylen),
1511 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
1512 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1513 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1514 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1515 HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
1518 /* load MAC state */
1519 HW_DESC_INIT(&desc[idx]);
1520 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
1521 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
1522 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1523 req_ctx->mac_buf_dma_addr,
1524 AES_BLOCK_SIZE, NS_BIT);
1525 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1526 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
1527 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1528 HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
1532 /* process assoc data */
1533 if (req->assoclen > 0) {
1534 ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
1536 HW_DESC_INIT(&desc[idx]);
1537 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1538 sg_dma_address(&req_ctx->ccm_adata_sg),
1539 AES_BLOCK_SIZE + req_ctx->ccm_hdr_size,
1541 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
1545 /* process the cipher */
1546 if (req_ctx->cryptlen != 0) {
1547 ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, &idx);
1550 /* Read temporal MAC */
1551 HW_DESC_INIT(&desc[idx]);
1552 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
1553 HW_DESC_SET_DOUT_DLLI(&desc[idx], req_ctx->mac_buf_dma_addr,
1554 ctx->authsize, NS_BIT, 0);
1555 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
1556 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1557 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
1558 HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
1561 /* load AES-CTR state (for last MAC calculation)*/
1562 HW_DESC_INIT(&desc[idx]);
1563 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);
1564 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1565 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1566 req_ctx->ccm_iv0_dma_addr ,
1567 AES_BLOCK_SIZE, NS_BIT);
1568 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
1569 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
1570 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1573 /* no-DMA barrier: wait for prior DMA before the final MAC step */
1573 HW_DESC_INIT(&desc[idx]);
1574 HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
1575 HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
1578 /* encrypt the "T" value and store MAC in mac_state */
1579 HW_DESC_INIT(&desc[idx]);
1580 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1581 req_ctx->mac_buf_dma_addr , ctx->authsize, NS_BIT);
1582 HW_DESC_SET_DOUT_DLLI(&desc[idx], mac_result , ctx->authsize, NS_BIT, 1);
1583 HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
1584 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
/*
 * Prepare the three CCM config blocks (B0, A0, CTR-count-0) per
 * RFC 3610: validate L' from req->iv[0], format B0 flags/length,
 * encode the assoc-data length into A0, and derive the initial CTR
 * block (counter bytes zeroed, last byte forced to 0... last-byte
 * semantics per RFC 3610 counter start - confirm against full source).
 */
1591 static int config_ccm_adata(struct aead_request *req) {
1592 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1593 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1594 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1595 //unsigned int size_of_a = 0, rem_a_size = 0;
1596 unsigned int lp = req->iv[0];
1597 /* Note: The code assume that req->iv[0] already contains the value of L' of RFC3610 */
1598 unsigned int l = lp + 1; /* This is L' of RFC 3610. */
1599 unsigned int m = ctx->authsize; /* This is M' of RFC 3610. */
1600 uint8_t *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
1601 uint8_t *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
1602 uint8_t *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1603 unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1604 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1606 (req->cryptlen - ctx->authsize);
1608 memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1609 memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE*3);
1611 /* taken from crypto/ccm.c */
1612 /* 2 <= L <= 8, so 1 <= L' <= 7. */
1613 if (2 > l || l > 8) {
1614 SSI_LOG_ERR("illegal iv value %X\n",req->iv[0]);
1617 memcpy(b0, req->iv, AES_BLOCK_SIZE);
1619 /* format control info per RFC 3610 and
1620 * NIST Special Publication 800-38C
1622 *b0 |= (8 * ((m - 2) / 2));
1623 if (req->assoclen > 0)
1624 *b0 |= 64; /* Enable bit 6 if Adata exists. */
1626 rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
1630 /* END of "taken from crypto/ccm.c" */
1632 /* l(a) - size of associated data. */
1633 req_ctx->ccm_hdr_size = format_ccm_a0 (a0, req->assoclen);
1635 memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1638 memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE) ;
1639 ctr_count_0[15] = 0;
/*
 * Rebuild the RFC 4309 CCM IV: flags byte = 3 (4-byte length field),
 * then the 3-byte nonce from the key material and the 8-byte IV from
 * the request; redirect req->iv to this buffer and strip the IV bytes
 * that ESP counts as part of assoclen.
 */
1644 static void ssi_rfc4309_ccm_process(struct aead_request *req)
1646 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1647 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1648 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1651 memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1652 areq_ctx->ctr_iv[0] = 3; /* For RFC 4309, always use 4 bytes for message length (at most 2^32-1 bytes). */
1654 /* In RFC 4309 there is an 11-bytes nonce+IV part, that we build here. */
1655 memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce, CCM_BLOCK_NONCE_SIZE);
1656 memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv, CCM_BLOCK_IV_SIZE);
1657 req->iv = areq_ctx->ctr_iv;
1658 req->assoclen -= CCM_BLOCK_IV_SIZE;
1660 #endif /*SSI_CC_HAS_AES_CCM*/
1662 #if SSI_CC_HAS_AES_GCM
/*
 * Prepare the HASH engine for GHASH: encrypt one zero block with the
 * AES key (ECB) to derive the hash subkey H into hkey_dma_addr, load H
 * as the GHASH key, issue the HW-mandated GHASH-select descriptor, and
 * load the all-zero initial GHASH state.
 */
1664 static inline void ssi_aead_gcm_setup_ghash_desc(
1665 struct aead_request *req,
1667 unsigned int *seq_size)
1669 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1670 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1671 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1672 unsigned int idx = *seq_size;
1674 /* load key to AES*/
1675 HW_DESC_INIT(&desc[idx]);
1676 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB);
1677 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1678 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1679 ctx->enc_keylen, NS_BIT);
1680 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
1681 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1682 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1685 /* process one zero block to generate hkey */
1686 HW_DESC_INIT(&desc[idx]);
1687 HW_DESC_SET_DIN_CONST(&desc[idx], 0x0, AES_BLOCK_SIZE);
1688 HW_DESC_SET_DOUT_DLLI(&desc[idx],
1689 req_ctx->hkey_dma_addr,
1692 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
1695 /* Memory Barrier */
1696 HW_DESC_INIT(&desc[idx]);
1697 HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
1698 HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
1701 /* Load GHASH subkey */
1702 HW_DESC_INIT(&desc[idx]);
1703 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1704 req_ctx->hkey_dma_addr,
1705 AES_BLOCK_SIZE, NS_BIT);
1706 HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
1707 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1708 HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
1709 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
1710 HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
1711 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1714 /* Configure Hash Engine to work with GHASH.
1715 Since it was not possible to extend HASH submodes to add GHASH,
1716 The following command is necessary in order to select GHASH (according to HW designers)*/
1717 HW_DESC_INIT(&desc[idx]);
1718 HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
1719 HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
1720 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1721 HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
1722 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
1723 HW_DESC_SET_CIPHER_DO(&desc[idx], 1); //1=AES_SK RKEK
1724 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1725 HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
1726 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1729 /* Load GHASH initial STATE (which is 0). (for any hash there is an initial state) */
1730 HW_DESC_INIT(&desc[idx]);
1731 HW_DESC_SET_DIN_CONST(&desc[idx], 0x0, AES_BLOCK_SIZE);
1732 HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
1733 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1734 HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
1735 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
1736 HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
1737 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
/*
 * Load the AES key for GCTR, and - when there is payload to encrypt
 * (cryptlen != 0 and not the RFC 4543 auth-only case) - load the CTR
 * state J0+2 (gcm_iv_inc2) used for the data blocks; J0+1 is reserved
 * for the final tag computation.
 */
1743 static inline void ssi_aead_gcm_setup_gctr_desc(
1744 struct aead_request *req,
1746 unsigned int *seq_size)
1748 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1749 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1750 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1751 unsigned int idx = *seq_size;
1753 /* load key to AES*/
1754 HW_DESC_INIT(&desc[idx]);
1755 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
1756 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1757 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1758 ctx->enc_keylen, NS_BIT);
1759 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
1760 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1761 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1764 if ((req_ctx->cryptlen != 0) && (req_ctx->plaintext_authenticate_only==false)){
1765 /* load AES/CTR initial CTR value inc by 2*/
1766 HW_DESC_INIT(&desc[idx]);
1767 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
1768 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
1769 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1770 req_ctx->gcm_iv_inc2_dma_addr,
1771 AES_BLOCK_SIZE, NS_BIT);
1772 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1773 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
1774 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
/*
 * Finish GCM: GHASH the lengths block (len(A)||len(C)), dump the GHASH
 * state to mac_buf, reload the CTR state J0+1 (gcm_iv_inc1), and GCTR-
 * encrypt mac_buf to produce the tag - into icv on encrypt, or mac_buf
 * on decrypt (for later comparison). Last descriptor raises completion.
 */
1781 static inline void ssi_aead_process_gcm_result_desc(
1782 struct aead_request *req,
1784 unsigned int *seq_size)
1786 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1787 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1788 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1789 dma_addr_t mac_result;
1790 unsigned int idx = *seq_size;
1792 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1793 mac_result = req_ctx->mac_buf_dma_addr;
1794 } else { /* Encrypt */
1795 mac_result = req_ctx->icv_dma_addr;
1798 /* process(ghash) gcm_block_len */
1799 HW_DESC_INIT(&desc[idx]);
1800 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1801 req_ctx->gcm_block_len_dma_addr,
1802 AES_BLOCK_SIZE, NS_BIT);
1803 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
1806 /* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
1807 HW_DESC_INIT(&desc[idx]);
1808 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
1809 HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
1810 HW_DESC_SET_DOUT_DLLI(&desc[idx], req_ctx->mac_buf_dma_addr,
1811 AES_BLOCK_SIZE, NS_BIT, 0);
1812 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
1813 HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
1814 HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
1818 /* load AES/CTR initial CTR value inc by 1*/
1819 HW_DESC_INIT(&desc[idx]);
1820 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
1821 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
1822 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1823 req_ctx->gcm_iv_inc1_dma_addr,
1824 AES_BLOCK_SIZE, NS_BIT);
1825 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1826 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
1827 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1830 /* Memory Barrier */
1831 HW_DESC_INIT(&desc[idx]);
1832 HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
1833 HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
1836 /* process GCTR on stored GHASH and store MAC in mac_state*/
1837 HW_DESC_INIT(&desc[idx]);
1838 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
1839 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1840 req_ctx->mac_buf_dma_addr,
1841 AES_BLOCK_SIZE, NS_BIT);
1842 HW_DESC_SET_DOUT_DLLI(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1843 HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
1844 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
/*
 * Top-level GCM descriptor builder. RFC 4543 (auth-only): BYPASS-copy
 * the data and GHASH everything. GCM / RFC 4106: GHASH assoc data, run
 * the payload through GCTR+GHASH, then compute the tag.
 */
1850 static inline int ssi_aead_gcm(
1851 struct aead_request *req,
1853 unsigned int *seq_size)
1855 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1856 unsigned int idx = *seq_size;
1857 unsigned int cipher_flow_mode;
1859 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1860 cipher_flow_mode = AES_and_HASH;
1861 } else { /* Encrypt */
1862 cipher_flow_mode = AES_to_HASH_and_DOUT;
1866 //in RFC4543 no data to encrypt. just copy data from src to dest.
1867 if (req_ctx->plaintext_authenticate_only==true){
1868 ssi_aead_process_cipher_data_desc(req, BYPASS, desc, seq_size);
1869 ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
1870 /* process(ghash) assoc data */
1871 ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
1872 ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
1873 ssi_aead_process_gcm_result_desc(req, desc, seq_size);
1878 // for gcm and rfc4106.
1879 ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
1880 /* process(ghash) assoc data */
1881 if (req->assoclen > 0)
1882 ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
1883 ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
1884 /* process(gctr+ghash) */
1885 if (req_ctx->cryptlen != 0)
1886 ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size);
1887 ssi_aead_process_gcm_result_desc(req, desc, seq_size);
/*
 * Debug helper: dump all GCM intermediate buffers (key, IVs, hkey,
 * mac_buf, length block, src/dst data) via SSI_LOG_DEBUG. No-op for
 * non-GCTR cipher modes.
 */
1894 static inline void ssi_aead_dump_gcm(
1896 struct aead_request *req)
1898 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1899 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1900 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1902 if (ctx->cipher_mode != DRV_CIPHER_GCTR)
1905 if (title != NULL) {
1906 SSI_LOG_DEBUG("----------------------------------------------------------------------------------");
1907 SSI_LOG_DEBUG("%s\n", title);
1910 SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d \n", \
1911 ctx->cipher_mode, ctx->authsize, ctx->enc_keylen, req->assoclen, req_ctx->cryptlen );
1913 if ( ctx->enckey != NULL ) {
1914 dump_byte_array("mac key",ctx->enckey, 16);
1917 dump_byte_array("req->iv",req->iv, AES_BLOCK_SIZE);
1919 dump_byte_array("gcm_iv_inc1",req_ctx->gcm_iv_inc1, AES_BLOCK_SIZE);
1921 dump_byte_array("gcm_iv_inc2",req_ctx->gcm_iv_inc2, AES_BLOCK_SIZE);
1923 dump_byte_array("hkey",req_ctx->hkey, AES_BLOCK_SIZE);
1925 dump_byte_array("mac_buf",req_ctx->mac_buf, AES_BLOCK_SIZE);
1927 dump_byte_array("gcm_len_block",req_ctx->gcm_len_block.lenA, AES_BLOCK_SIZE);
1929 if (req->src!=NULL && req->cryptlen) {
1930 dump_byte_array("req->src",sg_virt(req->src), req->cryptlen+req->assoclen);
1933 if (req->dst!=NULL) {
1934 dump_byte_array("req->dst",sg_virt(req->dst), req->cryptlen+ctx->authsize+req->assoclen);
/*
 * Populate the per-request GCM context: zero hkey/mac_buf, build the
 * two counter blocks J0+2 (gcm_iv_inc2, data) and J0+1 (gcm_iv_inc1,
 * tag) from the 12-byte IV, and fill the lengths block - lenA/lenC in
 * bits for GCM/RFC4106, or everything counted as AAD for RFC 4543.
 */
1939 static int config_gcm_context(struct aead_request *req) {
1940 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1941 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1942 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1944 unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1945 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1947 (req->cryptlen - ctx->authsize);
1948 __be32 counter = cpu_to_be32(2);
1950 SSI_LOG_DEBUG("config_gcm_context() cryptlen = %d, req->assoclen = %d ctx->authsize = %d \n", cryptlen, req->assoclen, ctx->authsize);
1952 memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1954 memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1956 memcpy(req->iv + 12, &counter, 4);
1957 memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
1959 counter = cpu_to_be32(1);
1960 memcpy(req->iv + 12, &counter, 4);
1961 memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1964 if (req_ctx->plaintext_authenticate_only == false)
1967 temp64 = cpu_to_be64(req->assoclen * 8);
1968 memcpy ( &req_ctx->gcm_len_block.lenA , &temp64, sizeof(temp64) );
1969 temp64 = cpu_to_be64(cryptlen * 8);
1970 memcpy ( &req_ctx->gcm_len_block.lenC , &temp64, 8 );
1972 else { //rfc4543=> all data(AAD,IV,Plain) are considered additional data that is nothing is encrypted.
1974 temp64 = cpu_to_be64((req->assoclen+GCM_BLOCK_RFC4_IV_SIZE+cryptlen) * 8);
1975 memcpy ( &req_ctx->gcm_len_block.lenA , &temp64, sizeof(temp64) );
1977 memcpy ( &req_ctx->gcm_len_block.lenC , &temp64, 8 );
/*
 * Build the RFC 4106/4543 GCM IV from the keyed nonce plus the 8-byte
 * per-request IV, redirect req->iv to it, and strip the IV bytes that
 * ESP counts as part of assoclen.
 */
1983 static void ssi_rfc4_gcm_process(struct aead_request *req)
1985 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1986 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1987 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1989 memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET, ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1990 memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv, GCM_BLOCK_RFC4_IV_SIZE);
1991 req->iv = areq_ctx->ctr_iv;
1992 req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
1996 #endif /*SSI_CC_HAS_AES_GCM*/
/*
 * ssi_aead_process() - core AEAD path: validate the request, build the HW
 * descriptor sequence for the configured cipher/auth mode, and push it to
 * the CryptoCell engine via send_request().
 *
 * @req:    the aead_request being serviced
 * @direct: DRV_CRYPTO_DIRECTION_ENCRYPT or _DECRYPT
 *
 * Returns -EINPROGRESS on successful (asynchronous) submission; any other
 * value is a synchronous failure and the caller must restore req->iv from
 * areq_ctx->backup_iv.
 *
 * NOTE(review): several original lines (error returns, closing braces and
 * the exit label) are not visible in this extraction; the control flow
 * annotated below follows what the surviving lines demonstrate.
 */
1998 static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction direct)
2002 	HwDesc_s desc[MAX_AEAD_PROCESS_SEQ];
2003 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2004 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
2005 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2006 	struct device *dev = &ctx->drvdata->plat_dev->dev;
2007 	struct ssi_crypto_req ssi_req = {};
2009 	DECL_CYCLE_COUNT_RESOURCES;
2011 	SSI_LOG_DEBUG("%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
2012 		((direct==DRV_CRYPTO_DIRECTION_ENCRYPT)?"Encrypt":"Decrypt"), ctx, req, req->iv,
2013 		sg_virt(req->src), req->src->offset, sg_virt(req->dst), req->dst->offset, req->cryptlen);
2014 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
2016 	/* STAT_PHASE_0: Init and sanity checks */
2017 	START_CYCLE_COUNT();
2019 	/* Check data length according to mode */
2020 	if (unlikely(validate_data_size(ctx, direct, req) != 0)) {
2021 		SSI_LOG_ERR("Unsupported crypt/assoc len %d/%d.\n",
2022 				req->cryptlen, req->assoclen);
2023 		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
2027 	/* Setup DX request structure */
2028 	ssi_req.user_cb = (void *)ssi_aead_complete;
2029 	ssi_req.user_arg = (void *)req;
2031 #ifdef ENABLE_CYCLE_COUNT
2032 	ssi_req.op_type = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
2033 		STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
2035 	/* Setup request context */
2036 	areq_ctx->gen_ctx.op_type = direct;
2037 	areq_ctx->req_authsize = ctx->authsize;
2038 	areq_ctx->cipher_mode = ctx->cipher_mode;
2040 	END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_0);
2042 	/* STAT_PHASE_1: Map buffers */
2043 	START_CYCLE_COUNT();
2045 	if (ctx->cipher_mode == DRV_CIPHER_CTR) {
2046 		/* Build CTR IV - Copy nonce from last 4 bytes in
2047 		 * CTR key to first 4 bytes in CTR IV */
2048 		memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, CTR_RFC3686_NONCE_SIZE);
2049 		if (areq_ctx->backup_giv == NULL) /*User none-generated IV*/
2050 			memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
2051 				req->iv, CTR_RFC3686_IV_SIZE);
2052 		/* Initialize counter portion of counter block */
2053 		*(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
2054 			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2056 		/* Replace with counter iv */
2057 		req->iv = areq_ctx->ctr_iv;
2058 		areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
2059 	} else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
2060 		   (ctx->cipher_mode == DRV_CIPHER_GCTR) ) {
2061 		areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
		/* Copy the IV into the request-context buffer only when the
		 * caller's IV is not already that buffer (rfc4309/rfc4543
		 * preprocessing may have pointed req->iv at ctr_iv). */
2062 		if (areq_ctx->ctr_iv != req->iv) {
2063 			memcpy(areq_ctx->ctr_iv, req->iv, crypto_aead_ivsize(tfm));
2064 			req->iv = areq_ctx->ctr_iv;
2067 	areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
2070 #if SSI_CC_HAS_AES_CCM
2071 	if (ctx->cipher_mode == DRV_CIPHER_CCM) {
2072 		rc = config_ccm_adata(req);
2073 		if (unlikely(rc != 0)) {
2074 			SSI_LOG_ERR("config_ccm_adata() returned with a failure %d!", rc);
2078 		areq_ctx->ccm_hdr_size = ccm_header_size_null;
2081 	areq_ctx->ccm_hdr_size = ccm_header_size_null;
2082 #endif /*SSI_CC_HAS_AES_CCM*/
2084 #if SSI_CC_HAS_AES_GCM
2085 	if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
2086 		rc = config_gcm_context(req);
2087 		if (unlikely(rc != 0)) {
2088 			SSI_LOG_ERR("config_gcm_context() returned with a failure %d!", rc);
2092 #endif /*SSI_CC_HAS_AES_GCM*/
2094 	rc = ssi_buffer_mgr_map_aead_request(ctx->drvdata, req);
2095 	if (unlikely(rc != 0)) {
2096 		SSI_LOG_ERR("map_request() failed\n");
2100 	/* do we need to generate IV? */
2101 	if (areq_ctx->backup_giv != NULL) {
2103 		/* set the DMA mapped IV address*/
2104 		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
2105 			ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CTR_RFC3686_NONCE_SIZE;
2106 			ssi_req.ivgen_dma_addr_len = 1;
2107 		} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
2108 			/* In ccm, the IV needs to exist both inside B0 and inside the counter.
2109 			   It is also copied to iv_dma_addr for other reasons (like returning
2111 			   So, using 3 (identical) IV outputs. */
2112 			ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CCM_BLOCK_IV_OFFSET;
2113 			ssi_req.ivgen_dma_addr[1] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
2114 			ssi_req.ivgen_dma_addr[2] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
2115 			ssi_req.ivgen_dma_addr_len = 3;
2117 		ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr;
2118 		ssi_req.ivgen_dma_addr_len = 1;
2121 		/* set the IV size (8/16 B long)*/
2122 		ssi_req.ivgen_size = crypto_aead_ivsize(tfm);
2125 	END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_1);
2127 	/* STAT_PHASE_2: Create sequence */
2128 	START_CYCLE_COUNT();
2130 	/* Load MLLI tables to SRAM if necessary */
2131 	ssi_aead_load_mlli_to_sram(req, desc, &seq_len);
2133 	/*TODO: move seq len by reference */
	/* Dispatch on auth mode: HMAC (SHA1/SHA256), XCBC-MAC, or NULL
	 * (combined-mode CCM/GCM handle auth inside the cipher flow). */
2134 	switch (ctx->auth_mode) {
2136 	case DRV_HASH_SHA256:
2137 		ssi_aead_hmac_authenc(req, desc, &seq_len);
2139 	case DRV_HASH_XCBC_MAC:
2140 		ssi_aead_xcbc_authenc(req, desc, &seq_len);
2142 #if ( SSI_CC_HAS_AES_CCM || SSI_CC_HAS_AES_GCM )
2144 #if SSI_CC_HAS_AES_CCM
2145 		if (ctx->cipher_mode == DRV_CIPHER_CCM) {
2146 			ssi_aead_ccm(req, desc, &seq_len);
2148 #endif /*SSI_CC_HAS_AES_CCM*/
2149 #if SSI_CC_HAS_AES_GCM
2150 		if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
2151 			ssi_aead_gcm(req, desc, &seq_len);
2153 #endif /*SSI_CC_HAS_AES_GCM*/
2157 		SSI_LOG_ERR("Unsupported authenc (%d)\n", ctx->auth_mode);
		/* Unknown auth mode: undo the DMA mapping before bailing out */
2158 		ssi_buffer_mgr_unmap_aead_request(dev, req);
2163 	END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_2);
2165 	/* STAT_PHASE_3: Lock HW and push sequence */
2166 	START_CYCLE_COUNT();
2168 	rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 1);
2170 	if (unlikely(rc != -EINPROGRESS)) {
2171 		SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
2172 		ssi_buffer_mgr_unmap_aead_request(dev, req);
2176 	END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
2181 static int ssi_aead_encrypt(struct aead_request *req)
2183 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2186 /* No generated IV required */
2187 areq_ctx->backup_iv = req->iv;
2188 areq_ctx->backup_giv = NULL;
2189 areq_ctx->is_gcm4543 = false;
2191 areq_ctx->plaintext_authenticate_only = false;
2193 rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2194 if (rc != -EINPROGRESS)
2195 req->iv = areq_ctx->backup_iv;
#if SSI_CC_HAS_AES_CCM
/* RFC 4309 (ESP CCM) encrypt: validates the associated-data length,
 * builds the full CCM IV from the keyed nonce + per-request IV, then
 * funnels into the common process path. */
static int ssi_rfc4309_ccm_encrypt(struct aead_request *req)
{
	/* Very similar to ssi_aead_encrypt() above. */
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = -EINVAL;

	if (!valid_assoclen(req)) {
		SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
		goto out;
	}

	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->backup_giv = NULL;
	areq_ctx->is_gcm4543 = true;

	ssi_rfc4309_ccm_process(req);

	rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
	if (rc != -EINPROGRESS)
		req->iv = areq_ctx->backup_iv;
out:
	return rc;
}
#endif /* SSI_CC_HAS_AES_CCM */
2228 static int ssi_aead_decrypt(struct aead_request *req)
2230 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2233 /* No generated IV required */
2234 areq_ctx->backup_iv = req->iv;
2235 areq_ctx->backup_giv = NULL;
2236 areq_ctx->is_gcm4543 = false;
2238 areq_ctx->plaintext_authenticate_only = false;
2240 rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2241 if (rc != -EINPROGRESS)
2242 req->iv = areq_ctx->backup_iv;
#if SSI_CC_HAS_AES_CCM
/* RFC 4309 (ESP CCM) decrypt — mirror of the encrypt wrapper above. */
static int ssi_rfc4309_ccm_decrypt(struct aead_request *req)
{
	/* Very similar to ssi_aead_decrypt() above. */
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = -EINVAL;

	if (!valid_assoclen(req)) {
		SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
		goto out;
	}

	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->backup_giv = NULL;
	areq_ctx->is_gcm4543 = true;

	ssi_rfc4309_ccm_process(req);

	rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
	if (rc != -EINPROGRESS)
		req->iv = areq_ctx->backup_iv;
out:
	return rc;
}
#endif /* SSI_CC_HAS_AES_CCM */
2277 #if SSI_CC_HAS_AES_GCM
2279 static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
2281 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
2284 SSI_LOG_DEBUG("ssi_rfc4106_gcm_setkey() keylen %d, key %p \n", keylen, key );
2290 memcpy(ctx->ctr_nonce, key + keylen, 4);
2292 rc = ssi_aead_setkey(tfm, key, keylen);
2297 static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
2299 struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
2302 SSI_LOG_DEBUG("ssi_rfc4543_gcm_setkey() keylen %d, key %p \n", keylen, key );
2308 memcpy(ctx->ctr_nonce, key + keylen, 4);
2310 rc = ssi_aead_setkey(tfm, key, keylen);
2315 static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
2316 unsigned int authsize)
2331 return ssi_aead_setauthsize(authenc, authsize);
2334 static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
2335 unsigned int authsize)
2337 SSI_LOG_DEBUG("ssi_rfc4106_gcm_setauthsize() authsize %d \n", authsize );
2348 return ssi_aead_setauthsize(authenc, authsize);
2351 static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
2352 unsigned int authsize)
2354 SSI_LOG_DEBUG("ssi_rfc4543_gcm_setauthsize() authsize %d \n", authsize );
2359 return ssi_aead_setauthsize(authenc, authsize);
2362 static int ssi_rfc4106_gcm_encrypt(struct aead_request *req)
2364 /* Very similar to ssi_aead_encrypt() above. */
2366 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2369 if (!valid_assoclen(req)) {
2370 SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
2374 /* No generated IV required */
2375 areq_ctx->backup_iv = req->iv;
2376 areq_ctx->backup_giv = NULL;
2378 areq_ctx->plaintext_authenticate_only = false;
2380 ssi_rfc4_gcm_process(req);
2381 areq_ctx->is_gcm4543 = true;
2383 rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2384 if (rc != -EINPROGRESS)
2385 req->iv = areq_ctx->backup_iv;
2390 static int ssi_rfc4543_gcm_encrypt(struct aead_request *req)
2392 /* Very similar to ssi_aead_encrypt() above. */
2394 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2397 //plaintext is not encryped with rfc4543
2398 areq_ctx->plaintext_authenticate_only = true;
2400 /* No generated IV required */
2401 areq_ctx->backup_iv = req->iv;
2402 areq_ctx->backup_giv = NULL;
2404 ssi_rfc4_gcm_process(req);
2405 areq_ctx->is_gcm4543 = true;
2407 rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2408 if (rc != -EINPROGRESS)
2409 req->iv = areq_ctx->backup_iv;
2414 static int ssi_rfc4106_gcm_decrypt(struct aead_request *req)
2416 /* Very similar to ssi_aead_decrypt() above. */
2418 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2421 if (!valid_assoclen(req)) {
2422 SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
2426 /* No generated IV required */
2427 areq_ctx->backup_iv = req->iv;
2428 areq_ctx->backup_giv = NULL;
2430 areq_ctx->plaintext_authenticate_only = false;
2432 ssi_rfc4_gcm_process(req);
2433 areq_ctx->is_gcm4543 = true;
2435 rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2436 if (rc != -EINPROGRESS)
2437 req->iv = areq_ctx->backup_iv;
2442 static int ssi_rfc4543_gcm_decrypt(struct aead_request *req)
2444 /* Very similar to ssi_aead_decrypt() above. */
2446 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2449 //plaintext is not decryped with rfc4543
2450 areq_ctx->plaintext_authenticate_only = true;
2452 /* No generated IV required */
2453 areq_ctx->backup_iv = req->iv;
2454 areq_ctx->backup_giv = NULL;
2456 ssi_rfc4_gcm_process(req);
2457 areq_ctx->is_gcm4543 = true;
2459 rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2460 if (rc != -EINPROGRESS)
2461 req->iv = areq_ctx->backup_iv;
2465 #endif /* SSI_CC_HAS_AES_GCM */
/*
 * Registration table for every AEAD algorithm exposed by this driver.
 * Each entry maps a Linux crypto name to the HW cipher mode / flow mode /
 * auth mode used by ssi_aead_process(). CCM and GCM entries are compiled
 * in only when the HW supports them (SSI_CC_HAS_AES_CCM / _GCM).
 */
2467 /* DX Block aead alg */
2468 static struct ssi_alg_template aead_algs[] = {
	/* --- HMAC-based authenc over CBC (AES / 3DES) --- */
2470 	.name = "authenc(hmac(sha1),cbc(aes))",
2471 	.driver_name = "authenc-hmac-sha1-cbc-aes-dx",
2472 	.blocksize = AES_BLOCK_SIZE,
2473 	.type = CRYPTO_ALG_TYPE_AEAD,
2475 	.setkey = ssi_aead_setkey,
2476 	.setauthsize = ssi_aead_setauthsize,
2477 	.encrypt = ssi_aead_encrypt,
2478 	.decrypt = ssi_aead_decrypt,
2479 	.init = ssi_aead_init,
2480 	.exit = ssi_aead_exit,
2481 	.ivsize = AES_BLOCK_SIZE,
2482 	.maxauthsize = SHA1_DIGEST_SIZE,
2484 	.cipher_mode = DRV_CIPHER_CBC,
2485 	.flow_mode = S_DIN_to_AES,
2486 	.auth_mode = DRV_HASH_SHA1,
2489 	.name = "authenc(hmac(sha1),cbc(des3_ede))",
2490 	.driver_name = "authenc-hmac-sha1-cbc-des3-dx",
2491 	.blocksize = DES3_EDE_BLOCK_SIZE,
2492 	.type = CRYPTO_ALG_TYPE_AEAD,
2494 	.setkey = ssi_aead_setkey,
2495 	.setauthsize = ssi_aead_setauthsize,
2496 	.encrypt = ssi_aead_encrypt,
2497 	.decrypt = ssi_aead_decrypt,
2498 	.init = ssi_aead_init,
2499 	.exit = ssi_aead_exit,
2500 	.ivsize = DES3_EDE_BLOCK_SIZE,
2501 	.maxauthsize = SHA1_DIGEST_SIZE,
2503 	.cipher_mode = DRV_CIPHER_CBC,
2504 	.flow_mode = S_DIN_to_DES,
2505 	.auth_mode = DRV_HASH_SHA1,
2508 	.name = "authenc(hmac(sha256),cbc(aes))",
2509 	.driver_name = "authenc-hmac-sha256-cbc-aes-dx",
2510 	.blocksize = AES_BLOCK_SIZE,
2511 	.type = CRYPTO_ALG_TYPE_AEAD,
2513 	.setkey = ssi_aead_setkey,
2514 	.setauthsize = ssi_aead_setauthsize,
2515 	.encrypt = ssi_aead_encrypt,
2516 	.decrypt = ssi_aead_decrypt,
2517 	.init = ssi_aead_init,
2518 	.exit = ssi_aead_exit,
2519 	.ivsize = AES_BLOCK_SIZE,
2520 	.maxauthsize = SHA256_DIGEST_SIZE,
2522 	.cipher_mode = DRV_CIPHER_CBC,
2523 	.flow_mode = S_DIN_to_AES,
2524 	.auth_mode = DRV_HASH_SHA256,
2527 	.name = "authenc(hmac(sha256),cbc(des3_ede))",
2528 	.driver_name = "authenc-hmac-sha256-cbc-des3-dx",
2529 	.blocksize = DES3_EDE_BLOCK_SIZE,
2530 	.type = CRYPTO_ALG_TYPE_AEAD,
2532 	.setkey = ssi_aead_setkey,
2533 	.setauthsize = ssi_aead_setauthsize,
2534 	.encrypt = ssi_aead_encrypt,
2535 	.decrypt = ssi_aead_decrypt,
2536 	.init = ssi_aead_init,
2537 	.exit = ssi_aead_exit,
2538 	.ivsize = DES3_EDE_BLOCK_SIZE,
2539 	.maxauthsize = SHA256_DIGEST_SIZE,
2541 	.cipher_mode = DRV_CIPHER_CBC,
2542 	.flow_mode = S_DIN_to_DES,
2543 	.auth_mode = DRV_HASH_SHA256,
	/* --- XCBC-MAC authenc over CBC(AES) --- */
2546 	.name = "authenc(xcbc(aes),cbc(aes))",
2547 	.driver_name = "authenc-xcbc-aes-cbc-aes-dx",
2548 	.blocksize = AES_BLOCK_SIZE,
2549 	.type = CRYPTO_ALG_TYPE_AEAD,
2551 	.setkey = ssi_aead_setkey,
2552 	.setauthsize = ssi_aead_setauthsize,
2553 	.encrypt = ssi_aead_encrypt,
2554 	.decrypt = ssi_aead_decrypt,
2555 	.init = ssi_aead_init,
2556 	.exit = ssi_aead_exit,
2557 	.ivsize = AES_BLOCK_SIZE,
2558 	.maxauthsize = AES_BLOCK_SIZE,
2560 	.cipher_mode = DRV_CIPHER_CBC,
2561 	.flow_mode = S_DIN_to_AES,
2562 	.auth_mode = DRV_HASH_XCBC_MAC,
	/* --- authenc over RFC 3686 CTR(AES) --- */
2565 	.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
2566 	.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-dx",
2568 	.type = CRYPTO_ALG_TYPE_AEAD,
2570 	.setkey = ssi_aead_setkey,
2571 	.setauthsize = ssi_aead_setauthsize,
2572 	.encrypt = ssi_aead_encrypt,
2573 	.decrypt = ssi_aead_decrypt,
2574 	.init = ssi_aead_init,
2575 	.exit = ssi_aead_exit,
2576 	.ivsize = CTR_RFC3686_IV_SIZE,
2577 	.maxauthsize = SHA1_DIGEST_SIZE,
2579 	.cipher_mode = DRV_CIPHER_CTR,
2580 	.flow_mode = S_DIN_to_AES,
2581 	.auth_mode = DRV_HASH_SHA1,
2584 	.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
2585 	.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-dx",
2587 	.type = CRYPTO_ALG_TYPE_AEAD,
2589 	.setkey = ssi_aead_setkey,
2590 	.setauthsize = ssi_aead_setauthsize,
2591 	.encrypt = ssi_aead_encrypt,
2592 	.decrypt = ssi_aead_decrypt,
2593 	.init = ssi_aead_init,
2594 	.exit = ssi_aead_exit,
2595 	.ivsize = CTR_RFC3686_IV_SIZE,
2596 	.maxauthsize = SHA256_DIGEST_SIZE,
2598 	.cipher_mode = DRV_CIPHER_CTR,
2599 	.flow_mode = S_DIN_to_AES,
2600 	.auth_mode = DRV_HASH_SHA256,
2603 	.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
2604 	.driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-dx",
2606 	.type = CRYPTO_ALG_TYPE_AEAD,
2608 	.setkey = ssi_aead_setkey,
2609 	.setauthsize = ssi_aead_setauthsize,
2610 	.encrypt = ssi_aead_encrypt,
2611 	.decrypt = ssi_aead_decrypt,
2612 	.init = ssi_aead_init,
2613 	.exit = ssi_aead_exit,
2614 	.ivsize = CTR_RFC3686_IV_SIZE,
2615 	.maxauthsize = AES_BLOCK_SIZE,
2617 	.cipher_mode = DRV_CIPHER_CTR,
2618 	.flow_mode = S_DIN_to_AES,
2619 	.auth_mode = DRV_HASH_XCBC_MAC,
	/* --- combined-mode CCM (plain + RFC 4309/ESP) --- */
2621 #if SSI_CC_HAS_AES_CCM
2624 	.driver_name = "ccm-aes-dx",
2626 	.type = CRYPTO_ALG_TYPE_AEAD,
2628 	.setkey = ssi_aead_setkey,
2629 	.setauthsize = ssi_ccm_setauthsize,
2630 	.encrypt = ssi_aead_encrypt,
2631 	.decrypt = ssi_aead_decrypt,
2632 	.init = ssi_aead_init,
2633 	.exit = ssi_aead_exit,
2634 	.ivsize = AES_BLOCK_SIZE,
2635 	.maxauthsize = AES_BLOCK_SIZE,
2637 	.cipher_mode = DRV_CIPHER_CCM,
2638 	.flow_mode = S_DIN_to_AES,
2639 	.auth_mode = DRV_HASH_NULL,
2642 	.name = "rfc4309(ccm(aes))",
2643 	.driver_name = "rfc4309-ccm-aes-dx",
2645 	.type = CRYPTO_ALG_TYPE_AEAD,
2647 	.setkey = ssi_rfc4309_ccm_setkey,
2648 	.setauthsize = ssi_rfc4309_ccm_setauthsize,
2649 	.encrypt = ssi_rfc4309_ccm_encrypt,
2650 	.decrypt = ssi_rfc4309_ccm_decrypt,
2651 	.init = ssi_aead_init,
2652 	.exit = ssi_aead_exit,
2653 	.ivsize = CCM_BLOCK_IV_SIZE,
2654 	.maxauthsize = AES_BLOCK_SIZE,
2656 	.cipher_mode = DRV_CIPHER_CCM,
2657 	.flow_mode = S_DIN_to_AES,
2658 	.auth_mode = DRV_HASH_NULL,
2660 #endif /*SSI_CC_HAS_AES_CCM*/
	/* --- combined-mode GCM (plain + RFC 4106 + RFC 4543) --- */
2661 #if SSI_CC_HAS_AES_GCM
2664 	.driver_name = "gcm-aes-dx",
2666 	.type = CRYPTO_ALG_TYPE_AEAD,
2668 	.setkey = ssi_aead_setkey,
2669 	.setauthsize = ssi_gcm_setauthsize,
2670 	.encrypt = ssi_aead_encrypt,
2671 	.decrypt = ssi_aead_decrypt,
2672 	.init = ssi_aead_init,
2673 	.exit = ssi_aead_exit,
2675 	.maxauthsize = AES_BLOCK_SIZE,
2677 	.cipher_mode = DRV_CIPHER_GCTR,
2678 	.flow_mode = S_DIN_to_AES,
2679 	.auth_mode = DRV_HASH_NULL,
2682 	.name = "rfc4106(gcm(aes))",
2683 	.driver_name = "rfc4106-gcm-aes-dx",
2685 	.type = CRYPTO_ALG_TYPE_AEAD,
2687 	.setkey = ssi_rfc4106_gcm_setkey,
2688 	.setauthsize = ssi_rfc4106_gcm_setauthsize,
2689 	.encrypt = ssi_rfc4106_gcm_encrypt,
2690 	.decrypt = ssi_rfc4106_gcm_decrypt,
2691 	.init = ssi_aead_init,
2692 	.exit = ssi_aead_exit,
2693 	.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2694 	.maxauthsize = AES_BLOCK_SIZE,
2696 	.cipher_mode = DRV_CIPHER_GCTR,
2697 	.flow_mode = S_DIN_to_AES,
2698 	.auth_mode = DRV_HASH_NULL,
2701 	.name = "rfc4543(gcm(aes))",
2702 	.driver_name = "rfc4543-gcm-aes-dx",
2704 	.type = CRYPTO_ALG_TYPE_AEAD,
2706 	.setkey = ssi_rfc4543_gcm_setkey,
2707 	.setauthsize = ssi_rfc4543_gcm_setauthsize,
2708 	.encrypt = ssi_rfc4543_gcm_encrypt,
2709 	.decrypt = ssi_rfc4543_gcm_decrypt,
2710 	.init = ssi_aead_init,
2711 	.exit = ssi_aead_exit,
2712 	.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2713 	.maxauthsize = AES_BLOCK_SIZE,
2715 	.cipher_mode = DRV_CIPHER_GCTR,
2716 	.flow_mode = S_DIN_to_AES,
2717 	.auth_mode = DRV_HASH_NULL,
2719 #endif /*SSI_CC_HAS_AES_GCM*/
2722 static struct ssi_crypto_alg *ssi_aead_create_alg(struct ssi_alg_template *template)
2724 struct ssi_crypto_alg *t_alg;
2725 struct aead_alg *alg;
2727 t_alg = kzalloc(sizeof(struct ssi_crypto_alg), GFP_KERNEL);
2729 SSI_LOG_ERR("failed to allocate t_alg\n");
2730 return ERR_PTR(-ENOMEM);
2732 alg = &template->template_aead;
2734 snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2735 snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2736 template->driver_name);
2737 alg->base.cra_module = THIS_MODULE;
2738 alg->base.cra_priority = SSI_CRA_PRIO;
2740 alg->base.cra_ctxsize = sizeof(struct ssi_aead_ctx);
2741 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2743 alg->init = ssi_aead_init;
2744 alg->exit = ssi_aead_exit;
2746 t_alg->aead_alg = *alg;
2748 t_alg->cipher_mode = template->cipher_mode;
2749 t_alg->flow_mode = template->flow_mode;
2750 t_alg->auth_mode = template->auth_mode;
2755 int ssi_aead_free(struct ssi_drvdata *drvdata)
2757 struct ssi_crypto_alg *t_alg, *n;
2758 struct ssi_aead_handle *aead_handle =
2759 (struct ssi_aead_handle *)drvdata->aead_handle;
2761 if (aead_handle != NULL) {
2762 /* Remove registered algs */
2763 list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
2764 crypto_unregister_aead(&t_alg->aead_alg);
2765 list_del(&t_alg->entry);
2769 drvdata->aead_handle = NULL;
/*
 * ssi_aead_alloc() - allocate the AEAD handle, reserve SRAM workspace for
 * HMAC intermediate digests, and register every entry of aead_algs[] with
 * the Linux crypto API.
 *
 * Returns 0 on success; on any failure the already-registered algs are
 * torn down through ssi_aead_free().
 *
 * NOTE(review): the error-path labels/returns of this function are not
 * visible in this extraction; only the final ssi_aead_free() cleanup call
 * survives below.
 */
2775 int ssi_aead_alloc(struct ssi_drvdata *drvdata)
2777 	struct ssi_aead_handle *aead_handle;
2778 	struct ssi_crypto_alg *t_alg;
2782 	aead_handle = kmalloc(sizeof(struct ssi_aead_handle), GFP_KERNEL);
2783 	if (aead_handle == NULL) {
2788 	drvdata->aead_handle = aead_handle;
	/* Shared SRAM scratch area sized for the largest supported digest */
2790 	aead_handle->sram_workspace_addr = ssi_sram_mgr_alloc(
2791 			drvdata, MAX_HMAC_DIGEST_SIZE);
2792 	if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
2793 		SSI_LOG_ERR("SRAM pool exhausted\n");
2798 	INIT_LIST_HEAD(&aead_handle->aead_list);
	/* Register each template; track successes on aead_list for cleanup */
2801 	for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
2802 		t_alg = ssi_aead_create_alg(&aead_algs[alg]);
2803 		if (IS_ERR(t_alg)) {
2804 			rc = PTR_ERR(t_alg);
2805 			SSI_LOG_ERR("%s alg allocation failed\n",
2806 					aead_algs[alg].driver_name);
2809 		t_alg->drvdata = drvdata;
2810 		rc = crypto_register_aead(&t_alg->aead_alg);
2811 		if (unlikely(rc != 0)) {
2812 			SSI_LOG_ERR("%s alg registration failed\n",
2813 				t_alg->aead_alg.base.cra_driver_name);
2816 			list_add_tail(&t_alg->entry, &aead_handle->aead_list);
2817 			SSI_LOG_DEBUG("Registered %s\n", t_alg->aead_alg.base.cra_driver_name);
	/* Failure path: unwind everything registered so far */
2826 	ssi_aead_free(drvdata);