/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/sha.h>
#include <crypto/ctr.h>
#include <crypto/authenc.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <linux/rtnetlink.h>
#include <linux/version.h>
#include "ssi_config.h"
#include "ssi_driver.h"
#include "ssi_buffer_mgr.h"
#include "ssi_aead.h"
#include "ssi_request_mgr.h"
#include "ssi_hash.h"
#include "ssi_sysfs.h"
#include "ssi_sram_mgr.h"
#include "ssi_fips_local.h"

#define template_aead   template_u.aead

#define MAX_AEAD_SETKEY_SEQ 12
#define MAX_AEAD_PROCESS_SEQ 23

#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)

#define AES_CCM_RFC4309_NONCE_SIZE 3
#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE

/* Value of each ICV_CMP byte (of 8) in case of success */
#define ICV_VERIF_OK 0x01

struct ssi_aead_handle {
        ssi_sram_addr_t sram_workspace_addr;
        struct list_head aead_list;
};

struct ssi_aead_ctx {
        struct ssi_drvdata *drvdata;
        uint8_t ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
        uint8_t *enckey;
        dma_addr_t enckey_dma_addr;
        union {
                struct {
                        uint8_t *padded_authkey;
                        uint8_t *ipad_opad; /* IPAD, OPAD */
                        dma_addr_t padded_authkey_dma_addr;
                        dma_addr_t ipad_opad_dma_addr;
                } hmac;
                struct {
                        uint8_t *xcbc_keys; /* K1, K2, K3 */
                        dma_addr_t xcbc_keys_dma_addr;
                } xcbc;
        } auth_state;
        unsigned int enc_keylen;
        unsigned int auth_keylen;
        unsigned int authsize; /* Actual (reduced?) size of the MAC/ICV */
        enum drv_cipher_mode cipher_mode;
        enum FlowMode flow_mode;
        enum drv_hash_mode auth_mode;
};

static inline bool valid_assoclen(struct aead_request *req)
{
        return ((req->assoclen == 16) || (req->assoclen == 20));
}

static void ssi_aead_exit(struct crypto_aead *tfm)
{
        struct device *dev = NULL;
        struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);

        SSI_LOG_DEBUG("Clearing context @%p for %s\n",
                crypto_aead_ctx(tfm), crypto_tfm_alg_name(&(tfm->base)));

        dev = &ctx->drvdata->plat_dev->dev;
        /* Unmap enckey buffer */
        if (ctx->enckey != NULL) {
                SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->enckey_dma_addr);
                dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey, ctx->enckey_dma_addr);
                SSI_LOG_DEBUG("Freed enckey DMA buffer enckey_dma_addr=0x%llX\n",
                        (unsigned long long)ctx->enckey_dma_addr);
                ctx->enckey_dma_addr = 0;
                ctx->enckey = NULL;
        }

        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
                if (ctx->auth_state.xcbc.xcbc_keys != NULL) {
                        SSI_RESTORE_DMA_ADDR_TO_48BIT(
                                ctx->auth_state.xcbc.xcbc_keys_dma_addr);
                        dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
                                ctx->auth_state.xcbc.xcbc_keys,
                                ctx->auth_state.xcbc.xcbc_keys_dma_addr);
                        SSI_LOG_DEBUG("Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=0x%llX\n",
                                (unsigned long long)ctx->auth_state.xcbc.xcbc_keys_dma_addr);
                }
                ctx->auth_state.xcbc.xcbc_keys_dma_addr = 0;
                ctx->auth_state.xcbc.xcbc_keys = NULL;
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
                if (ctx->auth_state.hmac.ipad_opad != NULL) {
                        SSI_RESTORE_DMA_ADDR_TO_48BIT(
                                ctx->auth_state.hmac.ipad_opad_dma_addr);
                        dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
                                ctx->auth_state.hmac.ipad_opad,
                                ctx->auth_state.hmac.ipad_opad_dma_addr);
                        SSI_LOG_DEBUG("Freed ipad_opad DMA buffer ipad_opad_dma_addr=0x%llX\n",
                                (unsigned long long)ctx->auth_state.hmac.ipad_opad_dma_addr);
                        ctx->auth_state.hmac.ipad_opad_dma_addr = 0;
                        ctx->auth_state.hmac.ipad_opad = NULL;
                }
                if (ctx->auth_state.hmac.padded_authkey != NULL) {
                        SSI_RESTORE_DMA_ADDR_TO_48BIT(
                                ctx->auth_state.hmac.padded_authkey_dma_addr);
                        dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
                                ctx->auth_state.hmac.padded_authkey,
                                ctx->auth_state.hmac.padded_authkey_dma_addr);
                        SSI_LOG_DEBUG("Freed padded_authkey DMA buffer padded_authkey_dma_addr=0x%llX\n",
                                (unsigned long long)ctx->auth_state.hmac.padded_authkey_dma_addr);
                        ctx->auth_state.hmac.padded_authkey_dma_addr = 0;
                        ctx->auth_state.hmac.padded_authkey = NULL;
                }
        }
}

static int ssi_aead_init(struct crypto_aead *tfm)
{
        struct device *dev;
        struct aead_alg *alg = crypto_aead_alg(tfm);
        struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct ssi_crypto_alg *ssi_alg =
                        container_of(alg, struct ssi_crypto_alg, aead_alg);
        SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx, crypto_tfm_alg_name(&(tfm->base)));

        CHECK_AND_RETURN_UPON_FIPS_ERROR();

        /* Initialize modes in instance */
        ctx->cipher_mode = ssi_alg->cipher_mode;
        ctx->flow_mode = ssi_alg->flow_mode;
        ctx->auth_mode = ssi_alg->auth_mode;
        ctx->drvdata = ssi_alg->drvdata;
        dev = &ctx->drvdata->plat_dev->dev;
        crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));

        /* Allocate key buffer, cache line aligned */
        ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
                &ctx->enckey_dma_addr, GFP_KERNEL);
        if (ctx->enckey == NULL) {
                SSI_LOG_ERR("Failed allocating key buffer\n");
                goto init_failed;
        }
        SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->enckey_dma_addr, AES_MAX_KEY_SIZE);
        SSI_LOG_DEBUG("Allocated enckey buffer in context ctx->enckey=@%p\n", ctx->enckey);

        /* Set default authlen value */

        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
                /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
                /* (and temporary for user key - up to 256b) */
                ctx->auth_state.xcbc.xcbc_keys = dma_alloc_coherent(dev,
                        CC_AES_128_BIT_KEY_SIZE * 3,
                        &ctx->auth_state.xcbc.xcbc_keys_dma_addr, GFP_KERNEL);
                if (ctx->auth_state.xcbc.xcbc_keys == NULL) {
                        SSI_LOG_ERR("Failed allocating buffer for XCBC keys\n");
                        goto init_failed;
                }
                SSI_UPDATE_DMA_ADDR_TO_48BIT(
                        ctx->auth_state.xcbc.xcbc_keys_dma_addr,
                        CC_AES_128_BIT_KEY_SIZE * 3);
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
                /* Allocate dma-coherent buffer for IPAD + OPAD */
                ctx->auth_state.hmac.ipad_opad = dma_alloc_coherent(dev,
                        2 * MAX_HMAC_DIGEST_SIZE,
                        &ctx->auth_state.hmac.ipad_opad_dma_addr, GFP_KERNEL);
                if (ctx->auth_state.hmac.ipad_opad == NULL) {
                        SSI_LOG_ERR("Failed allocating IPAD/OPAD buffer\n");
                        goto init_failed;
                }
                SSI_UPDATE_DMA_ADDR_TO_48BIT(
                        ctx->auth_state.hmac.ipad_opad_dma_addr,
                        2 * MAX_HMAC_DIGEST_SIZE);
                SSI_LOG_DEBUG("Allocated authkey buffer in context ctx->authkey=@%p\n",
                        ctx->auth_state.hmac.ipad_opad);

                ctx->auth_state.hmac.padded_authkey = dma_alloc_coherent(dev,
                        MAX_HMAC_BLOCK_SIZE,
                        &ctx->auth_state.hmac.padded_authkey_dma_addr, GFP_KERNEL);
                if (ctx->auth_state.hmac.padded_authkey == NULL) {
                        SSI_LOG_ERR("failed to allocate padded_authkey\n");
                        goto init_failed;
                }
                SSI_UPDATE_DMA_ADDR_TO_48BIT(
                        ctx->auth_state.hmac.padded_authkey_dma_addr,
                        MAX_HMAC_BLOCK_SIZE);
        } else {
                ctx->auth_state.hmac.ipad_opad = NULL;
                ctx->auth_state.hmac.padded_authkey = NULL;
        }

        return 0;

init_failed:
        ssi_aead_exit(tfm);
        return -ENOMEM;
}

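/*
 * Request completion callback: unmaps the AEAD request buffers and, on
 * decryption, compares the MAC computed by the hardware against the ICV
 * received with the ciphertext; on mismatch the decrypted payload is
 * zeroed and -EBADMSG is returned. On encryption, a fragmented ICV held
 * in mac_buf is written out to the destination scatterlist, and any
 * generated IV is copied back to the caller's buffer.
 */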
static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
{
        struct aead_request *areq = (struct aead_request *)ssi_req;
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
        int err = 0;
        DECL_CYCLE_COUNT_RESOURCES;

        START_CYCLE_COUNT();

        ssi_buffer_mgr_unmap_aead_request(dev, areq);

        /* Restore ordinary iv pointer */
        areq->iv = areq_ctx->backup_iv;

        if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
                if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
                        ctx->authsize) != 0) {
                        SSI_LOG_DEBUG("Payload authentication failure, "
                                "(auth-size=%d, cipher=%d).\n",
                                ctx->authsize, ctx->cipher_mode);
                        /* On payload authentication failure the decrypted
                           message must not be revealed --> zero its memory. */
                        ssi_buffer_mgr_zero_sgl(areq->dst, areq_ctx->cryptlen);
                        err = -EBADMSG;
                }
        } else { /*ENCRYPT*/
                if (unlikely(areq_ctx->is_icv_fragmented == true)) {
                        ssi_buffer_mgr_copy_scatterlist_portion(
                                areq_ctx->mac_buf, areq_ctx->dstSgl, areq->cryptlen + areq_ctx->dstOffset,
                                areq->cryptlen + areq_ctx->dstOffset + ctx->authsize, SSI_SG_FROM_BUF);
                }

                /* If an IV was generated, copy it back to the user provided buffer. */
                if (areq_ctx->backup_giv != NULL) {
                        if (ctx->cipher_mode == DRV_CIPHER_CTR) {
                                memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_IV_SIZE);
                        } else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
                                memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
                        }
                }
        }

        END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_4);
        aead_request_complete(areq, err);
}

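/*
 * Derive the three XCBC-MAC subkeys per RFC 3566: K1, K2 and K3 are the
 * AES-ECB encryptions of the constant blocks 0x01..01, 0x02..02 and
 * 0x03..03 under the user key. The derived keys overwrite the user key
 * in the same DMA buffer, which is no longer needed once the key has
 * been loaded into the engine.
 */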
static int xcbc_setkey(HwDesc_s *desc, struct ssi_aead_ctx *ctx)
{
        /* Load the AES key */
        HW_DESC_INIT(&desc[0]);
        /* The source/user key shares the buffer used for the output keys,
           since the user key is not needed once this load completes */
        HW_DESC_SET_DIN_TYPE(&desc[0], DMA_DLLI, ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen, NS_BIT);
        HW_DESC_SET_CIPHER_MODE(&desc[0], DRV_CIPHER_ECB);
        HW_DESC_SET_CIPHER_CONFIG0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
        HW_DESC_SET_KEY_SIZE_AES(&desc[0], ctx->auth_keylen);
        HW_DESC_SET_FLOW_MODE(&desc[0], S_DIN_to_AES);
        HW_DESC_SET_SETUP_MODE(&desc[0], SETUP_LOAD_KEY0);

        HW_DESC_INIT(&desc[1]);
        HW_DESC_SET_DIN_CONST(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
        HW_DESC_SET_FLOW_MODE(&desc[1], DIN_AES_DOUT);
        HW_DESC_SET_DOUT_DLLI(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr, AES_KEYSIZE_128, NS_BIT, 0);

        HW_DESC_INIT(&desc[2]);
        HW_DESC_SET_DIN_CONST(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
        HW_DESC_SET_FLOW_MODE(&desc[2], DIN_AES_DOUT);
        HW_DESC_SET_DOUT_DLLI(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
                                         + AES_KEYSIZE_128),
                              AES_KEYSIZE_128, NS_BIT, 0);

        HW_DESC_INIT(&desc[3]);
        HW_DESC_SET_DIN_CONST(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
        HW_DESC_SET_FLOW_MODE(&desc[3], DIN_AES_DOUT);
        HW_DESC_SET_DOUT_DLLI(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
                                          + 2 * AES_KEYSIZE_128),
                              AES_KEYSIZE_128, NS_BIT, 0);

        return 4;
}

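/*
 * Precompute the HMAC intermediate digests per RFC 2104: for each of the
 * ipad and opad constants, hash one block of (padded_authkey XOR pad)
 * starting from the hash's initial (larval) state, and store the
 * resulting intermediate digest in the ipad_opad buffer. Requests then
 * resume from these precomputed states instead of rehashing the key.
 */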
static int hmac_setkey(HwDesc_s *desc, struct ssi_aead_ctx *ctx)
{
        unsigned int hmacPadConst[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
        unsigned int digest_ofs = 0;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                        DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                        CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;

        int idx = 0;
        int i;

        /* calc derived HMAC key */
        for (i = 0; i < 2; i++) {
                /* Load hash initial state */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
                HW_DESC_SET_DIN_SRAM(&desc[idx],
                        ssi_ahash_get_larval_digest_sram_addr(
                                ctx->drvdata, ctx->auth_mode),
                        digest_size);
                HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
                HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
                idx++;

                /* Load the hash current length */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
                HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
                HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
                HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
                idx++;

                /* Prepare ipad key */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_XOR_VAL(&desc[idx], hmacPadConst[i]);
                HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
                HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
                HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
                idx++;

                /* Perform HASH update */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
                                   ctx->auth_state.hmac.padded_authkey_dma_addr,
                                     SHA256_BLOCK_SIZE, NS_BIT);
                HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
                HW_DESC_SET_XOR_ACTIVE(&desc[idx]);
                HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
                idx++;

                /* Get the digest */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
                HW_DESC_SET_DOUT_DLLI(&desc[idx],
                                      (ctx->auth_state.hmac.ipad_opad_dma_addr +
                                       digest_ofs),
                                      digest_size, NS_BIT, 0);
                HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
                HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
                HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
                idx++;

                digest_ofs += digest_size;
        }

        return idx;
}

static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
{
        SSI_LOG_DEBUG("enc_keylen=%u  authkeylen=%u\n",
                ctx->enc_keylen, ctx->auth_keylen);

        switch (ctx->auth_mode) {
        case DRV_HASH_SHA1:
        case DRV_HASH_SHA256:
                break;
        case DRV_HASH_XCBC_MAC:
                if ((ctx->auth_keylen != AES_KEYSIZE_128) &&
                    (ctx->auth_keylen != AES_KEYSIZE_192) &&
                    (ctx->auth_keylen != AES_KEYSIZE_256))
                        return -ENOTSUPP;
                break;
        case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
                if (ctx->auth_keylen > 0)
                        return -EINVAL;
                break;
        default:
                SSI_LOG_ERR("Invalid auth_mode=%d\n", ctx->auth_mode);
                return -EINVAL;
        }
        /* Check cipher key size */
        if (unlikely(ctx->flow_mode == S_DIN_to_DES)) {
                if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
                        SSI_LOG_ERR("Invalid cipher(3DES) key size: %u\n",
                                ctx->enc_keylen);
                        return -EINVAL;
                }
        } else { /* Default assumed to be AES ciphers */
                if ((ctx->enc_keylen != AES_KEYSIZE_128) &&
                    (ctx->enc_keylen != AES_KEYSIZE_192) &&
                    (ctx->enc_keylen != AES_KEYSIZE_256)) {
                        SSI_LOG_ERR("Invalid cipher(AES) key size: %u\n",
                                ctx->enc_keylen);
                        return -EINVAL;
                }
        }

        return 0; /* All tests of keys sizes passed */
}
/* This function prepares the user key for HMAC processing: the key is
   copied to an internal buffer, or hashed first if it is longer than
   the hash block size. */
static int
ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
        dma_addr_t key_dma_addr = 0;
        struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = &ctx->drvdata->plat_dev->dev;
        uint32_t larval_addr = ssi_ahash_get_larval_digest_sram_addr(
                                        ctx->drvdata, ctx->auth_mode);
        struct ssi_crypto_req ssi_req = {};
        unsigned int blocksize;
        unsigned int digestsize;
        unsigned int hashmode;
        unsigned int idx = 0;
        int rc = 0;
        HwDesc_s desc[MAX_AEAD_SETKEY_SEQ];
        dma_addr_t padded_authkey_dma_addr =
                ctx->auth_state.hmac.padded_authkey_dma_addr;

        switch (ctx->auth_mode) { /* auth_key required and >0 */
        case DRV_HASH_SHA1:
                blocksize = SHA1_BLOCK_SIZE;
                digestsize = SHA1_DIGEST_SIZE;
                hashmode = DRV_HASH_HW_SHA1;
                break;
        case DRV_HASH_SHA256:
        default:
                blocksize = SHA256_BLOCK_SIZE;
                digestsize = SHA256_DIGEST_SIZE;
                hashmode = DRV_HASH_HW_SHA256;
        }

        if (likely(keylen != 0)) {
                key_dma_addr = dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, key_dma_addr))) {
                        SSI_LOG_ERR("Mapping key va=0x%p len=%u for"
                                   " DMA failed\n", key, keylen);
                        return -ENOMEM;
                }
                SSI_UPDATE_DMA_ADDR_TO_48BIT(key_dma_addr, keylen);
                if (keylen > blocksize) {
                        /* Load hash initial state */
                        HW_DESC_INIT(&desc[idx]);
                        HW_DESC_SET_CIPHER_MODE(&desc[idx], hashmode);
                        HW_DESC_SET_DIN_SRAM(&desc[idx], larval_addr, digestsize);
                        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
                        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
                        idx++;

                        /* Load the hash current length */
                        HW_DESC_INIT(&desc[idx]);
                        HW_DESC_SET_CIPHER_MODE(&desc[idx], hashmode);
                        HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
                        HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
                        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
                        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
                        idx++;

                        HW_DESC_INIT(&desc[idx]);
                        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
                                             key_dma_addr,
                                             keylen, NS_BIT);
                        HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
                        idx++;

                        /* Get hashed key */
                        HW_DESC_INIT(&desc[idx]);
                        HW_DESC_SET_CIPHER_MODE(&desc[idx], hashmode);
                        HW_DESC_SET_DOUT_DLLI(&desc[idx],
                                         padded_authkey_dma_addr,
                                         digestsize,
                                         NS_BIT, 0);
                        HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
                        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
                        HW_DESC_SET_CIPHER_CONFIG1(&desc[idx],
                                                        HASH_PADDING_DISABLED);
                        HW_DESC_SET_CIPHER_CONFIG0(&desc[idx],
                                                   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                        idx++;

                        HW_DESC_INIT(&desc[idx]);
                        HW_DESC_SET_DIN_CONST(&desc[idx], 0, (blocksize - digestsize));
                        HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
                        HW_DESC_SET_DOUT_DLLI(&desc[idx],
                                              (padded_authkey_dma_addr + digestsize),
                                              (blocksize - digestsize),
                                              NS_BIT, 0);
                        idx++;
                } else {
                        HW_DESC_INIT(&desc[idx]);
                        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
                                             key_dma_addr,
                                             keylen, NS_BIT);
                        HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
                        HW_DESC_SET_DOUT_DLLI(&desc[idx],
                                              (padded_authkey_dma_addr),
                                              keylen, NS_BIT, 0);
                        idx++;

                        if ((blocksize - keylen) != 0) {
                                HW_DESC_INIT(&desc[idx]);
                                HW_DESC_SET_DIN_CONST(&desc[idx], 0,
                                                      (blocksize - keylen));
                                HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
                                HW_DESC_SET_DOUT_DLLI(&desc[idx],
                                        (padded_authkey_dma_addr + keylen),
                                        (blocksize - keylen),
                                        NS_BIT, 0);
                                idx++;
                        }
                }
        } else {
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_DIN_CONST(&desc[idx], 0,
                                      (blocksize - keylen));
                HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
                HW_DESC_SET_DOUT_DLLI(&desc[idx],
                        padded_authkey_dma_addr,
                        blocksize,
                        NS_BIT, 0);
                idx++;
        }

#ifdef ENABLE_CYCLE_COUNT
        ssi_req.op_type = STAT_OP_TYPE_SETKEY;
#endif

        rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
        if (unlikely(rc != 0))
                SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);

        if (likely(key_dma_addr != 0)) {
                SSI_RESTORE_DMA_ADDR_TO_48BIT(key_dma_addr);
                dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
        }

        return rc;
}

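/*
 * authenc() keys arrive as a single blob: an rtattr header of type
 * CRYPTO_AUTHENC_KEYA_PARAM carrying the big-endian enckeylen, followed
 * by the authentication key and then the encryption key. For rfc3686
 * CTR mode the last 4 bytes of the encryption key are the nonce.
 */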
static int
ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
        struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct rtattr *rta = (struct rtattr *)key;
        struct ssi_crypto_req ssi_req = {};
        struct crypto_authenc_key_param *param;
        HwDesc_s desc[MAX_AEAD_SETKEY_SEQ];
        int seq_len = 0, rc = -EINVAL;
        DECL_CYCLE_COUNT_RESOURCES;

        SSI_LOG_DEBUG("Setting key in context @%p for %s. key=%p keylen=%u\n",
                ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);

        CHECK_AND_RETURN_UPON_FIPS_ERROR();
        /* STAT_PHASE_0: Init and sanity checks */
        START_CYCLE_COUNT();

        if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
                if (!RTA_OK(rta, keylen))
                        goto badkey;
                if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
                        goto badkey;
                if (RTA_PAYLOAD(rta) < sizeof(*param))
                        goto badkey;
                param = RTA_DATA(rta);
                ctx->enc_keylen = be32_to_cpu(param->enckeylen);
                key += RTA_ALIGN(rta->rta_len);
                keylen -= RTA_ALIGN(rta->rta_len);
                if (keylen < ctx->enc_keylen)
                        goto badkey;
                ctx->auth_keylen = keylen - ctx->enc_keylen;

                if (ctx->cipher_mode == DRV_CIPHER_CTR) {
                        /* the nonce is stored in bytes at end of key */
                        if (ctx->enc_keylen <
                            (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
                                goto badkey;
                        /* Copy nonce from last 4 bytes in CTR key to
                         * first 4 bytes in CTR IV */
                        memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + ctx->enc_keylen -
                                CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
                        /* Set CTR key size */
                        ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
                }
        } else { /* non-authenc - has just one key */
                ctx->enc_keylen = keylen;
                ctx->auth_keylen = 0;
        }

        rc = validate_keys_sizes(ctx);
        if (unlikely(rc != 0))
                goto badkey;

        END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_0);
        /* STAT_PHASE_1: Copy key to ctx */
        START_CYCLE_COUNT();

        /* Get key material */
        memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
        if (ctx->enc_keylen == 24)
                memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
                rc = ssi_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
                if (rc != 0)
                        goto badkey;
        }

        END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_1);

        /* STAT_PHASE_2: Create sequence */
        START_CYCLE_COUNT();

        switch (ctx->auth_mode) {
        case DRV_HASH_SHA1:
        case DRV_HASH_SHA256:
                seq_len = hmac_setkey(desc, ctx);
                break;
        case DRV_HASH_XCBC_MAC:
                seq_len = xcbc_setkey(desc, ctx);
                break;
        case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
                break; /* No auth. key setup */
        default:
                SSI_LOG_ERR("Unsupported authenc (%d)\n", ctx->auth_mode);
                rc = -ENOTSUPP;
                goto badkey;
        }

        END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_2);

        /* STAT_PHASE_3: Submit sequence to HW */
        START_CYCLE_COUNT();

        if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
#ifdef ENABLE_CYCLE_COUNT
                ssi_req.op_type = STAT_OP_TYPE_SETKEY;
#endif
                rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 0);
                if (unlikely(rc != 0)) {
                        SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
                        goto setkey_error;
                }
        }

        /* Update STAT_PHASE_3 */
        END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_3);
        return rc;

badkey:
        crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

setkey_error:
        return rc;
}

#if SSI_CC_HAS_AES_CCM
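/*
 * rfc4309(ccm(aes)): per RFC 4309 the last 3 bytes of the key material
 * are the salt that seeds the CCM nonce; strip them off and store them
 * before handing the remaining key to the generic setkey.
 */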
static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
        struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
        int rc = 0;

        if (keylen < 3)
                return -EINVAL;

        keylen -= 3;
        memcpy(ctx->ctr_nonce, key + keylen, 3);

        rc = ssi_aead_setkey(tfm, key, keylen);

        return rc;
}
#endif /*SSI_CC_HAS_AES_CCM*/

static int ssi_aead_setauthsize(
        struct crypto_aead *authenc,
        unsigned int authsize)
{
        struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);

        CHECK_AND_RETURN_UPON_FIPS_ERROR();
        /* Unsupported auth. sizes */
        if ((authsize == 0) ||
            (authsize > crypto_aead_maxauthsize(authenc))) {
                return -ENOTSUPP;
        }

        ctx->authsize = authsize;
        SSI_LOG_DEBUG("authlen=%d\n", ctx->authsize);

        return 0;
}

#if SSI_CC_HAS_AES_CCM
static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
                                      unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return ssi_aead_setauthsize(authenc, authsize);
}

static int ssi_ccm_setauthsize(struct crypto_aead *authenc,
                                      unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 6:
        case 8:
        case 10:
        case 12:
        case 14:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return ssi_aead_setauthsize(authenc, authsize);
}
#endif /*SSI_CC_HAS_AES_CCM*/

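/*
 * Build the HW descriptor that feeds the associated data into the
 * current flow. DLLI is used when the data is one contiguous DMA
 * segment; MLLI references a link-table previously copied into SRAM.
 * For XCBC with a non-empty payload the descriptor is marked "not last"
 * so the MAC engine keeps its state open for the data that follows.
 */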
static inline void
ssi_aead_create_assoc_desc(
        struct aead_request *areq,
        unsigned int flow_mode,
        HwDesc_s desc[],
        unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum ssi_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
        unsigned int idx = *seq_size;

        switch (assoc_dma_type) {
        case SSI_DMA_BUF_DLLI:
                SSI_LOG_DEBUG("ASSOC buffer type DLLI\n");
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
                        sg_dma_address(areq->src),
                        areq->assoclen, NS_BIT);
                HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC && (areq_ctx->cryptlen > 0))
                        HW_DESC_SET_DIN_NOT_LAST_INDICATION(&desc[idx]);
                break;
        case SSI_DMA_BUF_MLLI:
                SSI_LOG_DEBUG("ASSOC buffer type MLLI\n");
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_MLLI,
                                     areq_ctx->assoc.sram_addr,
                                     areq_ctx->assoc.mlli_nents,
                                     NS_BIT);
                HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC && (areq_ctx->cryptlen > 0))
                        HW_DESC_SET_DIN_NOT_LAST_INDICATION(&desc[idx]);
                break;
        case SSI_DMA_BUF_NULL:
        default:
                SSI_LOG_ERR("Invalid ASSOC buffer type\n");
        }

        *seq_size = (++idx);
}

static inline void
ssi_aead_process_authenc_data_desc(
        struct aead_request *areq,
        unsigned int flow_mode,
        HwDesc_s desc[],
        unsigned int *seq_size,
        int direct)
{
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
        unsigned int idx = *seq_size;

        switch (data_dma_type) {
        case SSI_DMA_BUF_DLLI:
        {
                struct scatterlist *cipher =
                        (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                        areq_ctx->dstSgl : areq_ctx->srcSgl;

                unsigned int offset =
                        (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                        areq_ctx->dstOffset : areq_ctx->srcOffset;
                SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type DLLI\n");
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
                        (sg_dma_address(cipher) + offset), areq_ctx->cryptlen,
                        NS_BIT);
                HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
                break;
        }
        case SSI_DMA_BUF_MLLI:
        {
                /* DOUBLE-PASS flow (used as the default):
                 * assoc. + iv + data are compacted into one table;
                 * if assoclen is zero, only the IV is processed */
                ssi_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
                uint32_t mlli_nents = areq_ctx->assoc.mlli_nents;

                if (likely(areq_ctx->is_single_pass == true)) {
                        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                                mlli_addr = areq_ctx->dst.sram_addr;
                                mlli_nents = areq_ctx->dst.mlli_nents;
                        } else {
                                mlli_addr = areq_ctx->src.sram_addr;
                                mlli_nents = areq_ctx->src.mlli_nents;
                        }
                }

                SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type MLLI\n");
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_MLLI,
                        mlli_addr, mlli_nents, NS_BIT);
                HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
                break;
        }
        case SSI_DMA_BUF_NULL:
        default:
                SSI_LOG_ERR("AUTHENC: Invalid SRC/DST buffer type\n");
        }

        *seq_size = (++idx);
}

static inline void
ssi_aead_process_cipher_data_desc(
        struct aead_request *areq,
        unsigned int flow_mode,
        HwDesc_s desc[],
        unsigned int *seq_size)
{
        unsigned int idx = *seq_size;
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;

        if (areq_ctx->cryptlen == 0)
                return; /*null processing*/

        switch (data_dma_type) {
        case SSI_DMA_BUF_DLLI:
                SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type DLLI\n");
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
                        (sg_dma_address(areq_ctx->srcSgl) + areq_ctx->srcOffset),
                        areq_ctx->cryptlen, NS_BIT);
                HW_DESC_SET_DOUT_DLLI(&desc[idx],
                        (sg_dma_address(areq_ctx->dstSgl) + areq_ctx->dstOffset),
                        areq_ctx->cryptlen, NS_BIT, 0);
                HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
                break;
        case SSI_DMA_BUF_MLLI:
                SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type MLLI\n");
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_MLLI,
                        areq_ctx->src.sram_addr,
                        areq_ctx->src.mlli_nents, NS_BIT);
                HW_DESC_SET_DOUT_MLLI(&desc[idx],
                        areq_ctx->dst.sram_addr,
                        areq_ctx->dst.mlli_nents, NS_BIT, 0);
                HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
                break;
        case SSI_DMA_BUF_NULL:
        default:
                SSI_LOG_ERR("CIPHER: Invalid SRC/DST buffer type\n");
        }

        *seq_size = (++idx);
}

static inline void ssi_aead_process_digest_result_desc(
        struct aead_request *req,
        HwDesc_s desc[],
        unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        unsigned int idx = *seq_size;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        int direct = req_ctx->gen_ctx.op_type;

        /* Get final ICV result */
        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
                HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
                HW_DESC_SET_DOUT_DLLI(&desc[idx], req_ctx->icv_dma_addr,
                        ctx->authsize, NS_BIT, 1);
                HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                        HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
                        HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
                } else {
                        HW_DESC_SET_CIPHER_CONFIG0(&desc[idx],
                                HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                        HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
                }
        } else { /*Decrypt*/
                /* Get ICV out from hardware */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
                HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
                HW_DESC_SET_DOUT_DLLI(&desc[idx], req_ctx->mac_buf_dma_addr,
                        ctx->authsize, NS_BIT, 1);
                HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
                HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                        HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
                        HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
                } else {
                        HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
                }
        }

        *seq_size = (++idx);
}

static inline void ssi_aead_setup_cipher_desc(
        struct aead_request *req,
        HwDesc_s desc[],
        unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        unsigned int hw_iv_size = req_ctx->hw_iv_size;
        unsigned int idx = *seq_size;
        int direct = req_ctx->gen_ctx.op_type;

        /* Setup cipher state */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direct);
        HW_DESC_SET_FLOW_MODE(&desc[idx], ctx->flow_mode);
        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
                req_ctx->gen_ctx.iv_dma_addr, hw_iv_size, NS_BIT);
        if (ctx->cipher_mode == DRV_CIPHER_CTR) {
                HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
        } else {
                HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
        }
        HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->cipher_mode);
        idx++;

        /* Setup enc. key */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direct);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
        HW_DESC_SET_FLOW_MODE(&desc[idx], ctx->flow_mode);
        if (ctx->flow_mode == S_DIN_to_AES) {
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
                        ((ctx->enc_keylen == 24) ?
                         CC_AES_KEY_SIZE_MAX : ctx->enc_keylen), NS_BIT);
                HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
        } else {
                HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
                        ctx->enc_keylen, NS_BIT);
                HW_DESC_SET_KEY_SIZE_DES(&desc[idx], ctx->enc_keylen);
        }
        HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->cipher_mode);
        idx++;

        *seq_size = idx;
}

static inline void ssi_aead_process_cipher(
        struct aead_request *req,
        HwDesc_s desc[],
        unsigned int *seq_size,
        unsigned int data_flow_mode)
{
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        int direct = req_ctx->gen_ctx.op_type;
        unsigned int idx = *seq_size;

        if (req_ctx->cryptlen == 0)
                return; /*null processing*/

        ssi_aead_setup_cipher_desc(req, desc, &idx);
        ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, &idx);
        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                /* We must wait for the DMA to finish writing all the ciphertext */
                HW_DESC_INIT(&desc[idx]);
                HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
                HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
                idx++;
        }

        *seq_size = idx;
}

static inline void ssi_aead_hmac_setup_digest_desc(
        struct aead_request *req,
        HwDesc_s desc[],
        unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        unsigned int idx = *seq_size;

        /* Loading hash ipad xor key state */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
                ctx->auth_state.hmac.ipad_opad_dma_addr,
                digest_size, NS_BIT);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Load init. digest len (64 bytes) */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
        HW_DESC_SET_DIN_SRAM(&desc[idx],
                ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, hash_mode),
                HASH_LEN_SIZE);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        *seq_size = idx;
}

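/*
 * XCBC-MAC digest setup: clear the MAC state, load the derived K1 as
 * the AES key, and load K2 and K3 into what appear to be auxiliary
 * state registers (STATE1/STATE2) that the hardware consumes when it
 * handles the final block.
 */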
static inline void ssi_aead_xcbc_setup_digest_desc(
        struct aead_request *req,
        HwDesc_s desc[],
        unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int idx = *seq_size;

        /* Loading MAC state */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_DIN_CONST(&desc[idx], 0, CC_AES_BLOCK_SIZE);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
        HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
        HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K1 */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
                             ctx->auth_state.xcbc.xcbc_keys_dma_addr,
                             AES_KEYSIZE_128, NS_BIT);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
        HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
        HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K2 */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
                             (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
                              AES_KEYSIZE_128),
                             AES_KEYSIZE_128, NS_BIT);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
        HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
        HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K3 */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
                             (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
                              2 * AES_KEYSIZE_128),
                             AES_KEYSIZE_128, NS_BIT);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE2);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
        HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
        HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
        idx++;

        *seq_size = idx;
}

static inline void ssi_aead_process_digest_header_desc(
        struct aead_request *req,
        HwDesc_s desc[],
        unsigned int *seq_size)
{
        unsigned int idx = *seq_size;
        /* Hash associated data */
        if (req->assoclen > 0)
                ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);

        /* Hash IV */
        *seq_size = idx;
}

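/*
 * Finalize the HMAC outer hash: pad and close the inner hash, stash its
 * digest in the SRAM workspace, reload the precomputed opad state and
 * the initial digest length, then hash the inner digest to produce the
 * final HMAC value.
 */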
static inline void ssi_aead_process_digest_scheme_desc(
        struct aead_request *req,
        HwDesc_s desc[],
        unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct ssi_aead_handle *aead_handle = ctx->drvdata->aead_handle;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        unsigned int idx = *seq_size;

        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
        HW_DESC_SET_DOUT_SRAM(&desc[idx], aead_handle->sram_workspace_addr,
                        HASH_LEN_SIZE);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1);
        HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD);
        idx++;

        /* Get final ICV result */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_DOUT_SRAM(&desc[idx], aead_handle->sram_workspace_addr,
                        digest_size);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
        HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
        idx++;

        /* Loading hash opad xor key state */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
        HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
                (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
                digest_size, NS_BIT);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Load init. digest len (64 bytes) */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
        HW_DESC_SET_DIN_SRAM(&desc[idx],
                ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, hash_mode),
                HASH_LEN_SIZE);
        HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
        HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
        HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        /* Perform HASH update */
        HW_DESC_INIT(&desc[idx]);
        HW_DESC_SET_DIN_SRAM(&desc[idx], aead_handle->sram_workspace_addr,
                        digest_size);
        HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
        idx++;

        *seq_size = idx;
}

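/*
 * When any of the buffers is described by an MLLI link-table (or the
 * flow is double-pass), copy that table from host memory into the CC
 * SRAM via the BYPASS engine so that later MLLI-type descriptors can
 * reference it.
 */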
1207 static inline void ssi_aead_load_mlli_to_sram(
1208         struct aead_request *req,
1209         HwDesc_s desc[],
1210         unsigned int *seq_size)
1211 {
1212         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1213         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1214         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1215
1216         if (unlikely(
1217                 (req_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
1218                 (req_ctx->data_buff_type == SSI_DMA_BUF_MLLI) ||
1219                 (req_ctx->is_single_pass == false))) {
1220                 SSI_LOG_DEBUG("Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
1221                         (unsigned int)ctx->drvdata->mlli_sram_addr,
1222                         req_ctx->mlli_params.mlli_len);
1223                 /* Copy MLLI table host-to-sram */
1224                 HW_DESC_INIT(&desc[*seq_size]);
1225                 HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
1226                         req_ctx->mlli_params.mlli_dma_addr,
1227                         req_ctx->mlli_params.mlli_len, NS_BIT);
1228                 HW_DESC_SET_DOUT_SRAM(&desc[*seq_size],
1229                         ctx->drvdata->mlli_sram_addr,
1230                         req_ctx->mlli_params.mlli_len);
1231                 HW_DESC_SET_FLOW_MODE(&desc[*seq_size], BYPASS);
1232                 (*seq_size)++;
1233         }
1234 }
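
/*
 * The MLLI (linked-list DMA) table is staged in SRAM whenever either
 * buffer is MLLI-mapped or a double-pass flow is used, presumably because
 * the DMA engine fetches MLLI entries from SRAM rather than from host
 * memory.
 */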
1235
1236 static inline enum FlowMode ssi_aead_get_data_flow_mode(
1237         enum drv_crypto_direction direct,
1238         enum FlowMode setup_flow_mode,
1239         bool is_single_pass)
1240 {
1241         enum FlowMode data_flow_mode;
1242
1243         if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1244                 if (setup_flow_mode == S_DIN_to_AES)
1245                         data_flow_mode = likely(is_single_pass) ?
1246                                 AES_to_HASH_and_DOUT : DIN_AES_DOUT;
1247                 else
1248                         data_flow_mode = likely(is_single_pass) ?
1249                                 DES_to_HASH_and_DOUT : DIN_DES_DOUT;
1250         } else { /* Decrypt */
1251                 if (setup_flow_mode == S_DIN_to_AES)
1252                         data_flow_mode = likely(is_single_pass) ?
1253                                         AES_and_HASH : DIN_AES_DOUT;
1254                 else
1255                         data_flow_mode = likely(is_single_pass) ?
1256                                         DES_and_HASH : DIN_DES_DOUT;
1257         }
1258
1259         return data_flow_mode;
1260 }
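
/*
 * Summary of the mapping above:
 *   encrypt, single-pass: cipher output also feeds the hash
 *                         (AES/DES_to_HASH_and_DOUT);
 *   decrypt, single-pass: cipher and hash consume the input in parallel
 *                         (AES/DES_and_HASH);
 *   double-pass (either): plain cipher flow (DIN_AES/DES_DOUT), with
 *                         authentication done in a separate pass.
 */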
1261
1262 static inline void ssi_aead_hmac_authenc(
1263         struct aead_request *req,
1264         HwDesc_s desc[],
1265         unsigned int *seq_size)
1266 {
1267         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1268         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1269         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1270         int direct = req_ctx->gen_ctx.op_type;
1271         unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
1272                 direct, ctx->flow_mode, req_ctx->is_single_pass);
1273
1274         if (req_ctx->is_single_pass) {
1275                 /* Single-pass flow */
1278                 ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
1279                 ssi_aead_setup_cipher_desc(req, desc, seq_size);
1280                 ssi_aead_process_digest_header_desc(req, desc, seq_size);
1281                 ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, seq_size);
1282                 ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
1283                 ssi_aead_process_digest_result_desc(req, desc, seq_size);
1284                 return;
1285         }
1286
1287         /*
1288          * Double-pass flow: fallback for unsupported single-pass modes,
1289          * i.e. when the assoc. data length is not a multiple of a word.
1290          */
1291         if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1292                 /* encrypt first.. */
1293                 ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
1294                 /* authenc after..*/
1295                 ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
1296                 ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
1297                 ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
1298                 ssi_aead_process_digest_result_desc(req, desc, seq_size);
1299
1300         } else { /*DECRYPT*/
1301                 /* authenc first..*/
1302                 ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
1303                 ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
1304                 ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
1305                 /* decrypt after.. */
1306                 ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
1307                 /* Read the digest result (setting the completion bit);
1308                  * this must come after the cipher operation. */
1309                 ssi_aead_process_digest_result_desc(req, desc, seq_size);
1310         }
1311 }
1312
1313 static inline void
1314 ssi_aead_xcbc_authenc(
1315         struct aead_request *req,
1316         HwDesc_s desc[],
1317         unsigned int *seq_size)
1318 {
1319         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1320         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1321         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1322         int direct = req_ctx->gen_ctx.op_type;
1323         unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
1324                 direct, ctx->flow_mode, req_ctx->is_single_pass);
1325
1326         if (req_ctx->is_single_pass) {
1327                 /* Single-pass flow */
1330                 ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
1331                 ssi_aead_setup_cipher_desc(req, desc, seq_size);
1332                 ssi_aead_process_digest_header_desc(req, desc, seq_size);
1333                 ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, seq_size);
1334                 ssi_aead_process_digest_result_desc(req, desc, seq_size);
1335                 return;
1336         }
1337
1338         /*
1339          * Double-pass flow: fallback for unsupported single-pass modes,
1340          * i.e. when the assoc. data length is not a multiple of a word.
1341          */
1342         if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1343                 /* encrypt first.. */
1344                 ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
1345                 /* authenc after.. */
1346                 ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
1347                 ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
1348                 ssi_aead_process_digest_result_desc(req, desc, seq_size);
1349         } else { /*DECRYPT*/
1350                 /* authenc first.. */
1351                 ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
1352                 ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
1353                 /* decrypt after..*/
1354                 ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
1355                 /* Read the digest result (setting the completion bit);
1356                  * this must come after the cipher operation. */
1357                 ssi_aead_process_digest_result_desc(req, desc, seq_size);
1358         }
1359 }
1360
1361 static int validate_data_size(struct ssi_aead_ctx *ctx,
1362         enum drv_crypto_direction direct, struct aead_request *req)
1363 {
1364         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1365         unsigned int assoclen = req->assoclen;
1366         unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1367                         (req->cryptlen - ctx->authsize) : req->cryptlen;
1368
1369         if (unlikely((direct == DRV_CRYPTO_DIRECTION_DECRYPT) &&
1370                 (req->cryptlen < ctx->authsize)))
1371                 goto data_size_err;
1372
1373         areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
1374
1375         switch (ctx->flow_mode) {
1376         case S_DIN_to_AES:
1377                 if (unlikely((ctx->cipher_mode == DRV_CIPHER_CBC) &&
1378                         !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE)))
1379                         goto data_size_err;
1380                 if (ctx->cipher_mode == DRV_CIPHER_CCM)
1381                         break;
1382                 if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1383                         if (areq_ctx->plaintext_authenticate_only)
1384                                 areq_ctx->is_single_pass = false;
1385                         break;
1386                 }
1388
1389                 if (!IS_ALIGNED(assoclen, sizeof(uint32_t)))
1390                         areq_ctx->is_single_pass = false;
1391
1392                 if ((ctx->cipher_mode == DRV_CIPHER_CTR) &&
1393                     !IS_ALIGNED(cipherlen, sizeof(uint32_t)))
1394                         areq_ctx->is_single_pass = false;
1395
1396                 break;
1397         case S_DIN_to_DES:
1398                 if (unlikely(!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE)))
1399                         goto data_size_err;
1400                 if (unlikely(!IS_ALIGNED(assoclen, DES_BLOCK_SIZE)))
1401                         areq_ctx->is_single_pass = false;
1402                 break;
1403         default:
1404                 SSI_LOG_ERR("Unexpected flow mode (%d)\n", ctx->flow_mode);
1405                 goto data_size_err;
1406         }
1407
1408         return 0;
1409
1410 data_size_err:
1411         return -EINVAL;
1412 }
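
/*
 * In short: CBC payloads must be block-aligned (else -EINVAL), CCM always
 * keeps the single-pass path, GCM falls back to double-pass only for the
 * RFC 4543 authenticate-only case, and the remaining AES modes fall back
 * to double-pass whenever the associated data (or, for CTR, the payload)
 * is not 32-bit aligned; DES flows additionally require block-aligned
 * payloads.
 */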
1413
1414 #if SSI_CC_HAS_AES_CCM
1415 static unsigned int format_ccm_a0(uint8_t *pA0Buff, uint32_t headerSize)
1416 {
1417         unsigned int len = 0;
1418
1419         if (headerSize == 0)
1420                 return 0;
1421         if (headerSize < ((1UL << 16) - (1UL << 8))) {
1422                 len = 2;
1423
1424                 pA0Buff[0] = (headerSize >> 8) & 0xFF;
1425                 pA0Buff[1] = headerSize & 0xFF;
1426         } else {
1427                 len = 6;
1428
1429                 pA0Buff[0] = 0xFF;
1430                 pA0Buff[1] = 0xFE;
1431                 pA0Buff[2] = (headerSize >> 24) & 0xFF;
1432                 pA0Buff[3] = (headerSize >> 16) & 0xFF;
1433                 pA0Buff[4] = (headerSize >> 8) & 0xFF;
1434                 pA0Buff[5] = headerSize & 0xFF;
1435         }
1436
1437         return len;
1438 }
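
/*
 * A0 encoding examples, per RFC 3610 section 2.2:
 *   headerSize = 0x0010  -> { 0x00, 0x10 }                         (len 2)
 *   headerSize = 0x10000 -> { 0xFF, 0xFE, 0x00, 0x01, 0x00, 0x00 } (len 6)
 * The 0xFF 0xFE prefix marks the 32-bit length form.
 */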
1439
1440 static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
1441 {
1442         __be32 data;
1443
1444         memset(block, 0, csize);
1445         block += csize;
1446
1447         if (csize >= 4)
1448                 csize = 4;
1449         else if (msglen > (1 << (8 * csize)))
1450                 return -EOVERFLOW;
1451
1452         data = cpu_to_be32(msglen);
1453         memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
1454
1455         return 0;
1456 }
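
/*
 * Example: set_msg_len(b0 + 12, 0x2A, 4) writes { 0x00, 0x00, 0x00, 0x2A }
 * into the last four octets of B0, i.e. the big-endian l(m) field of a
 * CCM block using L = 4.
 */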
1457
1458 static inline int ssi_aead_ccm(
1459         struct aead_request *req,
1460         HwDesc_s desc[],
1461         unsigned int *seq_size)
1462 {
1463         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1464         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1465         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1466         unsigned int idx = *seq_size;
1467         unsigned int cipher_flow_mode;
1468         dma_addr_t mac_result;
1469
1470
1471         if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1472                 cipher_flow_mode = AES_to_HASH_and_DOUT;
1473                 mac_result = req_ctx->mac_buf_dma_addr;
1474         } else { /* Encrypt */
1475                 cipher_flow_mode = AES_and_HASH;
1476                 mac_result = req_ctx->icv_dma_addr;
1477         }
1478
1479         /* load key */
1480         HW_DESC_INIT(&desc[idx]);       
1481         HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);    
1482         HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, 
1483                         ((ctx->enc_keylen == 24) ? 
1484                          CC_AES_KEY_SIZE_MAX : ctx->enc_keylen), 
1485                          NS_BIT);
1486         HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
1487         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1488         HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1489         HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1490         idx++;
1491
1492         /* load ctr state */
1493         HW_DESC_INIT(&desc[idx]);
1494         HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);
1495         HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
1496         HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1497                         req_ctx->gen_ctx.iv_dma_addr, 
1498                              AES_BLOCK_SIZE, NS_BIT);
1499         HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); 
1500         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
1501         HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1502         idx++;
1503
1504         /* load MAC key */
1505         HW_DESC_INIT(&desc[idx]);       
1506         HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);        
1507         HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, 
1508                         ((ctx->enc_keylen == 24) ? 
1509                          CC_AES_KEY_SIZE_MAX : ctx->enc_keylen), 
1510                          NS_BIT);
1511         HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
1512         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1513         HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1514         HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1515         HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
1516         idx++;
1517
1518         /* load MAC state */
1519         HW_DESC_INIT(&desc[idx]);
1520         HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
1521         HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
1522         HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1523                         req_ctx->mac_buf_dma_addr, 
1524                              AES_BLOCK_SIZE, NS_BIT);
1525         HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); 
1526         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
1527         HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1528         HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
1529         idx++;
1530
1531
1532         /* process assoc data */
1533         if (req->assoclen > 0) {
1534                 ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
1535         } else {
1536                 HW_DESC_INIT(&desc[idx]);
1537                 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
1538                                       sg_dma_address(&req_ctx->ccm_adata_sg),
1539                                      AES_BLOCK_SIZE + req_ctx->ccm_hdr_size,
1540                                      NS_BIT);
1541                 HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
1542                 idx++;
1543         }
1544
1545         /* process the cipher */
1546         if (req_ctx->cryptlen != 0) {
1547                 ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, &idx);
1548         }
1549
1550         /* Read temporal MAC */
1551         HW_DESC_INIT(&desc[idx]);
1552         HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
1553         HW_DESC_SET_DOUT_DLLI(&desc[idx], req_ctx->mac_buf_dma_addr,
1554                               ctx->authsize, NS_BIT, 0);
1555         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
1556         HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1557         HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
1558         HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
1559         idx++;
1560
1561         /* load AES-CTR state (for last MAC calculation)*/
1562         HW_DESC_INIT(&desc[idx]);
1563         HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);
1564         HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1565         HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1566                              req_ctx->ccm_iv0_dma_addr,
1567                              AES_BLOCK_SIZE, NS_BIT);
1568         HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
1569         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
1570         HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1571         idx++;
1572
1573         HW_DESC_INIT(&desc[idx]);
1574         HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
1575         HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
1576         idx++;
1577
1578         /* encrypt the "T" value and store MAC in mac_state */
1579         HW_DESC_INIT(&desc[idx]);
1580         HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1581                         req_ctx->mac_buf_dma_addr, ctx->authsize, NS_BIT);
1582         HW_DESC_SET_DOUT_DLLI(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1583         HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
1584         HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
1585         idx++;  
1586
1587         *seq_size = idx;
1588         return 0;
1589 }
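
/*
 * Per RFC 3610, the tail of the sequence above computes the transmitted
 * MAC U = T XOR first-M-bytes(E(K, A_0)): the CBC-MAC intermediate T is
 * read back into mac_buf, the CTR engine is re-keyed with the A_0 counter
 * block (ccm_iv0, counter bytes zeroed), and one final CTR pass over
 * mac_buf produces U at mac_result.
 */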
1590
1591 static int config_ccm_adata(struct aead_request *req) {
1592         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1593         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1594         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1596         unsigned int lp = req->iv[0];
1597         /* Note: the code assumes that req->iv[0] already contains the value L' of RFC 3610 */
1598         unsigned int l = lp + 1;  /* This is L of RFC 3610: L = L' + 1. */
1599         unsigned int m = ctx->authsize;  /* This is M of RFC 3610, the MAC size in octets. */
1600         uint8_t *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
1601         uint8_t *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
1602         uint8_t *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1603         unsigned int cryptlen = (req_ctx->gen_ctx.op_type == 
1604                                  DRV_CRYPTO_DIRECTION_ENCRYPT) ? 
1605                                 req->cryptlen : 
1606                                 (req->cryptlen - ctx->authsize);
1607         int rc;
1608         memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1609         memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE*3);
1610
1611         /* taken from crypto/ccm.c */
1612         /* 2 <= L <= 8, so 1 <= L' <= 7. */
1613         if (l < 2 || l > 8) {
1614                 SSI_LOG_ERR("illegal iv value %X\n", req->iv[0]);
1615                 return -EINVAL;
1616         }
1617         memcpy(b0, req->iv, AES_BLOCK_SIZE);
1618
1619         /* format control info per RFC 3610 and
1620          * NIST Special Publication 800-38C
1621          */
1622         *b0 |= (8 * ((m - 2) / 2));
1623         if (req->assoclen > 0)
1624                 *b0 |= 64;  /* Enable bit 6 if Adata exists. */
1625
1626         rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write l(m), the message-length field, into the last l octets. */
1627         if (rc != 0)
1628                 return rc;
1629         /* END of "taken from crypto/ccm.c" */
1630
1632         /* l(a) - size of associated data. */
1633         req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
1634
1635         memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1636         req->iv[15] = 1;
1637
1638         memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
1639         ctr_count_0[15] = 0;
1640
1641         return 0;
1642 }
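
/*
 * Worked B0 flags example: with Adata present, M = 8 and req->iv[0] = 3
 * (so L = 4), the flags octet becomes
 *   0x40 | (((8 - 2) / 2) << 3) | 3 = 0x5B
 * i.e. the Adata bit set, M' = 3 and L' = 3.
 */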
1643
1644 static void ssi_rfc4309_ccm_process(struct aead_request *req)
1645 {
1646         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1647         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1648         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1649
1650         /* L' */
1651         memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1652         areq_ctx->ctr_iv[0] = 3;  /* For RFC 4309, always use 4 bytes for message length (at most 2^32-1 bytes). */
1653
1654         /* In RFC 4309 there is an 11-bytes nonce+IV part, that we build here. */
1655         memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce, CCM_BLOCK_NONCE_SIZE);
1656         memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET,    req->iv,        CCM_BLOCK_IV_SIZE);
1657         req->iv = areq_ctx->ctr_iv;     
1658         req->assoclen -= CCM_BLOCK_IV_SIZE;
1659 }
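
/*
 * Resulting counter-block IV layout (assuming the RFC 4309 offsets
 * CCM_BLOCK_NONCE_OFFSET = 1 and CCM_BLOCK_IV_OFFSET = 4):
 *   byte  0     : flags / L' (= 3, i.e. a 4-byte length field)
 *   bytes 1-3   : implicit salt from the key (ctx->ctr_nonce)
 *   bytes 4-11  : explicit IV from the request
 *   bytes 12-15 : zero here; filled in later by config_ccm_adata()
 */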
1660 #endif /*SSI_CC_HAS_AES_CCM*/
1661
1662 #if SSI_CC_HAS_AES_GCM
1663
1664 static inline void ssi_aead_gcm_setup_ghash_desc(
1665         struct aead_request *req,
1666         HwDesc_s desc[],
1667         unsigned int *seq_size)
1668 {
1669         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1670         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1671         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1672         unsigned int idx = *seq_size;
1673
1674         /* load key to AES*/
1675         HW_DESC_INIT(&desc[idx]);       
1676         HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB);    
1677         HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1678         HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, 
1679                         ctx->enc_keylen, NS_BIT); 
1680         HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
1681         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1682         HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1683         idx++;
1684
1685         /* process one zero block to generate hkey */
1686         HW_DESC_INIT(&desc[idx]);
1687         HW_DESC_SET_DIN_CONST(&desc[idx], 0x0, AES_BLOCK_SIZE);
1688         HW_DESC_SET_DOUT_DLLI(&desc[idx],
1689                                   req_ctx->hkey_dma_addr,
1690                                   AES_BLOCK_SIZE,
1691                                   NS_BIT, 0); 
1692         HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
1693         idx++;
1694
1695         /* Memory Barrier */
1696         HW_DESC_INIT(&desc[idx]);
1697         HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
1698         HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
1699         idx++;
1700
1701         /* Load GHASH subkey */
1702         HW_DESC_INIT(&desc[idx]);
1703         HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1704                         req_ctx->hkey_dma_addr, 
1705                                  AES_BLOCK_SIZE, NS_BIT);
1706         HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
1707         HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1708         HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
1709         HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH); 
1710         HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);   
1711         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1712         idx++;
1713
1714         /* Configure the HASH engine to work with GHASH.
1715          * Since it was not possible to extend the HASH submodes to add GHASH,
1716          * the following command is necessary to select GHASH (per the HW designers). */
1717         HW_DESC_INIT(&desc[idx]);
1718         HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
1719         HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
1720         HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1721         HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
1722         HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH); 
1723         HW_DESC_SET_CIPHER_DO(&desc[idx], 1); /* 1 = AES_SK RKEK */
1724         HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1725         HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED); 
1726         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1727         idx++;
1728
1729         /* Load GHASH initial STATE (which is 0). (for any hash there is an initial state) */
1730         HW_DESC_INIT(&desc[idx]);
1731         HW_DESC_SET_DIN_CONST(&desc[idx], 0x0, AES_BLOCK_SIZE);
1732         HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
1733         HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
1734         HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
1735         HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
1736         HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED); 
1737         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
1738         idx++;
1739
1740         *seq_size = idx;
1741 }
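
/*
 * The zero-block encryption above derives the GHASH subkey
 * H = E(K, 0^128), as defined by NIST SP 800-38D; H is then loaded as the
 * GHASH key and the GHASH state is initialized to zero.
 */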
1742
1743 static inline void ssi_aead_gcm_setup_gctr_desc(
1744         struct aead_request *req,
1745         HwDesc_s desc[],
1746         unsigned int *seq_size)
1747 {
1748         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1749         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1750         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1751         unsigned int idx = *seq_size;
1752
1753         /* load key to AES*/
1754         HW_DESC_INIT(&desc[idx]);       
1755         HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);   
1756         HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1757         HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, 
1758                         ctx->enc_keylen, NS_BIT); 
1759         HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
1760         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
1761         HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1762         idx++;
1763
1764         if ((req_ctx->cryptlen != 0) && !req_ctx->plaintext_authenticate_only) {
1765                 /* load initial AES/CTR counter value (J0 incremented by 2) */
1766                 HW_DESC_INIT(&desc[idx]);
1767                 HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
1768                 HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
1769                 HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1770                                 req_ctx->gcm_iv_inc2_dma_addr, 
1771                                          AES_BLOCK_SIZE, NS_BIT);
1772                 HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);   
1773                 HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
1774                 HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1775                 idx++;
1776         }
1777
1778         *seq_size = idx;
1779 }
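
/*
 * With a 96-bit IV the pre-counter block is J0 = IV || 0^31 || 1; the
 * first data block uses inc32(J0) = IV || 2, which is why gcm_iv_inc2
 * (counter value 2) is loaded here while gcm_iv_inc1 (counter value 1) is
 * reserved for encrypting the final tag.
 */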
1780
1781 static inline void ssi_aead_process_gcm_result_desc(
1782         struct aead_request *req,
1783         HwDesc_s desc[],
1784         unsigned int *seq_size)
1785 {
1786         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1787         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1788         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1789         dma_addr_t mac_result; 
1790         unsigned int idx = *seq_size;
1791
1792         if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1793                 mac_result = req_ctx->mac_buf_dma_addr;
1794         } else { /* Encrypt */
1795                 mac_result = req_ctx->icv_dma_addr;
1796         }
1797
1798         /* process(ghash) gcm_block_len */
1799         HW_DESC_INIT(&desc[idx]);
1800         HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
1801                 req_ctx->gcm_block_len_dma_addr,
1802                 AES_BLOCK_SIZE, NS_BIT);
1803         HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
1804         idx++;
1805
1806         /* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
1807         HW_DESC_INIT(&desc[idx]);
1808         HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
1809         HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
1810         HW_DESC_SET_DOUT_DLLI(&desc[idx], req_ctx->mac_buf_dma_addr,
1811                                   AES_BLOCK_SIZE, NS_BIT, 0);
1812         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
1813         HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
1814         HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
1815
1816         idx++; 
1817
1818         /* load the initial AES/CTR counter value (J0 incremented by 1) */
1819         HW_DESC_INIT(&desc[idx]);
1820         HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
1821         HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
1822         HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1823                                  req_ctx->gcm_iv_inc1_dma_addr, 
1824                                  AES_BLOCK_SIZE, NS_BIT);
1825         HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);   
1826         HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
1827         HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
1828         idx++;
1829
1830         /* Memory Barrier */
1831         HW_DESC_INIT(&desc[idx]);
1832         HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
1833         HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
1834         idx++;
1835
1836         /* process GCTR on stored GHASH and store MAC in mac_state*/
1837         HW_DESC_INIT(&desc[idx]);
1838         HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
1839         HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
1840                 req_ctx->mac_buf_dma_addr,
1841                 AES_BLOCK_SIZE, NS_BIT);
1842         HW_DESC_SET_DOUT_DLLI(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1843         HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
1844         HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
1845         idx++;  
1846
1847         *seq_size = idx;
1848 }
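
/*
 * The sequence above completes the tag computation of SP 800-38D:
 *   S = GHASH_H(A || C || len(A)_64 || len(C)_64)
 *   T = MSB_t(GCTR(J0, S))
 * i.e. the length block is GHASHed, the GHASH state is stored to mac_buf,
 * and one GCTR pass with the J0 counter (gcm_iv_inc1) encrypts it into
 * mac_result.
 */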
1849
1850 static inline int ssi_aead_gcm(
1851         struct aead_request *req,
1852         HwDesc_s desc[],
1853         unsigned int *seq_size)
1854 {
1855         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1857         unsigned int cipher_flow_mode;
1858
1859         if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1860                 cipher_flow_mode = AES_and_HASH;
1861         } else { /* Encrypt */
1862                 cipher_flow_mode = AES_to_HASH_and_DOUT;
1863         }
1864
1865
1866         /* In RFC 4543 there is no data to encrypt; just copy data from src to dst. */
1867         if (req_ctx->plaintext_authenticate_only) {
1868                 ssi_aead_process_cipher_data_desc(req, BYPASS, desc, seq_size);
1869                 ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
1870                 /* process(ghash) assoc data */
1871                 ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
1872                 ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
1873                 ssi_aead_process_gcm_result_desc(req, desc, seq_size);
1875                 return 0;
1876         }
1877
1878         /* For GCM and RFC 4106. */
1879         ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
1880         /* process(ghash) assoc data */
1881         if (req->assoclen > 0)
1882                 ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
1883         ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
1884         /* process(gctr+ghash) */
1885         if (req_ctx->cryptlen != 0)
1886                 ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size); 
1887         ssi_aead_process_gcm_result_desc(req, desc, seq_size);
1888
1890         return 0;
1891 }
1892
1893 #ifdef CC_DEBUG
1894 static inline void ssi_aead_dump_gcm(
1895         const char* title,
1896         struct aead_request *req)
1897 {
1898         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1899         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1900         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1901
1902         if (ctx->cipher_mode != DRV_CIPHER_GCTR)
1903                 return;
1904
1905         if (title != NULL) {
1906                 SSI_LOG_DEBUG("----------------------------------------------------------------------------------");
1907                 SSI_LOG_DEBUG("%s\n", title);
1908         }
1909
1910         SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n",
1911                       ctx->cipher_mode, ctx->authsize, ctx->enc_keylen, req->assoclen, req_ctx->cryptlen);
1912
1913         if (ctx->enckey != NULL)
1914                 dump_byte_array("mac key", ctx->enckey, 16);
1916
1917         dump_byte_array("req->iv",req->iv, AES_BLOCK_SIZE);
1918
1919         dump_byte_array("gcm_iv_inc1",req_ctx->gcm_iv_inc1, AES_BLOCK_SIZE);
1920
1921         dump_byte_array("gcm_iv_inc2",req_ctx->gcm_iv_inc2, AES_BLOCK_SIZE);
1922
1923         dump_byte_array("hkey",req_ctx->hkey, AES_BLOCK_SIZE);
1924
1925         dump_byte_array("mac_buf",req_ctx->mac_buf, AES_BLOCK_SIZE);
1926
1927         dump_byte_array("gcm_len_block",req_ctx->gcm_len_block.lenA, AES_BLOCK_SIZE);
1928
1929         if (req->src != NULL && req->cryptlen)
1930                 dump_byte_array("req->src", sg_virt(req->src), req->cryptlen + req->assoclen);
1931
1932         if (req->dst != NULL)
1933                 dump_byte_array("req->dst", sg_virt(req->dst), req->cryptlen + ctx->authsize + req->assoclen);
1936 }
1937 #endif
1938
1939 static int config_gcm_context(struct aead_request *req) {
1940         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1941         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1942         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1943         
1944         unsigned int cryptlen = (req_ctx->gen_ctx.op_type == 
1945                                  DRV_CRYPTO_DIRECTION_ENCRYPT) ? 
1946                                 req->cryptlen : 
1947                                 (req->cryptlen - ctx->authsize);
1948         __be32 counter = cpu_to_be32(2);
1949
1950         SSI_LOG_DEBUG("config_gcm_context() cryptlen = %d, req->assoclen = %d ctx->authsize = %d \n", cryptlen, req->assoclen, ctx->authsize);
1951
1952         memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1953
1954         memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1955
1956         memcpy(req->iv + 12, &counter, 4);
1957         memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
1958
1959         counter = cpu_to_be32(1);
1960         memcpy(req->iv + 12, &counter, 4);
1961         memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1962
1963
1964         if (!req_ctx->plaintext_authenticate_only) {
1965                 __be64 temp64;
1966                 temp64 = cpu_to_be64(req->assoclen * 8);
1967                 memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
1968                 temp64 = cpu_to_be64(cryptlen * 8);
1969                 memcpy(&req_ctx->gcm_len_block.lenC, &temp64, 8);
1970         } else {
1971                 /* RFC 4543: all data (AAD, IV, plaintext) is treated as
1972                  * additional data, i.e. nothing is encrypted. */
1973                 __be64 temp64;
1974                 temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
1975                 memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
1976                 temp64 = 0;
1977                 memcpy(&req_ctx->gcm_len_block.lenC, &temp64, 8);
1978         }
1979
1980         return 0;
1981 }
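
/*
 * Example, with a 12-byte IV ("iv"):
 *   gcm_iv_inc1 = iv || 00 00 00 01   (J0, used to encrypt the tag)
 *   gcm_iv_inc2 = iv || 00 00 00 02   (first data counter block)
 * and gcm_len_block holds len(A) and len(C) in bits, as two big-endian
 * 64-bit values.
 */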
1982
1983 static void ssi_rfc4_gcm_process(struct aead_request *req)
1984 {
1985         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1986         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1987         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1988
1989         memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET, ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1990         memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET,    req->iv, GCM_BLOCK_RFC4_IV_SIZE);
1991         req->iv = areq_ctx->ctr_iv;     
1992         req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
1993 }
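
/*
 * Resulting 12-byte GCM IV layout for RFC 4106/4543 (assuming
 * GCM_BLOCK_RFC4_NONCE_OFFSET = 0 and GCM_BLOCK_RFC4_IV_OFFSET = 4):
 *   bytes 0-3  : implicit salt from the key (ctx->ctr_nonce)
 *   bytes 4-11 : explicit IV from the request
 */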
1994
1995
1996 #endif /*SSI_CC_HAS_AES_GCM*/
1997
1998 static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction direct)
1999 {
2000         int rc = 0;
2001         int seq_len = 0;
2002         HwDesc_s desc[MAX_AEAD_PROCESS_SEQ]; 
2003         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2004         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
2005         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2006         struct device *dev = &ctx->drvdata->plat_dev->dev;
2007         struct ssi_crypto_req ssi_req = {};
2008
2009         DECL_CYCLE_COUNT_RESOURCES;
2010
2011         SSI_LOG_DEBUG("%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptlen=%d\n",
2012                 ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"), ctx, req, req->iv,
2013                 sg_virt(req->src), req->src->offset, sg_virt(req->dst), req->dst->offset, req->cryptlen);
2014         CHECK_AND_RETURN_UPON_FIPS_ERROR();
2015
2016         /* STAT_PHASE_0: Init and sanity checks */
2017         START_CYCLE_COUNT();
2018         
2019         /* Check data length according to mode */
2020         if (unlikely(validate_data_size(ctx, direct, req) != 0)) {
2021                 SSI_LOG_ERR("Unsupported crypt/assoc len %d/%d.\n",
2022                                 req->cryptlen, req->assoclen);
2023                 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
2024                 return -EINVAL;
2025         }
2026
2027         /* Setup DX request structure */
2028         ssi_req.user_cb = (void *)ssi_aead_complete;
2029         ssi_req.user_arg = (void *)req;
2030
2031 #ifdef ENABLE_CYCLE_COUNT
2032         ssi_req.op_type = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
2033                 STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
2034 #endif
2035         /* Setup request context */
2036         areq_ctx->gen_ctx.op_type = direct;
2037         areq_ctx->req_authsize = ctx->authsize;
2038         areq_ctx->cipher_mode = ctx->cipher_mode;
2039
2040         END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_0);
2041
2042         /* STAT_PHASE_1: Map buffers */
2043         START_CYCLE_COUNT();
2044         
2045         if (ctx->cipher_mode == DRV_CIPHER_CTR) {
2046                 /* Build CTR IV - copy the nonce from the last 4 bytes of
2047                  * the CTR key into the first 4 bytes of the CTR IV */
2048                 memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, CTR_RFC3686_NONCE_SIZE);
2049                 if (areq_ctx->backup_giv == NULL) /* user-provided (non-generated) IV */
2050                         memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
2051                                 req->iv, CTR_RFC3686_IV_SIZE);
2052                 /* Initialize counter portion of counter block */
2053                 *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
2054                             CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2055
2056                 /* Replace with counter iv */
2057                 req->iv = areq_ctx->ctr_iv;
2058                 areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
2059         } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) || 
2060                    (ctx->cipher_mode == DRV_CIPHER_GCTR) ) {
2061                 areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
2062                 if (areq_ctx->ctr_iv != req->iv) {
2063                         memcpy(areq_ctx->ctr_iv, req->iv, crypto_aead_ivsize(tfm));
2064                         req->iv = areq_ctx->ctr_iv;
2065                 }
2066         } else {
2067                 areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
2068         }
2069
2070 #if SSI_CC_HAS_AES_CCM
2071         if (ctx->cipher_mode == DRV_CIPHER_CCM) {
2072                 rc = config_ccm_adata(req);
2073                 if (unlikely(rc != 0)) {
2074                         SSI_LOG_ERR("config_ccm_adata() returned with a failure %d!", rc);
2075                         goto exit; 
2076                 }
2077         } else {
2078                 areq_ctx->ccm_hdr_size = ccm_header_size_null;          
2079         }
2080 #else
2081         areq_ctx->ccm_hdr_size = ccm_header_size_null;          
2082 #endif /*SSI_CC_HAS_AES_CCM*/
2083
2084 #if SSI_CC_HAS_AES_GCM 
2085         if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
2086                 rc = config_gcm_context(req);
2087                 if (unlikely(rc != 0)) {
2088                         SSI_LOG_ERR("config_gcm_context() returned with a failure %d!", rc);
2089                         goto exit; 
2090                 }
2091         } 
2092 #endif /*SSI_CC_HAS_AES_GCM*/
2093
2094         rc = ssi_buffer_mgr_map_aead_request(ctx->drvdata, req);
2095         if (unlikely(rc != 0)) {
2096                 SSI_LOG_ERR("map_request() failed\n");
2097                 goto exit;
2098         }
2099
2100         /* do we need to generate IV? */
2101         if (areq_ctx->backup_giv != NULL) {
2102
2103                 /* set the DMA mapped IV address*/
2104                 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
2105                         ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CTR_RFC3686_NONCE_SIZE;
2106                         ssi_req.ivgen_dma_addr_len = 1;
2107                 } else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
2108                         /* In CCM, the IV needs to exist both inside B0 and
2109                          * inside the counter. It is also copied to iv_dma_addr
2110                          * for other reasons (like returning it to the user),
2111                          * so three (identical) IV outputs are used. */
2112                         ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CCM_BLOCK_IV_OFFSET;
2113                         ssi_req.ivgen_dma_addr[1] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_B0_OFFSET          + CCM_BLOCK_IV_OFFSET;
2114                         ssi_req.ivgen_dma_addr[2] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
2115                         ssi_req.ivgen_dma_addr_len = 3;
2116                 } else {
2117                         ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr;
2118                         ssi_req.ivgen_dma_addr_len = 1;
2119                 }
2120
2121                 /* set the IV size (8/16 B long)*/
2122                 ssi_req.ivgen_size = crypto_aead_ivsize(tfm);
2123         }
2124
2125         END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_1);
2126
2127         /* STAT_PHASE_2: Create sequence */
2128         START_CYCLE_COUNT();
2129
2130         /* Load MLLI tables to SRAM if necessary */
2131         ssi_aead_load_mlli_to_sram(req, desc, &seq_len);
2132
2133         /*TODO: move seq len by reference */
2134         switch (ctx->auth_mode) {
2135         case DRV_HASH_SHA1:
2136         case DRV_HASH_SHA256:
2137                 ssi_aead_hmac_authenc(req, desc, &seq_len);
2138                 break;
2139         case DRV_HASH_XCBC_MAC:
2140                 ssi_aead_xcbc_authenc(req, desc, &seq_len);
2141                 break;
2142 #if (SSI_CC_HAS_AES_CCM || SSI_CC_HAS_AES_GCM)
2143         case DRV_HASH_NULL:
2144 #if SSI_CC_HAS_AES_CCM
2145                 if (ctx->cipher_mode == DRV_CIPHER_CCM) {
2146                         ssi_aead_ccm(req, desc, &seq_len);
2147                 }
2148 #endif /*SSI_CC_HAS_AES_CCM*/
2149 #if SSI_CC_HAS_AES_GCM
2150                 if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
2151                         ssi_aead_gcm(req, desc, &seq_len);
2152                 }
2153 #endif /*SSI_CC_HAS_AES_GCM*/
2154                 break;
2155 #endif
2156         default:        
2157                 SSI_LOG_ERR("Unsupported authenc (%d)\n", ctx->auth_mode);
2158                 ssi_buffer_mgr_unmap_aead_request(dev, req);
2159                 rc = -ENOTSUPP;
2160                 goto exit;
2161         }
2162
2163         END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_2);
2164
2165         /* STAT_PHASE_3: Lock HW and push sequence */
2166         START_CYCLE_COUNT();
2167
2168         rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 1);
2169
2170         if (unlikely(rc != -EINPROGRESS)) {
2171                 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
2172                 ssi_buffer_mgr_unmap_aead_request(dev, req);
2173         }
2174
2175         
2176         END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
2177 exit:
2178         return rc;
2179 }
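
/*
 * For reference, a minimal sketch (not part of this driver) of how a
 * kernel caller might exercise one of the AEADs registered below through
 * the generic crypto API; "ccm(aes)", the buffer layout and done_cb/priv
 * are illustrative placeholders, and error handling is omitted:
 *
 *   struct crypto_aead *tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
 *   struct aead_request *areq;
 *   struct scatterlist sg;
 *
 *   crypto_aead_setkey(tfm, key, 16);
 *   crypto_aead_setauthsize(tfm, 8);
 *   areq = aead_request_alloc(tfm, GFP_KERNEL);
 *   aead_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP, done_cb, priv);
 *   sg_init_one(&sg, buf, assoclen + cryptlen + 8);
 *   aead_request_set_ad(areq, assoclen);
 *   aead_request_set_crypt(areq, &sg, &sg, cryptlen, iv);
 *   rc = crypto_aead_encrypt(areq);   (-EINPROGRESS once queued)
 */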
2180
2181 static int ssi_aead_encrypt(struct aead_request *req)
2182 {
2183         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2184         int rc;
2185
2186         /* No generated IV required */
2187         areq_ctx->backup_iv = req->iv;
2188         areq_ctx->backup_giv = NULL;
2189         areq_ctx->is_gcm4543 = false;
2190
2191         areq_ctx->plaintext_authenticate_only = false;
2192
2193         rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2194         if (rc != -EINPROGRESS)
2195                 req->iv = areq_ctx->backup_iv;
2196
2197         return rc;
2198 }
2199
2200 #if SSI_CC_HAS_AES_CCM
2201 static int ssi_rfc4309_ccm_encrypt(struct aead_request *req)
2202 {
2203         /* Very similar to ssi_aead_encrypt() above. */
2204
2205         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2206         int rc = -EINVAL;
2207
2208         if (!valid_assoclen(req)) {
2209                 SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen );
2210                 goto out;
2211         }
2212
2213         /* No generated IV required */
2214         areq_ctx->backup_iv = req->iv;
2215         areq_ctx->backup_giv = NULL;
2216         areq_ctx->is_gcm4543 = true;
2217         
2218         ssi_rfc4309_ccm_process(req);
2219         
2220         rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2221         if (rc != -EINPROGRESS)
2222                 req->iv = areq_ctx->backup_iv;
2223 out:
2224         return rc;
2225 }
2226 #endif /* SSI_CC_HAS_AES_CCM */
2227
2228 static int ssi_aead_decrypt(struct aead_request *req)
2229 {
2230         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2231         int rc;
2232
2233         /* No generated IV required */
2234         areq_ctx->backup_iv = req->iv;
2235         areq_ctx->backup_giv = NULL;
2236         areq_ctx->is_gcm4543 = false;
2237
2238         areq_ctx->plaintext_authenticate_only = false;
2239
2240         rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2241         if (rc != -EINPROGRESS)
2242                 req->iv = areq_ctx->backup_iv;
2243
2244         return rc;
2245
2246 }
2247
2248 #if SSI_CC_HAS_AES_CCM
2249 static int ssi_rfc4309_ccm_decrypt(struct aead_request *req)
2250 {
2251         /* Very similar to ssi_aead_decrypt() above. */
2252
2253         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2254         int rc = -EINVAL;
2255
2256         if (!valid_assoclen(req)) {
2257                 SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
2258                 goto out;
2259         }
2260
2261         /* No generated IV required */
2262         areq_ctx->backup_iv = req->iv;
2263         areq_ctx->backup_giv = NULL;
2264         
2265         areq_ctx->is_gcm4543 = true;
2266         ssi_rfc4309_ccm_process(req);
2267         
2268         rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2269         if (rc != -EINPROGRESS)
2270                 req->iv = areq_ctx->backup_iv;
2271
2272 out:
2273         return rc;
2274 }
2275 #endif /* SSI_CC_HAS_AES_CCM */
2276
2277 #if SSI_CC_HAS_AES_GCM
2278
2279 static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
2280 {
2281         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
2282         int rc = 0;
2283         
2284         SSI_LOG_DEBUG("ssi_rfc4106_gcm_setkey()  keylen %d, key %p \n", keylen, key );
2285
2286         if (keylen < 4)
2287                 return -EINVAL;
2288
2289         keylen -= 4;
2290         memcpy(ctx->ctr_nonce, key + keylen, 4);
2291
2292         rc = ssi_aead_setkey(tfm, key, keylen);
2293
2294         return rc;
2295 }
2296
2297 static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
2298 {
2299         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
2300         int rc = 0;
2301         
2302         SSI_LOG_DEBUG("ssi_rfc4543_gcm_setkey()  keylen %d, key %p \n", keylen, key );
2303
2304         if (keylen < 4)
2305                 return -EINVAL;
2306
2307         keylen -= 4;
2308         memcpy(ctx->ctr_nonce, key + keylen, 4);
2309
2310         rc = ssi_aead_setkey(tfm, key, keylen);
2311
2312         return rc;
2313 }
2314
2315 static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
2316                                       unsigned int authsize)
2317 {
2318         switch (authsize) {
2319         case 4:
2320         case 8:
2321         case 12:
2322         case 13:
2323         case 14:
2324         case 15:
2325         case 16:
2326                 break;
2327         default:
2328                 return -EINVAL;
2329         }
2330
2331         return ssi_aead_setauthsize(authenc, authsize);
2332 }
2333
2334 static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
2335                                       unsigned int authsize)
2336 {
2337         SSI_LOG_DEBUG("ssi_rfc4106_gcm_setauthsize()  authsize %d \n", authsize );
2338
2339         switch (authsize) {
2340         case 8:
2341         case 12:
2342         case 16:
2343                 break;
2344         default:
2345                 return -EINVAL;
2346         }
2347
2348         return ssi_aead_setauthsize(authenc, authsize);
2349 }
2350
2351 static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
2352                                       unsigned int authsize)
2353 {
2354         SSI_LOG_DEBUG("ssi_rfc4543_gcm_setauthsize()  authsize %d \n", authsize );
2355
2356         if (authsize != 16)
2357                 return -EINVAL;
2358
2359         return ssi_aead_setauthsize(authenc, authsize);
2360 }
2361
2362 static int ssi_rfc4106_gcm_encrypt(struct aead_request *req)
2363 {
2364         /* Very similar to ssi_aead_encrypt() above. */
2365
2366         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2367         int rc = -EINVAL;
2368
2369         if (!valid_assoclen(req)) {
2370                 SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
2371                 goto out;
2372         }
2373
2374         /* No generated IV required */
2375         areq_ctx->backup_iv = req->iv;
2376         areq_ctx->backup_giv = NULL;
2377         
2378         areq_ctx->plaintext_authenticate_only = false;
2379
2380         ssi_rfc4_gcm_process(req);
2381         areq_ctx->is_gcm4543 = true;
2382
2383         rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2384         if (rc != -EINPROGRESS)
2385                 req->iv = areq_ctx->backup_iv;
2386 out:
2387         return rc;
2388 }
2389
2390 static int ssi_rfc4543_gcm_encrypt(struct aead_request *req)
2391 {
2392         /* Very similar to ssi_aead_encrypt() above. */
2393
2394         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2395         int rc;
2396         
2397         /* plaintext is not encrypted with rfc4543 */
2398         areq_ctx->plaintext_authenticate_only = true;
2399
2400         /* No generated IV required */
2401         areq_ctx->backup_iv = req->iv;
2402         areq_ctx->backup_giv = NULL;
2403         
2404         ssi_rfc4_gcm_process(req);
2405         areq_ctx->is_gcm4543 = true;
2406
2407         rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2408         if (rc != -EINPROGRESS)
2409                 req->iv = areq_ctx->backup_iv;
2410
2411         return rc;
2412 }
2413
2414 static int ssi_rfc4106_gcm_decrypt(struct aead_request *req)
2415 {
2416         /* Very similar to ssi_aead_decrypt() above. */
2417
2418         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2419         int rc = -EINVAL;
2420
2421         if (!valid_assoclen(req)) {
2422                 SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
2423                 goto out;
2424         }
2425
2426         /* No generated IV required */
2427         areq_ctx->backup_iv = req->iv;
2428         areq_ctx->backup_giv = NULL;
2429         
2430         areq_ctx->plaintext_authenticate_only = false;
2431
2432         ssi_rfc4_gcm_process(req);
2433         areq_ctx->is_gcm4543 = true;
2434
2435         rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2436         if (rc != -EINPROGRESS)
2437                 req->iv = areq_ctx->backup_iv;
2438 out:
2439         return rc;
2440 }
2441
2442 static int ssi_rfc4543_gcm_decrypt(struct aead_request *req)
2443 {
2444         /* Very similar to ssi_aead_decrypt() above. */
2445
2446         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2447         int rc;
2448
2449         /* plaintext is not decrypted with rfc4543 */
2450         areq_ctx->plaintext_authenticate_only = true;
2451
2452         /* No generated IV required */
2453         areq_ctx->backup_iv = req->iv;
2454         areq_ctx->backup_giv = NULL;
2455         
2456         ssi_rfc4_gcm_process(req);
2457         areq_ctx->is_gcm4543 = true;
2458
2459         rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2460         if (rc != -EINPROGRESS)
2461                 req->iv = areq_ctx->backup_iv;
2462
2463         return rc;
2464 }
2465 #endif /* SSI_CC_HAS_AES_GCM */
2466
2467 /* DX Block aead alg */
2468 static struct ssi_alg_template aead_algs[] = {
2469         {
2470                 .name = "authenc(hmac(sha1),cbc(aes))",
2471                 .driver_name = "authenc-hmac-sha1-cbc-aes-dx",
2472                 .blocksize = AES_BLOCK_SIZE,
2473                 .type = CRYPTO_ALG_TYPE_AEAD,
2474                 .template_aead = {
2475                         .setkey = ssi_aead_setkey,
2476                         .setauthsize = ssi_aead_setauthsize,
2477                         .encrypt = ssi_aead_encrypt,
2478                         .decrypt = ssi_aead_decrypt,
2479                         .init = ssi_aead_init,
2480                         .exit = ssi_aead_exit,
2481                         .ivsize = AES_BLOCK_SIZE,
2482                         .maxauthsize = SHA1_DIGEST_SIZE,
2483                 },
2484                 .cipher_mode = DRV_CIPHER_CBC,
2485                 .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_SHA1,
        },
        {
                .name = "authenc(hmac(sha1),cbc(des3_ede))",
                .driver_name = "authenc-hmac-sha1-cbc-des3-dx",
                .blocksize = DES3_EDE_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = ssi_aead_setkey,
                        .setauthsize = ssi_aead_setauthsize,
                        .encrypt = ssi_aead_encrypt,
                        .decrypt = ssi_aead_decrypt,
                        .init = ssi_aead_init,
                        .exit = ssi_aead_exit,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
                .cipher_mode = DRV_CIPHER_CBC,
                .flow_mode = S_DIN_to_DES,
                .auth_mode = DRV_HASH_SHA1,
        },
        {
                .name = "authenc(hmac(sha256),cbc(aes))",
                .driver_name = "authenc-hmac-sha256-cbc-aes-dx",
                .blocksize = AES_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = ssi_aead_setkey,
                        .setauthsize = ssi_aead_setauthsize,
                        .encrypt = ssi_aead_encrypt,
                        .decrypt = ssi_aead_decrypt,
                        .init = ssi_aead_init,
                        .exit = ssi_aead_exit,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
                .cipher_mode = DRV_CIPHER_CBC,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_SHA256,
        },
        {
                .name = "authenc(hmac(sha256),cbc(des3_ede))",
                .driver_name = "authenc-hmac-sha256-cbc-des3-dx",
                .blocksize = DES3_EDE_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = ssi_aead_setkey,
                        .setauthsize = ssi_aead_setauthsize,
                        .encrypt = ssi_aead_encrypt,
                        .decrypt = ssi_aead_decrypt,
                        .init = ssi_aead_init,
                        .exit = ssi_aead_exit,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
                .cipher_mode = DRV_CIPHER_CBC,
                .flow_mode = S_DIN_to_DES,
                .auth_mode = DRV_HASH_SHA256,
        },
        {
                .name = "authenc(xcbc(aes),cbc(aes))",
                .driver_name = "authenc-xcbc-aes-cbc-aes-dx",
                .blocksize = AES_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = ssi_aead_setkey,
                        .setauthsize = ssi_aead_setauthsize,
                        .encrypt = ssi_aead_encrypt,
                        .decrypt = ssi_aead_decrypt,
                        .init = ssi_aead_init,
                        .exit = ssi_aead_exit,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
                .cipher_mode = DRV_CIPHER_CBC,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_XCBC_MAC,
        },
        {
                .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
                .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-dx",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = ssi_aead_setkey,
                        .setauthsize = ssi_aead_setauthsize,
                        .encrypt = ssi_aead_encrypt,
                        .decrypt = ssi_aead_decrypt,
                        .init = ssi_aead_init,
                        .exit = ssi_aead_exit,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
                .cipher_mode = DRV_CIPHER_CTR,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_SHA1,
        },
        {
                .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
                .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-dx",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = ssi_aead_setkey,
                        .setauthsize = ssi_aead_setauthsize,
                        .encrypt = ssi_aead_encrypt,
                        .decrypt = ssi_aead_decrypt,
                        .init = ssi_aead_init,
                        .exit = ssi_aead_exit,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
                .cipher_mode = DRV_CIPHER_CTR,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_SHA256,
        },
        {
                .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
                .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-dx",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = ssi_aead_setkey,
                        .setauthsize = ssi_aead_setauthsize,
                        .encrypt = ssi_aead_encrypt,
                        .decrypt = ssi_aead_decrypt,
                        .init = ssi_aead_init,
                        .exit = ssi_aead_exit,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
                .cipher_mode = DRV_CIPHER_CTR,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_XCBC_MAC,
        },
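        /*
         * The AES-CCM and AES-GCM transforms below are compiled in only
         * when the SSI_CC_HAS_AES_CCM / SSI_CC_HAS_AES_GCM configuration
         * flags are set for the target CryptoCell hardware.
         */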
#if SSI_CC_HAS_AES_CCM
        {
                .name = "ccm(aes)",
                .driver_name = "ccm-aes-dx",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = ssi_aead_setkey,
                        .setauthsize = ssi_ccm_setauthsize,
                        .encrypt = ssi_aead_encrypt,
                        .decrypt = ssi_aead_decrypt,
                        .init = ssi_aead_init,
                        .exit = ssi_aead_exit,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
                .cipher_mode = DRV_CIPHER_CCM,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_NULL,
        },
        {
                .name = "rfc4309(ccm(aes))",
                .driver_name = "rfc4309-ccm-aes-dx",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = ssi_rfc4309_ccm_setkey,
                        .setauthsize = ssi_rfc4309_ccm_setauthsize,
                        .encrypt = ssi_rfc4309_ccm_encrypt,
                        .decrypt = ssi_rfc4309_ccm_decrypt,
                        .init = ssi_aead_init,
                        .exit = ssi_aead_exit,
                        .ivsize = CCM_BLOCK_IV_SIZE,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
                .cipher_mode = DRV_CIPHER_CCM,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_NULL,
        },
#endif /*SSI_CC_HAS_AES_CCM*/
#if SSI_CC_HAS_AES_GCM
        {
                .name = "gcm(aes)",
                .driver_name = "gcm-aes-dx",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = ssi_aead_setkey,
                        .setauthsize = ssi_gcm_setauthsize,
                        .encrypt = ssi_aead_encrypt,
                        .decrypt = ssi_aead_decrypt,
                        .init = ssi_aead_init,
                        .exit = ssi_aead_exit,
                        .ivsize = 12,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
                .cipher_mode = DRV_CIPHER_GCTR,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_NULL,
        },
        {
                .name = "rfc4106(gcm(aes))",
                .driver_name = "rfc4106-gcm-aes-dx",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = ssi_rfc4106_gcm_setkey,
                        .setauthsize = ssi_rfc4106_gcm_setauthsize,
                        .encrypt = ssi_rfc4106_gcm_encrypt,
                        .decrypt = ssi_rfc4106_gcm_decrypt,
                        .init = ssi_aead_init,
                        .exit = ssi_aead_exit,
                        .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
                .cipher_mode = DRV_CIPHER_GCTR,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_NULL,
        },
        {
                .name = "rfc4543(gcm(aes))",
                .driver_name = "rfc4543-gcm-aes-dx",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = ssi_rfc4543_gcm_setkey,
                        .setauthsize = ssi_rfc4543_gcm_setauthsize,
                        .encrypt = ssi_rfc4543_gcm_encrypt,
                        .decrypt = ssi_rfc4543_gcm_decrypt,
                        .init = ssi_aead_init,
                        .exit = ssi_aead_exit,
                        .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
                .cipher_mode = DRV_CIPHER_GCTR,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_NULL,
        },
#endif /*SSI_CC_HAS_AES_GCM*/
};

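/*
 * ssi_aead_create_alg() - build a ssi_crypto_alg instance from a template
 * @template: driver algorithm template to instantiate
 *
 * Copies the template's aead_alg callbacks into a newly allocated
 * ssi_crypto_alg, fills in the generic cra_* fields (name, driver name,
 * module, priority, context size, flags) and records the hardware
 * cipher/flow/auth modes used when programming the engine.
 *
 * Return: the new ssi_crypto_alg, or ERR_PTR(-ENOMEM) on allocation failure.
 */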
static struct ssi_crypto_alg *ssi_aead_create_alg(struct ssi_alg_template *template)
{
        struct ssi_crypto_alg *t_alg;
        struct aead_alg *alg;

        t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
        if (!t_alg) {
                SSI_LOG_ERR("failed to allocate t_alg\n");
                return ERR_PTR(-ENOMEM);
        }
        alg = &template->template_aead;

        snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
        snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                 template->driver_name);
        alg->base.cra_module = THIS_MODULE;
        alg->base.cra_priority = SSI_CRA_PRIO;

        alg->base.cra_ctxsize = sizeof(struct ssi_aead_ctx);
        alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
                         template->type;
        alg->init = ssi_aead_init;
        alg->exit = ssi_aead_exit;

        t_alg->aead_alg = *alg;

        t_alg->cipher_mode = template->cipher_mode;
        t_alg->flow_mode = template->flow_mode;
        t_alg->auth_mode = template->auth_mode;

        return t_alg;
}

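/*
 * ssi_aead_free() - unregister and release all AEAD algorithms
 * @drvdata: driver private context
 *
 * Walks the handle's aead_list, unregisters each algorithm from the
 * crypto API and frees it, then releases the handle itself. Safe to
 * call when no handle was ever allocated. Always returns 0.
 */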
int ssi_aead_free(struct ssi_drvdata *drvdata)
{
        struct ssi_crypto_alg *t_alg, *n;
        struct ssi_aead_handle *aead_handle = drvdata->aead_handle;

        if (aead_handle) {
                /* Remove registered algs */
                list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
                        crypto_unregister_aead(&t_alg->aead_alg);
                        list_del(&t_alg->entry);
                        kfree(t_alg);
                }
                kfree(aead_handle);
                drvdata->aead_handle = NULL;
        }

        return 0;
}

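/*
 * ssi_aead_alloc() - register the driver's AEAD algorithms
 * @drvdata: driver private context
 *
 * Allocates the AEAD handle, reserves a MAX_HMAC_DIGEST_SIZE workspace
 * in the device SRAM pool, then registers every entry of aead_algs[]
 * with the crypto API. On any failure, all algorithms registered so far
 * are torn down again via ssi_aead_free().
 */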
int ssi_aead_alloc(struct ssi_drvdata *drvdata)
{
        struct ssi_aead_handle *aead_handle;
        struct ssi_crypto_alg *t_alg;
        int rc = -ENOMEM;
        int alg;

        aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
        if (!aead_handle) {
                rc = -ENOMEM;
                goto fail0;
        }

        drvdata->aead_handle = aead_handle;

        /* Reserve a MAX_HMAC_DIGEST_SIZE workspace in the device SRAM pool */
        aead_handle->sram_workspace_addr = ssi_sram_mgr_alloc(
                drvdata, MAX_HMAC_DIGEST_SIZE);
        if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
                SSI_LOG_ERR("SRAM pool exhausted\n");
                rc = -ENOMEM;
                goto fail1;
        }

        INIT_LIST_HEAD(&aead_handle->aead_list);

        /* Register each aead_algs[] template with the Linux crypto API */
        for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
                t_alg = ssi_aead_create_alg(&aead_algs[alg]);
                if (IS_ERR(t_alg)) {
                        rc = PTR_ERR(t_alg);
                        SSI_LOG_ERR("%s alg allocation failed\n",
                                 aead_algs[alg].driver_name);
                        goto fail1;
                }
                t_alg->drvdata = drvdata;
                rc = crypto_register_aead(&t_alg->aead_alg);
                if (unlikely(rc != 0)) {
                        SSI_LOG_ERR("%s alg registration failed\n",
                                t_alg->aead_alg.base.cra_driver_name);
                        goto fail2;
                }
                list_add_tail(&t_alg->entry, &aead_handle->aead_list);
                SSI_LOG_DEBUG("Registered %s\n", t_alg->aead_alg.base.cra_driver_name);
        }

        return 0;

fail2:
        /* t_alg is not on aead_list yet, so free it before unwinding */
        kfree(t_alg);
fail1:
        ssi_aead_free(drvdata);
fail0:
        return rc;
}
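
/*
 * Usage sketch (hypothetical call site; the real one lives in the
 * driver init/teardown path, e.g. ssi_driver.c, and may differ):
 *
 *      if (ssi_aead_alloc(drvdata))    // register AEAD algs with crypto API
 *              goto init_failed;
 *      ...
 *      ssi_aead_free(drvdata);         // on remove/teardown
 */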