From cf932f0e99222cc325f0badc1132439d6f278ebe Mon Sep 17 00:00:00 2001
From: Steve Cornelius
Date: Fri, 22 Jun 2012 16:32:08 -0700
Subject: [PATCH] ENGR00215228-6: Externalize scatter-gather handling for
 multiple API modules.

Moved scatter-gather list management outside of the single API module,
in anticipation of multiple API modules which may be switch-selectable.
This includes a number of list-management optimizations, as well as
some aead descriptor optimizations.

Signed-off-by: Steve Cornelius
---
 drivers/crypto/caam/caamalg.c | 731 +++++++++++++++++-----------------
 1 file changed, 362 insertions(+), 369 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 549d52675786..49702b2586fa 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -50,6 +50,8 @@
 #include "desc_constr.h"
 #include "jr.h"
 #include "error.h"
+#include "sg_sw_sec4.h"
+#include "key_gen.h"
 
 /*
  * crypto alg
@@ -202,7 +204,7 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
 {
 	u32 *key_jump_cmd;
 
-	init_sh_desc(desc, HDR_SHARE_WAIT);
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
 
 	/* Skip if already shared */
 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
@@ -458,126 +460,12 @@ static int aead_setauthsize(struct crypto_aead *authenc,
 	return 0;
 }
 
-struct split_key_result {
-	struct completion completion;
-	int err;
-};
-
-static void split_key_done(struct device *dev, u32 *desc, u32 err,
-			   void *context)
-{
-	struct split_key_result *res = context;
-
-#ifdef DEBUG
-	dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
-
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
-
-	res->err = err;
-
-	complete(&res->completion);
-}
-
-/*
-get a split ipad/opad key
-
-Split key generation-----------------------------------------------
-
-[00] 0xb0810008    jobdesc: stidx=1 share=never len=8
-[01] 0x04000014        key: class2->keyreg len=20
-			@0xffe01000
-[03] 0x84410014  operation: cls2-op sha1 hmac init dec
-[04] 0x24940000     fifold: class2 msgdata-last2 len=0 imm
-[05] 0xa4000001       jump: class2 local all ->1 [06]
-[06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
-			@0xffe04000
-*/
-static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
+static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
+			      u32 authkeylen)
 {
-	struct device *jrdev = ctx->jrdev;
-	u32 *desc;
-	struct split_key_result result;
-	dma_addr_t dma_addr_in, dma_addr_out;
-	int ret = 0;
-
-	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
-
-	init_job_desc(desc, 0);
-
-	dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen,
-				     DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, dma_addr_in)) {
-		dev_err(jrdev, "unable to map key input memory\n");
-		kfree(desc);
-		return -ENOMEM;
-	}
-	dma_sync_single_for_device(jrdev, dma_addr_in, authkeylen,
-				   DMA_TO_DEVICE);
-
-	append_key(desc, dma_addr_in, authkeylen, CLASS_2 |
-		   KEY_DEST_CLASS_REG);
-
-	/* Sets MDHA up into an HMAC-INIT */
-	append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT |
-			 OP_ALG_AS_INIT);
-
-	/*
-	 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
-	 * into both pads inside MDHA
-	 */
-	append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
-				FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
-
-	/*
-	 * FIFO_STORE with the explicit split-key content store
-	 * (0x26 output type)
-	 */
-	dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
-				      DMA_FROM_DEVICE);
-	if (dma_mapping_error(jrdev, dma_addr_out)) {
-		dev_err(jrdev, "unable to map key output memory\n");
-		kfree(desc);
-		return -ENOMEM;
-	}
-	append_fifo_store(desc, dma_addr_out, ctx->split_key_len,
-			  LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
-
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1);
-	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
-
-	result.err = 0;
-	init_completion(&result.completion);
-
-	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
-	if (!ret) {
-		/* in progress */
-		wait_for_completion_interruptible(&result.completion);
-		ret = result.err;
-#ifdef DEBUG
-		print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
-			       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
-			       ctx->split_key_pad_len, 1);
-#endif
-	}
-
-	dma_sync_single_for_cpu(jrdev, dma_addr_out, ctx->split_key_pad_len,
-				DMA_FROM_DEVICE);
-	dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len,
-			 DMA_FROM_DEVICE);
-	dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE);
-
-	kfree(desc);
-
-	return ret;
+	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
+			     ctx->split_key_pad_len, key_in, authkeylen,
+			     ctx->alg_op);
 }
 
 static int aead_setkey(struct crypto_aead *aead,
@@ -621,7 +509,7 @@ static int aead_setkey(struct crypto_aead *aead,
 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 #endif
 
-	ret = gen_split_key(ctx, key, authkeylen);
+	ret = gen_split_aead_key(ctx, key, authkeylen);
 	if (ret) {
 		goto badkey;
 	}
@@ -777,31 +665,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	return ret;
 }
 
-/*
- * 16-byte hardware scatter/gather table
- * An 8-byte table exists in the hardware spec, but has never been
- * implemented to date. The 8/16 option is selected at RTL-compile-time.
- * and this selection is visible in the Compile Time Parameters Register
- */
-
-#define LINKTBL_EXT		0x80000000	/* Entry points to table */
-#define LINKTBL_FINAL		0x40000000	/* Last ent in table */
-#define LINKTBL_BPID_MASK	0x000000ff
-#define LINKTBL_BPID_SHIFT	16
-#define LINKTBL_LEN_MASK	0x3fffffff	/* Excludes EXT and FINAL */
-#define LINKTBL_OFFS_MASK	0x00001fff
-
-struct link_tbl_entry {
-#ifdef CONFIG_64BIT
-	u64 ptr;
-#else
-	u32 reserved;
-	u32 ptr;
-#endif
-	u32 len;
-	u32 bpid_offset;	/* BPID in high, offset in lowest bits */
-};
-
 /*
  * aead_edesc - s/w-extended aead descriptor
  * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
@@ -809,18 +672,21 @@ struct link_tbl_entry {
  * @dst_nents: number of segments in output scatterlist
  * @iv_dma: dma address of iv for checking continuity and link table
  * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
- * @link_tbl_bytes: length of dma mapped link_tbl space
- * @link_tbl_dma: bus physical mapped address of h/w link table
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @sec4_sg_dma: bus physical mapped address of h/w link table
  * @hw_desc: the h/w job descriptor followed by any referenced link tables
  */
 struct aead_edesc {
 	int assoc_nents;
+	bool assoc_chained;
 	int src_nents;
+	bool src_chained;
 	int dst_nents;
+	bool dst_chained;
 	dma_addr_t iv_dma;
-	int link_tbl_bytes;
-	dma_addr_t link_tbl_dma;
-	struct link_tbl_entry *link_tbl;
+	int sec4_sg_bytes;
+	dma_addr_t sec4_sg_dma;
+	struct sec4_sg_entry *sec4_sg;
 	u32 hw_desc[0];
 };
 
@@ -830,39 +696,46 @@ struct aead_edesc {
  * @dst_nents: number of segments in output scatterlist
  * @iv_dma: dma address of iv for checking continuity and link table
  * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
- * @link_tbl_bytes: length of dma mapped link_tbl space
- * @link_tbl_dma: bus physical mapped address of h/w link table
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @sec4_sg_dma: bus physical mapped address of h/w link table
  * @hw_desc: the h/w job descriptor followed by any referenced link tables
  */
 struct ablkcipher_edesc {
 	int src_nents;
+	bool src_chained;
 	int dst_nents;
+	bool dst_chained;
 	dma_addr_t iv_dma;
-	int link_tbl_bytes;
-	dma_addr_t link_tbl_dma;
-	struct link_tbl_entry *link_tbl;
+	int sec4_sg_bytes;
+	dma_addr_t sec4_sg_dma;
+	struct sec4_sg_entry *sec4_sg;
 	u32 hw_desc[0];
 };
 
 static void caam_unmap(struct device *dev, struct scatterlist *src,
-		       struct scatterlist *dst, int src_nents, int dst_nents,
-		       dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma,
-		       int link_tbl_bytes)
+		       struct scatterlist *dst, int src_nents,
+		       bool src_chained, int dst_nents, bool dst_chained,
+		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
+		       int sec4_sg_bytes)
 {
-	if (unlikely(dst != src)) {
-		dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
-		dma_sync_sg_for_cpu(dev, dst, dst_nents, DMA_FROM_DEVICE);
-		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+	if (dst != src) {
+		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
+				     src_chained);
+		dma_sync_sg_for_cpu(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
+		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
+				     dst_chained);
 	} else {
-		dma_sync_sg_for_cpu(dev, src, src_nents, DMA_BIDIRECTIONAL);
-		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+		dma_sync_sg_for_cpu(dev, src, src_nents ? : 1,
+				    DMA_BIDIRECTIONAL);
+		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
+				     DMA_BIDIRECTIONAL, src_chained);
 	}
 
 	if (iv_dma)
 		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
-	if (link_tbl_bytes)
-		dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes,
+	if (sec4_sg_bytes)
+		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
 				 DMA_TO_DEVICE);
 }
 
@@ -873,12 +746,13 @@ static void aead_unmap(struct device *dev,
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	int ivsize = crypto_aead_ivsize(aead);
 
-	dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
+	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
+			     DMA_TO_DEVICE, edesc->assoc_chained);
 
 	caam_unmap(dev, req->src, req->dst,
-		   edesc->src_nents, edesc->dst_nents,
-		   edesc->iv_dma, ivsize, edesc->link_tbl_dma,
-		   edesc->link_tbl_bytes);
+		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
+		   edesc->dst_chained, edesc->iv_dma, ivsize,
+		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
 static void ablkcipher_unmap(struct device *dev,
@@ -889,9 +763,9 @@ static void ablkcipher_unmap(struct device *dev,
 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 
 	caam_unmap(dev, req->src, req->dst,
-		   edesc->src_nents, edesc->dst_nents,
-		   edesc->iv_dma, ivsize, edesc->link_tbl_dma,
-		   edesc->link_tbl_bytes);
+		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
+		   edesc->dst_chained, edesc->iv_dma, ivsize,
+		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
@@ -982,7 +856,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 		       sizeof(struct iphdr) + req->assoclen +
 		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
 		       ctx->authsize + 36, 1);
-	if (!err && edesc->link_tbl_bytes) {
+	if (!err && edesc->sec4_sg_bytes) {
 		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
 		print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
 			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
@@ -1066,53 +940,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	ablkcipher_request_complete(req, err);
 }
 
-static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr,
-			       dma_addr_t dma, u32 len, u32 offset)
-{
-#ifndef CONFIG_64BIT
-	link_tbl_ptr->reserved = 0;	/* ensure high half is zero */
-#endif
-	link_tbl_ptr->ptr = dma;
-	link_tbl_ptr->len = (len & LINKTBL_LEN_MASK);
-	/* Does not add in buffer pool ID's at this time */
-	link_tbl_ptr->bpid_offset = (offset & LINKTBL_OFFS_MASK);
-
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr,
-		       sizeof(struct link_tbl_entry), 1);
-#endif
-}
-
-/*
- * convert scatterlist to h/w link table format
- * but does not have final bit; instead, returns last entry
- */
-static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg,
-					     int sg_count, struct link_tbl_entry
-					     *link_tbl_ptr, u32 offset)
-{
-	while (sg_count) {
-		sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg),
-				   sg_dma_len(sg), offset);
-		link_tbl_ptr++;
-		sg = sg_next(sg);
-		sg_count--;
-	}
-	return link_tbl_ptr - 1;
-}
-
-/*
- * convert scatterlist to h/w link table format
- * scatterlist must have been previously dma mapped
- */
-static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count,
-				struct link_tbl_entry *link_tbl_ptr, u32 offset)
-{
-	link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset);
-	link_tbl_ptr->len |= LINKTBL_FINAL;
-}
-
 /*
  * Fill in aead job descriptor
  */
@@ -1128,7 +955,7 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
 	u32 *desc = edesc->hw_desc;
 	u32 out_options = 0, in_options;
 	dma_addr_t dst_dma, src_dma;
-	int len, link_tbl_index = 0;
+	int len, sec4_sg_index = 0;
 
 #ifdef DEBUG
 	debug("assoclen %d cryptlen %d authsize %d\n",
@@ -1154,8 +981,8 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
 		src_dma = sg_dma_address(req->assoc);
 		in_options = 0;
 	} else {
-		src_dma = edesc->link_tbl_dma;
-		link_tbl_index += (edesc->assoc_nents ? : 1) + 1 +
+		src_dma = edesc->sec4_sg_dma;
+		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
 				  (edesc->src_nents ? : 1);
 		in_options = LDST_SGF;
 	}
@@ -1170,7 +997,7 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
 		if (all_contig) {
 			dst_dma = sg_dma_address(req->src);
 		} else {
-			dst_dma = src_dma + sizeof(struct link_tbl_entry) *
+			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
 				  ((edesc->assoc_nents ? : 1) + 1);
 			out_options = LDST_SGF;
 		}
@@ -1178,9 +1005,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
 		if (!edesc->dst_nents) {
 			dst_dma = sg_dma_address(req->dst);
 		} else {
-			dst_dma = edesc->link_tbl_dma +
-				  link_tbl_index *
-				  sizeof(struct link_tbl_entry);
+			dst_dma = edesc->sec4_sg_dma +
+				  sec4_sg_index *
+				  sizeof(struct sec4_sg_entry);
 			out_options = LDST_SGF;
 		}
 	}
@@ -1206,7 +1033,7 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 	u32 *desc = edesc->hw_desc;
 	u32 out_options = 0, in_options;
 	dma_addr_t dst_dma, src_dma;
-	int len, link_tbl_index = 0;
+	int len, sec4_sg_index = 0;
 
 #ifdef DEBUG
 	debug("assoclen %d cryptlen %d authsize %d\n",
@@ -1231,8 +1058,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 		src_dma = sg_dma_address(req->assoc);
 		in_options = 0;
 	} else {
-		src_dma = edesc->link_tbl_dma;
-		link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents;
+		src_dma = edesc->sec4_sg_dma;
+		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
 		in_options = LDST_SGF;
 	}
 	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
@@ -1242,13 +1069,13 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 		dst_dma = edesc->iv_dma;
 	} else {
 		if (likely(req->src == req->dst)) {
-			dst_dma = src_dma + sizeof(struct link_tbl_entry) *
+			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
 				  edesc->assoc_nents;
 			out_options = LDST_SGF;
 		} else {
-			dst_dma = edesc->link_tbl_dma +
-				  link_tbl_index *
-				  sizeof(struct link_tbl_entry);
+			dst_dma = edesc->sec4_sg_dma +
+				  sec4_sg_index *
+				  sizeof(struct sec4_sg_entry);
 			out_options = LDST_SGF;
 		}
 	}
@@ -1269,7 +1096,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
 	u32 *desc = edesc->hw_desc;
 	u32 out_options = 0, in_options;
 	dma_addr_t dst_dma, src_dma;
-	int len, link_tbl_index = 0;
+	int len, sec4_sg_index = 0;
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
@@ -1287,8 +1114,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
 		src_dma = edesc->iv_dma;
 		in_options = 0;
 	} else {
-		src_dma = edesc->link_tbl_dma;
-		link_tbl_index += (iv_contig ? 0 : 1) + edesc->src_nents;
+		src_dma = edesc->sec4_sg_dma;
+		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
 		in_options = LDST_SGF;
 	}
 	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
@@ -1297,44 +1124,22 @@ if (likely(req->src == req->dst)) {
 		if (!edesc->src_nents && iv_contig) {
 			dst_dma = sg_dma_address(req->src);
 		} else {
-			dst_dma = edesc->link_tbl_dma +
-				  sizeof(struct link_tbl_entry);
+			dst_dma = edesc->sec4_sg_dma +
+				  sizeof(struct sec4_sg_entry);
 			out_options = LDST_SGF;
 		}
 	} else {
 		if (!edesc->dst_nents) {
 			dst_dma = sg_dma_address(req->dst);
 		} else {
-			dst_dma = edesc->link_tbl_dma +
-				  link_tbl_index * sizeof(struct link_tbl_entry);
+			dst_dma = edesc->sec4_sg_dma +
+				  sec4_sg_index * sizeof(struct sec4_sg_entry);
 			out_options = LDST_SGF;
 		}
 	}
 	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
 }
 
-/*
- * derive number of elements in scatterlist
- */
-static int sg_count(struct scatterlist *sg_list, int nbytes)
-{
-	struct scatterlist *sg = sg_list;
-	int sg_nents = 0;
-
-	while (nbytes > 0) {
-		sg_nents++;
-		nbytes -= sg->length;
-		if (!sg_is_last(sg) && (sg + 1)->length == 0)
-			BUG(); /* Not support chaining */
-		sg = scatterwalk_sg_next(sg);
-	}
-
-	if (likely(sg_nents == 1))
-		return 0;
-
-	return sg_nents;
-}
-
 /*
  * allocate and map the aead extended descriptor
  */
@@ -1351,31 +1156,32 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	dma_addr_t iv_dma = 0;
 	int sgc;
 	bool all_contig = true;
+	bool assoc_chained = false, src_chained = false, dst_chained = false;
 	int ivsize = crypto_aead_ivsize(aead);
-	int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;
+	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
 
-	assoc_nents = sg_count(req->assoc, req->assoclen);
-	src_nents = sg_count(req->src, req->cryptlen);
+	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
+	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
 
 	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen);
+		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
 
-	sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
-			 DMA_BIDIRECTIONAL);
+	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
+				 DMA_BIDIRECTIONAL, assoc_chained);
 	dma_sync_sg_for_device(jrdev, req->assoc, sgc, DMA_BIDIRECTIONAL);
 	if (likely(req->src == req->dst)) {
-		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-				 DMA_BIDIRECTIONAL);
+		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+					 DMA_BIDIRECTIONAL, src_chained);
 		dma_sync_sg_for_device(jrdev, req->src, sgc, DMA_BIDIRECTIONAL);
 	} else {
-		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-				 DMA_TO_DEVICE);
+		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+					 DMA_TO_DEVICE, src_chained);
 		dma_sync_sg_for_device(jrdev, req->src, sgc, DMA_TO_DEVICE);
-		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
-				 DMA_FROM_DEVICE);
+		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+					 DMA_FROM_DEVICE, dst_chained);
 	}
 
 	/* Check if data are contiguous */
@@ -1386,53 +1192,56 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		all_contig = false;
 		assoc_nents = assoc_nents ? : 1;
 		src_nents = src_nents ? : 1;
-		link_tbl_len = assoc_nents + 1 + src_nents;
+		sec4_sg_len = assoc_nents + 1 + src_nents;
 	}
-	link_tbl_len += dst_nents;
+	sec4_sg_len += dst_nents;
 
-	link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);
+	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
 
 	dma_sync_single_for_device(jrdev, iv_dma, ivsize, DMA_TO_DEVICE);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
 	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
-			link_tbl_bytes, GFP_DMA | flags);
+			sec4_sg_bytes, GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
 	edesc->assoc_nents = assoc_nents;
+	edesc->assoc_chained = assoc_chained;
 	edesc->src_nents = src_nents;
+	edesc->src_chained = src_chained;
 	edesc->dst_nents = dst_nents;
+	edesc->dst_chained = dst_chained;
 	edesc->iv_dma = iv_dma;
-	edesc->link_tbl_bytes = link_tbl_bytes;
-	edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
+	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
 			 desc_bytes;
-	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
-					     link_tbl_bytes, DMA_TO_DEVICE);
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
 	*all_contig_ptr = all_contig;
 
-	link_tbl_index = 0;
+	sec4_sg_index = 0;
 	if (!all_contig) {
-		sg_to_link_tbl(req->assoc,
-			       (assoc_nents ? : 1),
-			       edesc->link_tbl +
-			       link_tbl_index, 0);
-		link_tbl_index += assoc_nents ? : 1;
-		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
+		sg_to_sec4_sg(req->assoc,
+			      (assoc_nents ? : 1),
+			      edesc->sec4_sg +
+			      sec4_sg_index, 0);
+		sec4_sg_index += assoc_nents ? : 1;
+		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
-		link_tbl_index += 1;
-		sg_to_link_tbl_last(req->src,
-				    (src_nents ? : 1),
-				    edesc->link_tbl +
-				    link_tbl_index, 0);
-		link_tbl_index += src_nents ? : 1;
+		sec4_sg_index += 1;
+		sg_to_sec4_sg_last(req->src,
+				   (src_nents ? : 1),
+				   edesc->sec4_sg +
+				   sec4_sg_index, 0);
+		sec4_sg_index += src_nents ? : 1;
 	}
 	if (dst_nents) {
-		sg_to_link_tbl_last(req->dst, dst_nents,
-				    edesc->link_tbl + link_tbl_index, 0);
+		sg_to_sec4_sg_last(req->dst, dst_nents,
+				   edesc->sec4_sg + sec4_sg_index, 0);
 	}
-	dma_sync_single_for_device(jrdev, edesc->link_tbl_dma, link_tbl_bytes,
+	dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
 				   DMA_TO_DEVICE);
 
 	return edesc;
@@ -1539,30 +1348,31 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 	int sgc;
 	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
 	int ivsize = crypto_aead_ivsize(aead);
-	int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;
+	bool assoc_chained = false, src_chained = false, dst_chained = false;
+	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
 
-	assoc_nents = sg_count(req->assoc, req->assoclen);
-	src_nents = sg_count(req->src, req->cryptlen);
+	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
+	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
 
 	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen);
+		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
 
-	sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
-			 DMA_BIDIRECTIONAL);
+	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
+				 DMA_BIDIRECTIONAL, assoc_chained);
 	dma_sync_sg_for_device(jrdev, req->assoc, assoc_nents ? : 1,
 			       DMA_BIDIRECTIONAL);
 	if (likely(req->src == req->dst)) {
-		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-				 DMA_BIDIRECTIONAL);
+		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+					 DMA_BIDIRECTIONAL, src_chained);
 		dma_sync_sg_for_device(jrdev, req->src, src_nents ? : 1,
 				       DMA_BIDIRECTIONAL);
 	} else {
-		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-				 DMA_TO_DEVICE);
+		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+					 DMA_TO_DEVICE, src_chained);
 		dma_sync_sg_for_device(jrdev, req->src, src_nents ? : 1,
-					DMA_TO_DEVICE);
-		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
-				 DMA_FROM_DEVICE);
+				       DMA_TO_DEVICE);
+		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+					 DMA_FROM_DEVICE, dst_chained);
 	}
 
 	/* Check if data are contiguous */
@@ -1574,62 +1384,65 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 		contig &= ~GIV_DST_CONTIG;
 	if (unlikely(req->src != req->dst)) {
 		dst_nents = dst_nents ? : 1;
-		link_tbl_len += 1;
+		sec4_sg_len += 1;
 	}
 	if (!(contig & GIV_SRC_CONTIG)) {
 		assoc_nents = assoc_nents ? : 1;
 		src_nents = src_nents ? : 1;
-		link_tbl_len += assoc_nents + 1 + src_nents;
+		sec4_sg_len += assoc_nents + 1 + src_nents;
 		if (likely(req->src == req->dst))
 			contig &= ~GIV_DST_CONTIG;
 	}
-	link_tbl_len += dst_nents;
+	sec4_sg_len += dst_nents;
 
-	link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);
+	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
 
 	dma_sync_single_for_device(jrdev, iv_dma, ivsize, DMA_TO_DEVICE);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
 	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
-			link_tbl_bytes, GFP_DMA | flags);
+			sec4_sg_bytes, GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
 	edesc->assoc_nents = assoc_nents;
+	edesc->assoc_chained = assoc_chained;
 	edesc->src_nents = src_nents;
+	edesc->src_chained = src_chained;
 	edesc->dst_nents = dst_nents;
+	edesc->dst_chained = dst_chained;
 	edesc->iv_dma = iv_dma;
-	edesc->link_tbl_bytes = link_tbl_bytes;
-	edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
-			  desc_bytes;
-	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
-					     link_tbl_bytes, DMA_TO_DEVICE);
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
+	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
+			 desc_bytes;
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
 	*contig_ptr = contig;
 
-	link_tbl_index = 0;
+	sec4_sg_index = 0;
 	if (!(contig & GIV_SRC_CONTIG)) {
-		sg_to_link_tbl(req->assoc, assoc_nents,
-			       edesc->link_tbl +
-			       link_tbl_index, 0);
-		link_tbl_index += assoc_nents;
-		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
+		sg_to_sec4_sg(req->assoc, assoc_nents,
+			      edesc->sec4_sg +
+			      sec4_sg_index, 0);
+		sec4_sg_index += assoc_nents;
+		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
 				   iv_dma, ivsize, 0);
-		link_tbl_index += 1;
-		sg_to_link_tbl_last(req->src, src_nents,
-				    edesc->link_tbl +
-				    link_tbl_index, 0);
-		link_tbl_index += src_nents;
+		sec4_sg_index += 1;
+		sg_to_sec4_sg_last(req->src, src_nents,
+				   edesc->sec4_sg +
+				   sec4_sg_index, 0);
+		sec4_sg_index += src_nents;
 	}
 	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
-		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
+		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
 				   iv_dma, ivsize, 0);
-		link_tbl_index += 1;
-		sg_to_link_tbl_last(req->dst, dst_nents,
-				    edesc->link_tbl + link_tbl_index, 0);
+		sec4_sg_index += 1;
+		sg_to_sec4_sg_last(req->dst, dst_nents,
+				   edesc->sec4_sg + sec4_sg_index, 0);
 	}
-	dma_sync_single_for_device(jrdev, edesc->link_tbl_dma, link_tbl_bytes,
+	dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
 				   DMA_TO_DEVICE);
 
 	return edesc;
@@ -1695,30 +1508,31 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
 					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
 		       GFP_KERNEL : GFP_ATOMIC;
-	int src_nents, dst_nents = 0, link_tbl_bytes;
+	int src_nents, dst_nents = 0, sec4_sg_bytes;
 	struct ablkcipher_edesc *edesc;
 	dma_addr_t iv_dma = 0;
 	bool iv_contig = false;
 	int sgc;
 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-	int link_tbl_index;
+	bool src_chained = false, dst_chained = false;
+	int sec4_sg_index;
 
-	src_nents = sg_count(req->src, req->nbytes);
+	src_nents = sg_count(req->src, req->nbytes, &src_chained);
 
-	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->nbytes);
+	if (req->dst != req->src)
+		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
 
 	if (likely(req->src == req->dst)) {
-		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-				 DMA_BIDIRECTIONAL);
+		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+					 DMA_BIDIRECTIONAL, src_chained);
 		dma_sync_sg_for_device(jrdev, req->src, sgc, DMA_BIDIRECTIONAL);
 	} else {
-		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-				 DMA_TO_DEVICE);
+		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+					 DMA_TO_DEVICE, src_chained);
 		dma_sync_sg_for_device(jrdev, req->src, sgc, DMA_TO_DEVICE);
-		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
-				 DMA_FROM_DEVICE);
+		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+					 DMA_FROM_DEVICE, dst_chained);
 	}
 
 	/* FIXME: no test for sgc values returned above... */
@@ -1732,47 +1546,49 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 		iv_contig = true;
 	else
 		src_nents = src_nents ? : 1;
-	link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
-			 sizeof(struct link_tbl_entry);
+	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+			sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
 	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
-			link_tbl_bytes, GFP_DMA | flags);
+			sec4_sg_bytes, GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
 	edesc->src_nents = src_nents;
+	edesc->src_chained = src_chained;
 	edesc->dst_nents = dst_nents;
-	edesc->link_tbl_bytes = link_tbl_bytes;
-	edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+	edesc->dst_chained = dst_chained;
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
+	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
 			 desc_bytes;
 
-	link_tbl_index = 0;
+	sec4_sg_index = 0;
 	if (!iv_contig) {
-		sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0);
-		sg_to_link_tbl_last(req->src, src_nents,
-				    edesc->link_tbl + 1, 0);
-		link_tbl_index += 1 + src_nents;
+		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
+		sg_to_sec4_sg_last(req->src, src_nents,
+				   edesc->sec4_sg + 1, 0);
+		sec4_sg_index += 1 + src_nents;
 	}
 
-	if (unlikely(dst_nents)) {
-		sg_to_link_tbl_last(req->dst, dst_nents,
-				    edesc->link_tbl + link_tbl_index, 0);
+	if (dst_nents) {
+		sg_to_sec4_sg_last(req->dst, dst_nents,
+				   edesc->sec4_sg + sec4_sg_index, 0);
 	}
 
-	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
-					     link_tbl_bytes, DMA_TO_DEVICE);
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
 	edesc->iv_dma = iv_dma;
 
-	dma_sync_single_for_device(jrdev, edesc->link_tbl_dma, link_tbl_bytes,
+	dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
 				   DMA_TO_DEVICE);
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
-		       link_tbl_bytes, 1);
+	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+		       sec4_sg_bytes, 1);
 #endif
 
 	*iv_contig_out = iv_contig;
@@ -1875,6 +1691,25 @@ struct caam_alg_template {
 
 static struct caam_alg_template driver_algs[] = {
 	/* single-pass ipsec_esp descriptor */
+	{
+		.name = "authenc(hmac(md5),cbc(aes))",
+		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+	},
 	{
 		.name = "authenc(hmac(sha1),cbc(aes))",
 		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
@@ -1894,6 +1729,26 @@ static struct caam_alg_template driver_algs[] = {
 		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
 		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
 	},
+	{
+		.name = "authenc(hmac(sha224),cbc(aes))",
+		.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+	},
 	{
 		.name = "authenc(hmac(sha256),cbc(aes))",
 		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
@@ -1914,6 +1769,26 @@ static struct caam_alg_template driver_algs[] = {
 				   OP_ALG_AAI_HMAC_PRECOMP,
 		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
 	},
+	{
+		.name = "authenc(hmac(sha384),cbc(aes))",
+		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+	},
 	{
 		.name = "authenc(hmac(sha512),cbc(aes))",
 		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
@@ -1934,6 +1809,25 @@ static struct caam_alg_template driver_algs[] = {
 				   OP_ALG_AAI_HMAC_PRECOMP,
 		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
 	},
+	{
+		.name = "authenc(hmac(md5),cbc(des3_ede))",
+		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+	},
 	{
 		.name = "authenc(hmac(sha1),cbc(des3_ede))",
 		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
@@ -1953,6 +1847,26 @@ static struct caam_alg_template driver_algs[] = {
 		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
 		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
 	},
+	{
+		.name = "authenc(hmac(sha224),cbc(des3_ede))",
+		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+	},
 	{
 		.name = "authenc(hmac(sha256),cbc(des3_ede))",
 		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
@@ -1973,6 +1887,26 @@ static struct caam_alg_template driver_algs[] = {
 				   OP_ALG_AAI_HMAC_PRECOMP,
 		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
 	},
+	{
+		.name = "authenc(hmac(sha384),cbc(des3_ede))",
+		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+	},
 	{
 		.name = "authenc(hmac(sha512),cbc(des3_ede))",
 		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
@@ -1993,6 +1927,25 @@ static struct caam_alg_template driver_algs[] = {
 				   OP_ALG_AAI_HMAC_PRECOMP,
 		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
 	},
+	{
+		.name = "authenc(hmac(md5),cbc(des))",
+		.driver_name = "authenc-hmac-md5-cbc-des-caam",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+	},
 	{
 		.name = "authenc(hmac(sha1),cbc(des))",
 		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
@@ -2012,6 +1965,26 @@ static struct caam_alg_template driver_algs[] = {
 		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
 		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
 	},
+	{
+		.name = "authenc(hmac(sha224),cbc(des))",
+		.driver_name = "authenc-hmac-sha224-cbc-des-caam",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+	},
 	{
 		.name = "authenc(hmac(sha256),cbc(des))",
 		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
@@ -2032,6 +2005,26 @@ static struct caam_alg_template driver_algs[] = {
 				   OP_ALG_AAI_HMAC_PRECOMP,
 		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
 	},
+	{
+		.name = "authenc(hmac(sha384),cbc(des))",
+		.driver_name = "authenc-hmac-sha384-cbc-des-caam",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+	},
 	{
 		.name = "authenc(hmac(sha512),cbc(des))",
 		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
-- 
2.39.5
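
Note: the conversion helpers called above (dma_to_sec4_sg_one(),
sg_to_sec4_sg(), sg_to_sec4_sg_last()) are the externalized, renamed
equivalents of the link_tbl code this patch removes. A minimal sketch of
what the new sg_sw_sec4.h might contain, reconstructed from the removed
link_tbl helpers and the call sites above; the entry layout and mask
names in the real header are assumptions:

/* hardware S/G table entry, per the removed struct link_tbl_entry */
#define SEC4_SG_LEN_FIN		0x40000000	/* last entry in table */
#define SEC4_SG_LEN_MASK	0x3fffffff	/* excludes EXT and FINAL */
#define SEC4_SG_OFFS_MASK	0x00001fff

struct sec4_sg_entry {
#ifdef CONFIG_64BIT
	u64 ptr;
#else
	u32 reserved;
	u32 ptr;
#endif
	u32 len;
	u32 bpid_offset;	/* BPID in high bits, offset in low bits */
};

/* fill one h/w S/G entry from a single dma address/length pair */
static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
				      dma_addr_t dma, u32 len, u32 offset)
{
#ifndef CONFIG_64BIT
	sec4_sg_ptr->reserved = 0;	/* ensure high half is zero */
#endif
	sec4_sg_ptr->ptr = dma;
	sec4_sg_ptr->len = len & SEC4_SG_LEN_MASK;
	/* buffer pool IDs are not filled in at this time */
	sec4_sg_ptr->bpid_offset = offset & SEC4_SG_OFFS_MASK;
}

/*
 * convert a dma-mapped scatterlist to h/w S/G table format;
 * does not set the final bit, returns the last entry written
 */
static inline struct sec4_sg_entry *
sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
	      struct sec4_sg_entry *sec4_sg_ptr, u32 offset)
{
	while (sg_count) {
		dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
				   sg_dma_len(sg), offset);
		sec4_sg_ptr++;
		sg = scatterwalk_sg_next(sg);
		sg_count--;
	}
	return sec4_sg_ptr - 1;
}

/* same conversion, but mark the last entry as final */
static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
				      struct sec4_sg_entry *sec4_sg_ptr,
				      u32 offset)
{
	sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
	sec4_sg_ptr->len |= SEC4_SG_LEN_FIN;
}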
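
The old local sg_count() would BUG() on a chained scatterlist; its
replacement reports chaining through a bool out-parameter instead, and
mapping goes through dma_map_sg_chained()/dma_unmap_sg_chained(). A
plausible sketch, assuming the semantics visible at the call sites (a
return of 0 still means one contiguous segment suffices, and chained
lists are mapped entry by entry because dma_map_sg() does not walk
crypto-style chained lists):

static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	*chained = false;
	while (nbytes > 0) {
		sg_nents++;
		nbytes -= sg->length;
		/* a zero-length successor marks a link to another table */
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			*chained = true;
		sg = scatterwalk_sg_next(sg);
	}

	if (likely(sg_nents == 1))
		return 0;

	return sg_nents;
}

static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
			      unsigned int nents,
			      enum dma_data_direction dir, bool chained)
{
	if (unlikely(chained)) {
		int i;

		/* map entry by entry across the chain links */
		for (i = 0; i < nents; i++) {
			dma_map_sg(dev, sg, 1, dir);
			sg = scatterwalk_sg_next(sg);
		}
	} else {
		dma_map_sg(dev, sg, nents, dir);
	}
	return nents;
}

static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
				unsigned int nents,
				enum dma_data_direction dir, bool chained)
{
	if (unlikely(chained)) {
		int i;

		for (i = 0; i < nents; i++) {
			dma_unmap_sg(dev, sg, 1, dir);
			sg = scatterwalk_sg_next(sg);
		}
	} else {
		dma_unmap_sg(dev, sg, nents, dir);
	}
	return nents;
}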
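
Split-key generation (the HMAC ipad/opad expansion) moves behind
key_gen.h in the same way; the job-descriptor sequence deleted from
gen_split_key() above becomes the shared implementation. The prototype
implied by the call in gen_split_aead_key(), with argument names
inferred from that call:

/* key_gen.h (sketch): split-key generation over the CAAM job ring */
u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
		  int split_key_pad_len, const u8 *key_in, u32 authkeylen,
		  u32 alg_op);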