/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/crypto.h>
#include <linux/version.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/hash.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "ssi_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "ssi_cipher.h"
#ifdef CC_DEBUG
#define DUMP_SGL(sg) \
	do { \
		SSI_LOG_DEBUG("page=%p offset=%u length=%u (dma_len=%u) " \
			      "dma_addr=%08x\n", sg_page(sg), (sg)->offset, \
			      (sg)->length, sg_dma_len(sg), \
			      (sg)->dma_address); \
	} while (0)
#define DUMP_MLLI_TABLE(mlli_p, nents) \
	do { \
		SSI_LOG_DEBUG("mlli=%pK nents=%u\n", (mlli_p), (nents)); \
		while ((nents)--) { \
			SSI_LOG_DEBUG("addr=0x%08X size=0x%08X\n", \
				      (mlli_p)[LLI_WORD0_OFFSET], \
				      (mlli_p)[LLI_WORD1_OFFSET]); \
			(mlli_p) += LLI_ENTRY_WORD_SIZE; \
		} \
	} while (0)
#define GET_DMA_BUFFER_TYPE(buff_type) ( \
	((buff_type) == SSI_DMA_BUF_NULL) ? "BUF_NULL" : \
	((buff_type) == SSI_DMA_BUF_DLLI) ? "BUF_DLLI" : \
	((buff_type) == SSI_DMA_BUF_MLLI) ? "BUF_MLLI" : "BUF_INVALID")
#else
#define DX_BUFFER_MGR_DUMP_SGL(sg)
#define DX_BUFFER_MGR_DUMP_MLLI_TABLE(mlli_p, nents)
#define GET_DMA_BUFFER_TYPE(buff_type)
#endif /* CC_DEBUG */
enum dma_buffer_type {
	DMA_NULL_TYPE = -1,
	DMA_SGL_TYPE = 1,
	DMA_BUFF_TYPE = 2,
};

struct buff_mgr_handle {
	struct dma_pool *mlli_buffs_pool;
};

union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};
/* Buffer array, describing all buffers to be linked into one MLLI table */
struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};
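/*
 * Note on DMA buffer types (illustrative summary, not part of the original
 * sources): a request that fits one contiguous DMA segment is programmed
 * directly as DLLI; anything larger or scattered is described by an MLLI
 * (multi link-list item) table built from a struct buffer_array. A minimal
 * sketch of the intended call pattern, assuming a mapped SGL `sg` with
 * `nents` entries and a caller-owned `mlli_params` whose curr_pool was
 * taken from the buff_mgr handle:
 *
 *	struct buffer_array sg_data;
 *	u32 mlli_nents = 0;
 *
 *	sg_data.num_of_buffers = 0;
 *	ssi_buffer_mgr_add_scatterlist_entry(&sg_data, nents, sg, data_len,
 *					     0, true, &mlli_nents);
 *	rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, &mlli_params);
 */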
/**
 * ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries.
 *
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 */
static unsigned int ssi_buffer_mgr_get_sgl_nents(
	struct scatterlist *sg_list, unsigned int nbytes, u32 *lbytes,
	bool *is_chained)
{
	unsigned int nents = 0;

	while (nbytes != 0) {
		if (sg_is_chain(sg_list)) {
			SSI_LOG_ERR("Unexpected chained entry in sg (entry=0x%X)\n",
				    nents);
			BUG();
		}
		if (sg_list->length != 0) {
			nents++;
			/* get the number of bytes in the last entry */
			*lbytes = nbytes;
			nbytes -= (sg_list->length > nbytes) ?
					nbytes : sg_list->length;
			sg_list = sg_next(sg_list);
		} else {
			sg_list = (struct scatterlist *)sg_page(sg_list);
			if (is_chained)
				*is_chained = true;
		}
	}
	SSI_LOG_DEBUG("nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}
/**
 * ssi_buffer_mgr_zero_sgl() - Zero scatterlist data.
 *
 * @sgl: SG list to zero
 * @data_len: Number of bytes to zero, starting from the list head
 */
void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
{
	struct scatterlist *current_sg = sgl;
	u32 sg_index = 0;

	while (sg_index <= data_len) {
		if (!current_sg) {
			/* reached the end of the sgl --> just return back */
			return;
		}
		memset(sg_virt(current_sg), 0, current_sg->length);
		sg_index += current_sg->length;
		current_sg = sg_next(current_sg);
	}
}
/**
 * ssi_buffer_mgr_copy_scatterlist_portion() - Copy scatterlist data,
 * from to_skip to end, to dest and vice versa.
 *
 * @dest: Linear buffer to copy to/from
 * @sg: SG list to copy from/to
 * @to_skip: Offset of the first byte to copy
 * @end: Offset of the last byte to copy
 * @direct: SSI_SG_TO_BUF or SSI_SG_FROM_BUF
 */
void ssi_buffer_mgr_copy_scatterlist_portion(
	u8 *dest, struct scatterlist *sg,
	u32 to_skip, u32 end,
	enum ssi_sg_cpy_direct direct)
{
	u32 nents, lbytes;

	nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip), 0,
		       (direct == SSI_SG_TO_BUF));
}
static inline int ssi_buffer_mgr_render_buff_to_mlli(
	dma_addr_t buff_dma, u32 buff_size, u32 *curr_nents,
	u32 **mlli_entry_pp)
{
	u32 *mlli_entry_p = *mlli_entry_pp;
	u32 new_nents;

	/* Verify there is no memory overflow */
	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
		return -ENOMEM;

	/* Handle buffer longer than 64 kbytes */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		cc_lli_set_addr(mlli_entry_p, buff_dma);
		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
		SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",
			      *curr_nents,
			      mlli_entry_p[LLI_WORD0_OFFSET],
			      mlli_entry_p[LLI_WORD1_OFFSET]);
		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
		mlli_entry_p = mlli_entry_p + 2;
		(*curr_nents)++;
	}
	/* Last entry */
	cc_lli_set_addr(mlli_entry_p, buff_dma);
	cc_lli_set_size(mlli_entry_p, buff_size);
	SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",
		      *curr_nents,
		      mlli_entry_p[LLI_WORD0_OFFSET],
		      mlli_entry_p[LLI_WORD1_OFFSET]);
	mlli_entry_p = mlli_entry_p + 2;
	*mlli_entry_pp = mlli_entry_p;
	(*curr_nents)++;
	return 0;
}
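/*
 * Worked example (illustration only): with CC_MAX_MLLI_ENTRY_SIZE at the
 * 64 KB noted above, rendering a 150 KB contiguous buffer emits three LLI
 * entries of 64 KB + 64 KB + 22 KB and advances *curr_nents by 3. The
 * overflow check is deliberately conservative: it reserves
 * buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1 entries before rendering anything.
 */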
static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
	struct scatterlist *sgl, u32 sgl_data_len, u32 sgl_offset,
	u32 *curr_nents, u32 **mlli_entry_pp)
{
	struct scatterlist *curr_sgl = sgl;
	u32 *mlli_entry_p = *mlli_entry_pp;
	s32 rc = 0;

	for ( ; (curr_sgl) && (sgl_data_len != 0);
	      curr_sgl = sg_next(curr_sgl)) {
		u32 entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
				sg_dma_len(curr_sgl) - sgl_offset :
				sgl_data_len;
		sgl_data_len -= entry_data_len;
		rc = ssi_buffer_mgr_render_buff_to_mlli(
			sg_dma_address(curr_sgl) + sgl_offset, entry_data_len,
			curr_nents, &mlli_entry_p);
		if (rc != 0)
			return rc;
		sgl_offset = 0;
	}
	*mlli_entry_pp = mlli_entry_p;
	return 0;
}
static int ssi_buffer_mgr_generate_mlli(
	struct device *dev,
	struct buffer_array *sg_data,
	struct mlli_params *mlli_params)
{
	u32 *mlli_p;
	u32 total_nents = 0, prev_total_nents = 0;
	int rc = 0, i;

	SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers);

	/* Allocate memory from the pointed pool */
	mlli_params->mlli_virt_addr = dma_pool_alloc(
			mlli_params->curr_pool, GFP_KERNEL,
			&mlli_params->mlli_dma_addr);
	if (unlikely(!mlli_params->mlli_virt_addr)) {
		SSI_LOG_ERR("dma_pool_alloc() failed\n");
		rc = -ENOMEM;
		goto build_mlli_exit;
	}
	/* Point to start of MLLI */
	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
	/* Go over all SG's and link them into one MLLI table */
	for (i = 0; i < sg_data->num_of_buffers; i++) {
		if (sg_data->type[i] == DMA_SGL_TYPE)
			rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
				sg_data->entry[i].sgl,
				sg_data->total_data_len[i],
				sg_data->offset[i], &total_nents, &mlli_p);
		else /* DMA_BUFF_TYPE */
			rc = ssi_buffer_mgr_render_buff_to_mlli(
				sg_data->entry[i].buffer_dma,
				sg_data->total_data_len[i], &total_nents,
				&mlli_p);
		if (rc != 0)
			return rc;

		/* set last bit in the current table */
		if (sg_data->mlli_nents[i]) {
			/* Calculate the current MLLI table length for the
			 * length field in the descriptor
			 */
			*sg_data->mlli_nents[i] +=
				(total_nents - prev_total_nents);
			prev_total_nents = total_nents;
		}
	}

	/* Set MLLI size for the bypass operation */
	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);

	SSI_LOG_DEBUG("MLLI params: virt_addr=%pK dma_addr=0x%llX mlli_len=0x%X\n",
		      mlli_params->mlli_virt_addr,
		      (unsigned long long)mlli_params->mlli_dma_addr,
		      mlli_params->mlli_len);

build_mlli_exit:
	return rc;
}
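/*
 * Layout note (illustration, inferred from the accessors used above): each
 * MLLI entry occupies two 32-bit words, written via cc_lli_set_addr()
 * (word LLI_WORD0_OFFSET) and cc_lli_set_size() (word LLI_WORD1_OFFSET),
 * which is why the renderers advance mlli_entry_p by 2 per entry and why
 * mlli_len comes out as total_nents * LLI_ENTRY_BYTE_SIZE bytes.
 */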
static inline void ssi_buffer_mgr_add_buffer_entry(
	struct buffer_array *sgl_data,
	dma_addr_t buffer_dma, unsigned int buffer_len,
	bool is_last_entry, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	SSI_LOG_DEBUG("index=%u single_buff=0x%llX buffer_len=0x%08X is_last=%d\n",
		      index, (unsigned long long)buffer_dma, buffer_len,
		      is_last_entry);
	sgl_data->nents[index] = 1;
	sgl_data->entry[index].buffer_dma = buffer_dma;
	sgl_data->offset[index] = 0;
	sgl_data->total_data_len[index] = buffer_len;
	sgl_data->type[index] = DMA_BUFF_TYPE;
	sgl_data->is_last[index] = is_last_entry;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}
static inline void ssi_buffer_mgr_add_scatterlist_entry(
	struct buffer_array *sgl_data,
	unsigned int nents,
	struct scatterlist *sgl,
	unsigned int data_len,
	unsigned int data_offset,
	bool is_last_table,
	u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	SSI_LOG_DEBUG("index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
		      index, nents, sgl, data_len, is_last_table);
	sgl_data->nents[index] = nents;
	sgl_data->entry[index].sgl = sgl;
	sgl_data->offset[index] = data_offset;
	sgl_data->total_data_len[index] = data_len;
	sgl_data->type[index] = DMA_SGL_TYPE;
	sgl_data->is_last[index] = is_last_table;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}
static int
ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg,
			  u32 nents, enum dma_data_direction direction)
{
	u32 i, j;
	struct scatterlist *l_sg = sg;

	for (i = 0; i < nents; i++) {
		if (!l_sg)
			break;
		if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
			SSI_LOG_ERR("dma_map_page() sg buffer failed\n");
			goto err;
		}
		l_sg = sg_next(l_sg);
	}
	return nents;

err:
	/* Restore mapped parts */
	for (j = 0; j < i; j++) {
		if (!sg)
			break;
		dma_unmap_sg(dev, sg, 1, direction);
		sg = sg_next(sg);
	}
	return 0;
}
static int ssi_buffer_mgr_map_scatterlist(
	struct device *dev, struct scatterlist *sg,
	unsigned int nbytes, int direction,
	u32 *nents, u32 max_sg_nents,
	u32 *lbytes, u32 *mapped_nents)
{
	bool is_chained = false;

	if (sg_is_last(sg)) {
		/* One entry only case - set to DLLI */
		if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
			SSI_LOG_ERR("dma_map_sg() single buffer failed\n");
			return -ENOMEM;
		}
		SSI_LOG_DEBUG("Mapped sg: dma_address=0x%llX "
			      "page=%p addr=%pK offset=%u "
			      "length=%u\n",
			      (unsigned long long)sg_dma_address(sg),
			      sg_page(sg), sg_virt(sg),
			      sg->offset, sg->length);
		*lbytes = nbytes;
		*nents = 1;
		*mapped_nents = 1;
	} else { /* !sg_is_last */
		*nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
						      &is_chained);
		if (*nents > max_sg_nents) {
			SSI_LOG_ERR("Too many fragments. current %d max %d\n",
				    *nents, max_sg_nents);
			*nents = 0;
			return -ENOMEM;
		}
		if (!is_chained) {
			/* In case of mmu the number of mapped nents might
			 * be changed from the original sgl nents
			 */
			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
			if (unlikely(*mapped_nents == 0)) {
				*nents = 0;
				SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
				return -ENOMEM;
			}
		} else {
			/* In this case the driver maps entry by entry so it
			 * must have the same nents before and after map
			 */
			*mapped_nents = ssi_buffer_mgr_dma_map_sg(dev, sg,
								  *nents,
								  direction);
			if (unlikely(*mapped_nents != *nents)) {
				*nents = *mapped_nents;
				SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
				return -ENOMEM;
			}
		}
	}

	return 0;
}
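/*
 * Mapping strategy recap (informational): a single-entry SGL is mapped as
 * one segment and can be used as DLLI material; a multi-entry SGL is mapped
 * in one dma_map_sg() call, where an IOMMU may merge segments
 * (*mapped_nents <= *nents); a chained SGL is mapped entry by entry through
 * ssi_buffer_mgr_dma_map_sg(), so nents must be identical before and after
 * the mapping.
 */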
static inline int ssi_aead_handle_config_buf(struct device *dev,
					     struct aead_req_ctx *areq_ctx,
					     u8 *config_data,
					     struct buffer_array *sg_data,
					     unsigned int assoclen)
{
	SSI_LOG_DEBUG(" handle additional data config set to DLLI\n");
	/* create sg for the current buffer */
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
				DMA_TO_DEVICE) != 1)) {
		SSI_LOG_ERR("dma_map_sg() config buffer failed\n");
		return -ENOMEM;
	}
	SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX page=%p addr=%pK "
		      "offset=%u length=%u\n",
		      (unsigned long long)sg_dma_address(&areq_ctx->ccm_adata_sg),
		      sg_page(&areq_ctx->ccm_adata_sg),
		      sg_virt(&areq_ctx->ccm_adata_sg),
		      areq_ctx->ccm_adata_sg.offset,
		      areq_ctx->ccm_adata_sg.length);
	/* prepare for case of MLLI */
	if (assoclen > 0) {
		ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1,
						     &areq_ctx->ccm_adata_sg,
						     (AES_BLOCK_SIZE +
						      areq_ctx->ccm_hdr_size),
						     0, false, NULL);
	}
	return 0;
}
static inline int ssi_ahash_handle_curr_buf(struct device *dev,
					    struct ahash_req_ctx *areq_ctx,
					    u8 *curr_buff,
					    u32 curr_buff_cnt,
					    struct buffer_array *sg_data)
{
	SSI_LOG_DEBUG(" handle curr buff %x set to DLLI\n", curr_buff_cnt);
	/* create sg for the current buffer */
	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
	if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
				DMA_TO_DEVICE) != 1)) {
		SSI_LOG_ERR("dma_map_sg() src buffer failed\n");
		return -ENOMEM;
	}
	SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX page=%p addr=%pK "
		      "offset=%u length=%u\n",
		      (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
		      sg_page(areq_ctx->buff_sg),
		      sg_virt(areq_ctx->buff_sg),
		      areq_ctx->buff_sg->offset,
		      areq_ctx->buff_sg->length);
	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
	areq_ctx->curr_sg = areq_ctx->buff_sg;
	areq_ctx->in_nents = 0;
	/* prepare for case of MLLI */
	ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, areq_ctx->buff_sg,
					     curr_buff_cnt, 0, false, NULL);
	return 0;
}
void ssi_buffer_mgr_unmap_blkcipher_request(
	struct device *dev,
	void *ctx,
	unsigned int ivsize,
	struct scatterlist *src,
	struct scatterlist *dst)
{
	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;

	if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
		SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n",
			      (unsigned long long)req_ctx->gen_ctx.iv_dma_addr,
			      ivsize);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize,
				 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
						   DMA_TO_DEVICE);
	}
	/* Release pool */
	if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
	SSI_LOG_DEBUG("Unmapped req->src=%pK\n", sg_virt(src));

	if (src != dst) {
		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
		SSI_LOG_DEBUG("Unmapped req->dst=%pK\n", sg_virt(dst));
	}
}
int ssi_buffer_mgr_map_blkcipher_request(
	struct ssi_drvdata *drvdata,
	void *ctx,
	unsigned int ivsize,
	unsigned int nbytes,
	void *info,
	struct scatterlist *src,
	struct scatterlist *dst)
{
	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
	struct mlli_params *mlli_params = &req_ctx->mlli_params;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	struct device *dev = &drvdata->plat_dev->dev;
	struct buffer_array sg_data;
	u32 dummy = 0;
	int rc = 0;
	u32 mapped_nents = 0;

	req_ctx->dma_buf_type = SSI_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Map IV buffer */
	if (likely(ivsize != 0)) {
		dump_byte_array("iv", (u8 *)info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, (void *)info,
				       ivsize,
				       req_ctx->is_giv ? DMA_BIDIRECTIONAL :
							 DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev,
					       req_ctx->gen_ctx.iv_dma_addr))) {
			SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
				    ivsize, info);
			return -ENOMEM;
		}
		SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
			      ivsize, info,
			      (unsigned long long)req_ctx->gen_ctx.iv_dma_addr);
	} else {
		req_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src SGL */
	rc = ssi_buffer_mgr_map_scatterlist(dev, src,
					    nbytes, DMA_BIDIRECTIONAL,
					    &req_ctx->in_nents,
					    LLI_MAX_NUM_OF_DATA_ENTRIES,
					    &dummy, &mapped_nents);
	if (unlikely(rc != 0)) {
		rc = -ENOMEM;
		goto ablkcipher_exit;
	}
	if (mapped_nents > 1)
		req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;

	if (unlikely(src == dst)) {
		/* Handle inplace operation */
		if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
			req_ctx->out_nents = 0;
			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
							     req_ctx->in_nents,
							     src, nbytes, 0,
							     true,
							     &req_ctx->in_mlli_nents);
		}
	} else {
		/* Map the dst sg */
		if (unlikely(ssi_buffer_mgr_map_scatterlist(
				dev, dst, nbytes,
				DMA_BIDIRECTIONAL, &req_ctx->out_nents,
				LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
				&mapped_nents))) {
			rc = -ENOMEM;
			goto ablkcipher_exit;
		}
		if (mapped_nents > 1)
			req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;

		if (unlikely((req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI))) {
			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
							     req_ctx->in_nents,
							     src, nbytes, 0,
							     true,
							     &req_ctx->in_mlli_nents);
			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
							     req_ctx->out_nents,
							     dst, nbytes, 0,
							     true,
							     &req_ctx->out_mlli_nents);
		}
	}

	if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
		if (unlikely(rc != 0))
			goto ablkcipher_exit;
	}

	SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n",
		      GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));

	return 0;

ablkcipher_exit:
	ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
	return rc;
}
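/*
 * Usage sketch (illustration only, not part of the original file): a cipher
 * operation maps everything up front and unmaps on completion; assuming an
 * ablkcipher request `req` whose info field carries the IV:
 *
 *	rc = ssi_buffer_mgr_map_blkcipher_request(drvdata, req_ctx, ivsize,
 *						  req->nbytes, req->info,
 *						  req->src, req->dst);
 *	... queue HW descriptors ...
 *	ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize,
 *					       req->src, req->dst);
 */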
void ssi_buffer_mgr_unmap_aead_request(
	struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 dummy = 0;
	bool chained = false;
	u32 size_to_unmap = 0;

	if (areq_ctx->mac_buf_dma_addr != 0) {
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
	}

#if SSI_CC_HAS_AES_GCM
	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		if (areq_ctx->hkey_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
		}

		if (areq_ctx->gcm_block_len_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc1_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc2_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}
	}
#endif /* SSI_CC_HAS_AES_GCM */

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (areq_ctx->ccm_iv0_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
	}
	if (areq_ctx->gen_ctx.iv_dma_addr != 0) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);
	}

	/* In case a pool was set, a table was
	 * allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool) {
		SSI_LOG_DEBUG("free MLLI buffer: dma=0x%08llX virt=%pK\n",
			      (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
			      areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	SSI_LOG_DEBUG("Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u "
		      "areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
		      sg_virt(req->src), areq_ctx->src.nents,
		      areq_ctx->assoc.nents, req->assoclen, req->cryptlen);
	size_to_unmap = req->assoclen + req->cryptlen;
	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
		size_to_unmap += areq_ctx->req_authsize;
	if (areq_ctx->is_gcm4543)
		size_to_unmap += crypto_aead_ivsize(tfm);

	dma_unmap_sg(dev, req->src,
		     ssi_buffer_mgr_get_sgl_nents(req->src, size_to_unmap,
						  &dummy, &chained),
		     DMA_BIDIRECTIONAL);
	if (unlikely(req->src != req->dst)) {
		SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
			      sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst,
			     ssi_buffer_mgr_get_sgl_nents(req->dst,
							  size_to_unmap,
							  &dummy, &chained),
			     DMA_BIDIRECTIONAL);
	}
#if DX_HAS_ACP
	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
	    likely(req->src == req->dst)) {
		u32 size_to_skip = req->assoclen;

		if (areq_ctx->is_gcm4543)
			size_to_skip += crypto_aead_ivsize(tfm);

		/* copy mac to a temporary location to deal with possible
		 * data memory overriding that caused by cache coherence
		 * problem.
		 */
		ssi_buffer_mgr_copy_scatterlist_portion(
			areq_ctx->backup_mac, req->src,
			size_to_skip + req->cryptlen - areq_ctx->req_authsize,
			size_to_skip + req->cryptlen, SSI_SG_FROM_BUF);
	}
#endif
}
static inline int ssi_buffer_mgr_get_aead_icv_nents(
	struct scatterlist *sgl,
	unsigned int sgl_nents,
	unsigned int authsize,
	u32 last_entry_data_size,
	bool *is_icv_fragmented)
{
	unsigned int icv_max_size = 0;
	unsigned int icv_required_size = authsize > last_entry_data_size ?
					(authsize - last_entry_data_size) :
					authsize;
	int nents;
	unsigned int i;

	if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
		*is_icv_fragmented = false;
		return 0;
	}

	for (i = 0; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED); i++) {
		if (!sgl)
			break;
		sgl = sg_next(sgl);
	}

	if (sgl)
		icv_max_size = sgl->length;

	if (last_entry_data_size > authsize) {
		/* ICV attached to data in last entry (not fragmented!) */
		nents = 0;
		*is_icv_fragmented = false;
	} else if (last_entry_data_size == authsize) {
		/* ICV placed in whole last entry (not fragmented!) */
		nents = 1;
		*is_icv_fragmented = false;
	} else if (icv_max_size > icv_required_size) {
		nents = 1;
		*is_icv_fragmented = true;
	} else if (icv_max_size == icv_required_size) {
		nents = 2;
		*is_icv_fragmented = true;
	} else {
		SSI_LOG_ERR("Unsupported num. of ICV fragments (> %d)\n",
			    MAX_ICV_NENTS_SUPPORTED);
		nents = -1; /* unsupported */
	}
	SSI_LOG_DEBUG("is_frag=%s icv_nents=%u\n",
		      (*is_icv_fragmented ? "true" : "false"), nents);

	return nents;
}
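/*
 * Worked example (illustration only): with authsize = 16 and a last entry
 * holding last_entry_data_size = 10 bytes, 6 ICV bytes spill into the
 * preceding entry, so icv_required_size = 6 and the ICV is fragmented; the
 * function then reports 1 or 2 nents depending on whether that preceding
 * entry has more than exactly those 6 bytes available.
 */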
static inline int ssi_buffer_mgr_aead_chain_iv(
	struct ssi_drvdata *drvdata,
	struct aead_request *req,
	struct buffer_array *sg_data,
	bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct device *dev = &drvdata->plat_dev->dev;
	int rc = 0;

	if (unlikely(!req->iv)) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
						       hw_iv_size,
						       DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
		SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
			    hw_iv_size, req->iv);
		rc = -ENOMEM;
		goto chain_iv_exit;
	}

	SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
		      hw_iv_size, req->iv,
		      (unsigned long long)areq_ctx->gen_ctx.iv_dma_addr);
	if (do_chain && areq_ctx->plaintext_authenticate_only) {
		/* TODO: what about CTR? */
		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
		/* Chain to given list */
		ssi_buffer_mgr_add_buffer_entry(
			sg_data, areq_ctx->gen_ctx.iv_dma_addr + iv_ofs,
			iv_size_to_authenc, is_last,
			&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
	}

chain_iv_exit:
	return rc;
}
static inline int ssi_buffer_mgr_aead_chain_assoc(
	struct ssi_drvdata *drvdata,
	struct aead_request *req,
	struct buffer_array *sg_data,
	bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = 0;
	u32 mapped_nents = 0;
	struct scatterlist *current_sg = req->src;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int sg_index = 0;
	u32 size_of_assoc = req->assoclen;

	if (areq_ctx->is_gcm4543)
		size_of_assoc += crypto_aead_ivsize(tfm);

	if (!sg_data) {
		rc = -EINVAL;
		goto chain_assoc_exit;
	}

	if (unlikely(req->assoclen == 0)) {
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_NULL;
		areq_ctx->assoc.nents = 0;
		areq_ctx->assoc.mlli_nents = 0;
		SSI_LOG_DEBUG("Chain assoc of length 0: buff_type=%s nents=%u\n",
			      GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
			      areq_ctx->assoc.nents);
		goto chain_assoc_exit;
	}

	/* Iterate over the sgl to see how many entries hold associated data;
	 * it is assumed that if we reach here, the sgl is already mapped.
	 */
	sg_index = current_sg->length;
	if (sg_index > size_of_assoc) {
		/* The first entry in the scatterlist contains all the
		 * associated data.
		 */
		mapped_nents++;
	} else {
		while (sg_index <= size_of_assoc) {
			current_sg = sg_next(current_sg);
			/* If we reached the end of the sgl, this is
			 * unexpected.
			 */
			if (!current_sg) {
				SSI_LOG_ERR("reached end of sg list. unexpected\n");
				BUG();
			}
			sg_index += current_sg->length;
			mapped_nents++;
		}
	}
	if (unlikely(mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
		SSI_LOG_ERR("Too many fragments. current %d max %d\n",
			    mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->assoc.nents = mapped_nents;

	/* In CCM case we have additional entry for
	 * ccm header configurations
	 */
	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (unlikely((mapped_nents + 1) >
			     LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
			SSI_LOG_ERR("CCM case. Too many fragments. Current %d max %d\n",
				    (areq_ctx->assoc.nents + 1),
				    LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
			rc = -ENOMEM;
			goto chain_assoc_exit;
		}
	}

	if (likely(mapped_nents == 1) &&
	    (areq_ctx->ccm_hdr_size == ccm_header_size_null))
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_DLLI;
	else
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;

	if (unlikely(do_chain ||
		     (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
		SSI_LOG_DEBUG("Chain assoc: buff_type=%s nents=%u\n",
			      GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
			      areq_ctx->assoc.nents);
		ssi_buffer_mgr_add_scatterlist_entry(
			sg_data, areq_ctx->assoc.nents,
			req->src, req->assoclen, 0, is_last,
			&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
	}

chain_assoc_exit:
	return rc;
}
static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
	struct aead_request *req,
	u32 *src_last_bytes, u32 *dst_last_bytes)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;

	areq_ctx->is_icv_fragmented = false;
	if (likely(req->src == req->dst)) {
		/* INPLACE */
		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->srcSgl) +
			(*src_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->srcSgl) +
			(*src_last_bytes - authsize);
	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/* NON-INPLACE and DECRYPT */
		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->srcSgl) +
			(*src_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->srcSgl) +
			(*src_last_bytes - authsize);
	} else {
		/* NON-INPLACE and ENCRYPT */
		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dstSgl) +
			(*dst_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dstSgl) +
			(*dst_last_bytes - authsize);
	}
}
static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
	struct ssi_drvdata *drvdata,
	struct aead_request *req,
	struct buffer_array *sg_data,
	u32 *src_last_bytes, u32 *dst_last_bytes,
	bool is_last_table)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	int rc = 0, icv_nents;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	if (likely(req->src == req->dst)) {
		/* INPLACE */
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
						     areq_ctx->src.nents,
						     areq_ctx->srcSgl,
						     areq_ctx->cryptlen,
						     areq_ctx->srcOffset,
						     is_last_table,
						     &areq_ctx->src.mlli_nents);

		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(
				areq_ctx->srcSgl, areq_ctx->src.nents,
				authsize, *src_last_bytes,
				&areq_ctx->is_icv_fragmented);
		if (unlikely(icv_nents < 0)) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}

		if (unlikely(areq_ctx->is_icv_fragmented)) {
			/* Backup happens only when ICV is fragmented, ICV
			 * verification is made by CPU compare in order to
			 * simplify MAC verification upon request completion
			 */
			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
#if !DX_HAS_ACP
				/* On ACP platforms the ICV is already copied
				 * for any INPLACE-DECRYPT operation, hence
				 * this code must be skipped there.
				 */
				u32 size_to_skip = req->assoclen;

				if (areq_ctx->is_gcm4543)
					size_to_skip += crypto_aead_ivsize(tfm);

				ssi_buffer_mgr_copy_scatterlist_portion(
					areq_ctx->backup_mac, req->src,
					size_to_skip + req->cryptlen -
						areq_ctx->req_authsize,
					size_to_skip + req->cryptlen,
					SSI_SG_TO_BUF);
#endif
				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
			} else {
				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
				areq_ctx->icv_dma_addr =
					areq_ctx->mac_buf_dma_addr;
			}
		} else { /* Contig. ICV */
			/* Should handle the case where the sg is not
			 * contiguous.
			 */
			areq_ctx->icv_dma_addr = sg_dma_address(
				&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(
				&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
				(*src_last_bytes - authsize);
		}

	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/* NON-INPLACE and DECRYPT */
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
						     areq_ctx->src.nents,
						     areq_ctx->srcSgl,
						     areq_ctx->cryptlen,
						     areq_ctx->srcOffset,
						     is_last_table,
						     &areq_ctx->src.mlli_nents);
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
						     areq_ctx->dst.nents,
						     areq_ctx->dstSgl,
						     areq_ctx->cryptlen,
						     areq_ctx->dstOffset,
						     is_last_table,
						     &areq_ctx->dst.mlli_nents);

		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(
				areq_ctx->srcSgl, areq_ctx->src.nents,
				authsize, *src_last_bytes,
				&areq_ctx->is_icv_fragmented);
		if (unlikely(icv_nents < 0)) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}

		if (unlikely(areq_ctx->is_icv_fragmented)) {
			/* Backup happens only when ICV is fragmented, ICV
			 * verification is made by CPU compare in order to
			 * simplify MAC verification upon request completion
			 */
			u32 size_to_skip = req->assoclen;

			if (areq_ctx->is_gcm4543)
				size_to_skip += crypto_aead_ivsize(tfm);

			ssi_buffer_mgr_copy_scatterlist_portion(
				areq_ctx->backup_mac, req->src,
				size_to_skip + req->cryptlen -
					areq_ctx->req_authsize,
				size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
		} else { /* Contig. ICV */
			/* Should handle the case where the sg is not
			 * contiguous.
			 */
			areq_ctx->icv_dma_addr = sg_dma_address(
				&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(
				&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
				(*src_last_bytes - authsize);
		}

	} else {
		/* NON-INPLACE and ENCRYPT */
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
						     areq_ctx->dst.nents,
						     areq_ctx->dstSgl,
						     areq_ctx->cryptlen,
						     areq_ctx->dstOffset,
						     is_last_table,
						     &areq_ctx->dst.mlli_nents);
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
						     areq_ctx->src.nents,
						     areq_ctx->srcSgl,
						     areq_ctx->cryptlen,
						     areq_ctx->srcOffset,
						     is_last_table,
						     &areq_ctx->src.mlli_nents);

		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(
				areq_ctx->dstSgl, areq_ctx->dst.nents,
				authsize, *dst_last_bytes,
				&areq_ctx->is_icv_fragmented);
		if (unlikely(icv_nents < 0)) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}

		if (likely(!areq_ctx->is_icv_fragmented)) {
			/* Contig. ICV */
			areq_ctx->icv_dma_addr = sg_dma_address(
				&areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
				(*dst_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(
				&areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
				(*dst_last_bytes - authsize);
		} else {
			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
		}
	}

prepare_data_mlli_exit:
	return rc;
}
static inline int ssi_buffer_mgr_aead_chain_data(
	struct ssi_drvdata *drvdata,
	struct aead_request *req,
	struct buffer_array *sg_data,
	bool is_last_table, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = &drvdata->plat_dev->dev;
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	u32 src_last_bytes = 0, dst_last_bytes = 0;
	int rc = 0;
	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
	u32 offset = 0;
	unsigned int size_for_map = req->assoclen + req->cryptlen; /* non-inplace mode */
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 sg_index = 0;
	bool chained = false;
	bool is_gcm4543 = areq_ctx->is_gcm4543;
	u32 size_to_skip = req->assoclen;

	if (is_gcm4543)
		size_to_skip += crypto_aead_ivsize(tfm);

	offset = size_to_skip;

	if (!sg_data) {
		rc = -EINVAL;
		goto chain_data_exit;
	}
	areq_ctx->srcSgl = req->src;
	areq_ctx->dstSgl = req->dst;

	if (is_gcm4543)
		size_for_map += crypto_aead_ivsize(tfm);

	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				authsize : 0;
	src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src, size_for_map,
							&src_last_bytes,
							&chained);
	sg_index = areq_ctx->srcSgl->length;
	/* check where the data starts */
	while (sg_index <= size_to_skip) {
		offset -= areq_ctx->srcSgl->length;
		areq_ctx->srcSgl = sg_next(areq_ctx->srcSgl);
		/* reaching the end of the sgl here is unexpected */
		if (!areq_ctx->srcSgl) {
			SSI_LOG_ERR("reached end of sg list. unexpected\n");
			BUG();
		}
		sg_index += areq_ctx->srcSgl->length;
		src_mapped_nents--;
	}
	if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
		SSI_LOG_ERR("Too many fragments. current %d max %d\n",
			    src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}

	areq_ctx->src.nents = src_mapped_nents;

	areq_ctx->srcOffset = offset;

	if (req->src != req->dst) {
		size_for_map = req->assoclen + req->cryptlen;
		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
					authsize : 0;
		if (is_gcm4543)
			size_for_map += crypto_aead_ivsize(tfm);

		rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst,
						    size_for_map,
						    DMA_BIDIRECTIONAL,
						    &areq_ctx->dst.nents,
						    LLI_MAX_NUM_OF_DATA_ENTRIES,
						    &dst_last_bytes,
						    &dst_mapped_nents);
		if (unlikely(rc != 0)) {
			rc = -ENOMEM;
			goto chain_data_exit;
		}
	}

	dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst, size_for_map,
							&dst_last_bytes,
							&chained);
	sg_index = areq_ctx->dstSgl->length;
	offset = size_to_skip;

	/* check where the data starts */
	while (sg_index <= size_to_skip) {
		offset -= areq_ctx->dstSgl->length;
		areq_ctx->dstSgl = sg_next(areq_ctx->dstSgl);
		/* reaching the end of the sgl here is unexpected */
		if (!areq_ctx->dstSgl) {
			SSI_LOG_ERR("reached end of sg list. unexpected\n");
			BUG();
		}
		sg_index += areq_ctx->dstSgl->length;
		dst_mapped_nents--;
	}
	if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
		SSI_LOG_ERR("Too many fragments. current %d max %d\n",
			    dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->dst.nents = dst_mapped_nents;
	areq_ctx->dstOffset = offset;
	if ((src_mapped_nents > 1) ||
	    (dst_mapped_nents > 1) ||
	    do_chain) {
		areq_ctx->data_buff_type = SSI_DMA_BUF_MLLI;
		rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req,
							   sg_data,
							   &src_last_bytes,
							   &dst_last_bytes,
							   is_last_table);
	} else {
		areq_ctx->data_buff_type = SSI_DMA_BUF_DLLI;
		ssi_buffer_mgr_prepare_aead_data_dlli(
				req, &src_last_bytes, &dst_last_bytes);
	}

chain_data_exit:
	return rc;
}
static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
						  struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 curr_mlli_size = 0;

	if (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
		curr_mlli_size = areq_ctx->assoc.mlli_nents *
				 LLI_ENTRY_BYTE_SIZE;
	}

	if (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI) {
		/* Inplace case: dst nents equal src nents */
		if (req->src == req->dst) {
			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
						  curr_mlli_size;
			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
			if (!areq_ctx->is_single_pass)
				areq_ctx->assoc.mlli_nents +=
					areq_ctx->src.mlli_nents;
		} else {
			if (areq_ctx->gen_ctx.op_type ==
			    DRV_CRYPTO_DIRECTION_DECRYPT) {
				areq_ctx->src.sram_addr =
					drvdata->mlli_sram_addr +
					curr_mlli_size;
				areq_ctx->dst.sram_addr =
					areq_ctx->src.sram_addr +
					areq_ctx->src.mlli_nents *
					LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->src.mlli_nents;
			} else {
				areq_ctx->dst.sram_addr =
					drvdata->mlli_sram_addr +
					curr_mlli_size;
				areq_ctx->src.sram_addr =
					areq_ctx->dst.sram_addr +
					areq_ctx->dst.mlli_nents *
					LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->dst.mlli_nents;
			}
		}
	}
}
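/*
 * SRAM layout example (illustration only): with an assoc table of 3 MLLI
 * entries and a src table of 5, the assoc table sits at mlli_sram_addr and
 * src at mlli_sram_addr + 3 * LLI_ENTRY_BYTE_SIZE; for a non-single-pass
 * flow the assoc table is then credited with the chained data entries as
 * well, so assoc.mlli_nents becomes 3 + 5.
 */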
int ssi_buffer_mgr_map_aead_request(
	struct ssi_drvdata *drvdata, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct device *dev = &drvdata->plat_dev->dev;
	struct buffer_array sg_data;
	unsigned int authsize = areq_ctx->req_authsize;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	int rc = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool is_gcm4543 = areq_ctx->is_gcm4543;

	u32 mapped_nents = 0;
	u32 dummy = 0; /* used for the assoc data fragments */
	u32 size_to_map = 0;

	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

#if DX_HAS_ACP
	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
	    likely(req->src == req->dst)) {
		u32 size_to_skip = req->assoclen;

		if (is_gcm4543)
			size_to_skip += crypto_aead_ivsize(tfm);

		/* copy mac to a temporary location to deal with possible
		 * data memory overriding that caused by cache coherence
		 * problem.
		 */
		ssi_buffer_mgr_copy_scatterlist_portion(
			areq_ctx->backup_mac, req->src,
			size_to_skip + req->cryptlen - areq_ctx->req_authsize,
			size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
	}
#endif

	/* calculate the size for cipher; remove ICV in decrypt */
	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
			      DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - authsize);

	areq_ctx->mac_buf_dma_addr = dma_map_single(dev,
						    areq_ctx->mac_buf,
						    MAX_MAC_SIZE,
						    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) {
		SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK for DMA failed\n",
			    MAX_MAC_SIZE, areq_ctx->mac_buf);
		rc = -ENOMEM;
		goto aead_map_failure;
	}

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		areq_ctx->ccm_iv0_dma_addr = dma_map_single(dev,
			(areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
			AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(dev,
					       areq_ctx->ccm_iv0_dma_addr))) {
			SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK "
				    "for DMA failed\n", AES_BLOCK_SIZE,
				    (areq_ctx->ccm_config +
				     CCM_CTR_COUNT_0_OFFSET));
			areq_ctx->ccm_iv0_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		if (ssi_aead_handle_config_buf(dev, areq_ctx,
					       areq_ctx->ccm_config, &sg_data,
					       req->assoclen) != 0) {
			rc = -ENOMEM;
			goto aead_map_failure;
		}
	}

#if SSI_CC_HAS_AES_GCM
	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		areq_ctx->hkey_dma_addr = dma_map_single(dev,
							 areq_ctx->hkey,
							 AES_BLOCK_SIZE,
							 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev,
					       areq_ctx->hkey_dma_addr))) {
			SSI_LOG_ERR("Mapping hkey %u B at va=%pK for DMA failed\n",
				    AES_BLOCK_SIZE, areq_ctx->hkey);
			rc = -ENOMEM;
			goto aead_map_failure;
		}

		areq_ctx->gcm_block_len_dma_addr = dma_map_single(dev,
			&areq_ctx->gcm_len_block, AES_BLOCK_SIZE,
			DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev,
				areq_ctx->gcm_block_len_dma_addr))) {
			SSI_LOG_ERR("Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
				    AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
			rc = -ENOMEM;
			goto aead_map_failure;
		}

		areq_ctx->gcm_iv_inc1_dma_addr = dma_map_single(dev,
							areq_ctx->gcm_iv_inc1,
							AES_BLOCK_SIZE,
							DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(dev,
				areq_ctx->gcm_iv_inc1_dma_addr))) {
			SSI_LOG_ERR("Mapping gcm_iv_inc1 %u B at va=%pK "
				    "for DMA failed\n", AES_BLOCK_SIZE,
				    (areq_ctx->gcm_iv_inc1));
			areq_ctx->gcm_iv_inc1_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}

		areq_ctx->gcm_iv_inc2_dma_addr = dma_map_single(dev,
							areq_ctx->gcm_iv_inc2,
							AES_BLOCK_SIZE,
							DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(dev,
				areq_ctx->gcm_iv_inc2_dma_addr))) {
			SSI_LOG_ERR("Mapping gcm_iv_inc2 %u B at va=%pK "
				    "for DMA failed\n", AES_BLOCK_SIZE,
				    (areq_ctx->gcm_iv_inc2));
			areq_ctx->gcm_iv_inc2_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
	}
#endif /* SSI_CC_HAS_AES_GCM */

	size_to_map = req->cryptlen + req->assoclen;
	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
		size_to_map += authsize;

	if (is_gcm4543)
		size_to_map += crypto_aead_ivsize(tfm);
	rc = ssi_buffer_mgr_map_scatterlist(dev, req->src,
					    size_to_map, DMA_BIDIRECTIONAL,
					    &areq_ctx->src.nents,
					    LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
					    LLI_MAX_NUM_OF_DATA_ENTRIES,
					    &dummy, &mapped_nents);
	if (unlikely(rc != 0)) {
		rc = -ENOMEM;
		goto aead_map_failure;
	}

	if (likely(areq_ctx->is_single_pass)) {
		/*
		 * Create MLLI table for:
		 *   (1) Assoc. data
		 *   (2) Src/Dst SGLs
		 * Note: IV is contg. buffer (not an SGL)
		 */
		rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data,
						     true, false);
		if (unlikely(rc != 0))
			goto aead_map_failure;
		rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data,
						  true, false);
		if (unlikely(rc != 0))
			goto aead_map_failure;
		rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data,
						    true, false);
		if (unlikely(rc != 0))
			goto aead_map_failure;
	} else { /* DOUBLE-PASS flow */
		/*
		 * Prepare MLLI table(s) in this order:
		 *
		 * If ENCRYPT/DECRYPT (inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src/dst (inplace operation)
		 *
		 * If ENCRYPT (non-inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for dst
		 *   (4) MLLI for src
		 *
		 * If DECRYPT (non-inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src
		 *   (4) MLLI for dst
		 */
		rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data,
						     false, true);
		if (unlikely(rc != 0))
			goto aead_map_failure;
		rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data,
						  false, true);
		if (unlikely(rc != 0))
			goto aead_map_failure;
		rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data,
						    true, true);
		if (unlikely(rc != 0))
			goto aead_map_failure;
	}

	/* Mlli support - start building the MLLI according to the above
	 * results
	 */
	if (unlikely(
		(areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
		(areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI))) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
		if (unlikely(rc != 0))
			goto aead_map_failure;

		ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
		SSI_LOG_DEBUG("assoc params mn %d\n",
			      areq_ctx->assoc.mlli_nents);
		SSI_LOG_DEBUG("src params mn %d\n", areq_ctx->src.mlli_nents);
		SSI_LOG_DEBUG("dst params mn %d\n", areq_ctx->dst.mlli_nents);
	}

	return 0;

aead_map_failure:
	ssi_buffer_mgr_unmap_aead_request(dev, req);
	return rc;
}
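/*
 * Usage sketch (illustration only): the AEAD path mirrors the cipher path;
 * ssi_buffer_mgr_map_aead_request() is expected to be called before queuing
 * HW descriptors and ssi_buffer_mgr_unmap_aead_request() on completion or,
 * as above, on any setup failure. The unmap is safe on a partially mapped
 * request because every release is guarded by a non-zero DMA address or a
 * non-NULL pool pointer.
 */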
int ssi_buffer_mgr_map_hash_request_final(
	struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src,
	unsigned int nbytes, bool do_update)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = &drvdata->plat_dev->dev;
	u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
					       areq_ctx->buff0;
	u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
						    &areq_ctx->buff0_cnt;
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	SSI_LOG_DEBUG(" final params : curr_buff=%pK "
		      "curr_buff_cnt=0x%X nbytes = 0x%X "
		      "src=%pK curr_index=%u\n",
		      curr_buff, *curr_buff_cnt, nbytes,
		      src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (unlikely(nbytes == 0 && *curr_buff_cnt == 0)) {
		/* nothing to do */
		return 0;
	}

	/* TODO: copy data in case that buffer is enough for operation */
	/* map the previous buffer */
	if (*curr_buff_cnt != 0) {
		if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
					      *curr_buff_cnt, &sg_data) != 0) {
			return -ENOMEM;
		}
	}

	if (src && (nbytes > 0) && do_update) {
		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src, nbytes,
					DMA_TO_DEVICE,
					&areq_ctx->in_nents,
					LLI_MAX_NUM_OF_DATA_ENTRIES,
					&dummy, &mapped_nents))) {
			goto unmap_curr_buff;
		}
		if (src && (mapped_nents == 1) &&
		    (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = nbytes;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
		} else {
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
		}
	}

	/* build mlli */
	if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
						     areq_ctx->in_nents,
						     src, nbytes, 0,
						     true,
						     &areq_ctx->mlli_nents);
		if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
							  mlli_params) != 0)) {
			goto fail_unmap_din;
		}
	}
	/* change the buffer index for the unmap function */
	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
	SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
		      GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt != 0)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return -ENOMEM;
}
int ssi_buffer_mgr_map_hash_request_update(
	struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src,
	unsigned int nbytes, unsigned int block_size)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = &drvdata->plat_dev->dev;
	u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
					       areq_ctx->buff0;
	u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
						    &areq_ctx->buff0_cnt;
	u8 *next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
					       areq_ctx->buff1;
	u32 *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
						    &areq_ctx->buff1_cnt;
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	unsigned int update_data_len;
	u32 total_in_len = nbytes + *curr_buff_cnt;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	unsigned int swap_index = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	SSI_LOG_DEBUG(" update params : curr_buff=%pK "
		      "curr_buff_cnt=0x%X nbytes=0x%X "
		      "src=%pK curr_index=%u\n",
		      curr_buff, *curr_buff_cnt, nbytes,
		      src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	areq_ctx->curr_sg = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (unlikely(total_in_len < block_size)) {
		SSI_LOG_DEBUG(" less than one block: curr_buff=%pK "
			      "*curr_buff_cnt=0x%X copy_to=%pK\n",
			      curr_buff, *curr_buff_cnt,
			      &curr_buff[*curr_buff_cnt]);
		areq_ctx->in_nents =
			ssi_buffer_mgr_get_sgl_nents(src, nbytes,
						     &dummy, NULL);
		sg_copy_to_buffer(src, areq_ctx->in_nents,
				  &curr_buff[*curr_buff_cnt], nbytes);
		*curr_buff_cnt += nbytes;
		return 1;
	}

	/* Calculate the residue size */
	*next_buff_cnt = total_in_len & (block_size - 1);
	/* update data len */
	update_data_len = total_in_len - *next_buff_cnt;

	SSI_LOG_DEBUG(" temp length : *next_buff_cnt=0x%X "
		      "update_data_len=0x%X\n",
		      *next_buff_cnt, update_data_len);
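	/*
	 * Worked example (illustration only): with block_size = 64,
	 * *curr_buff_cnt = 10 and nbytes = 100, total_in_len = 110, so
	 * *next_buff_cnt = 110 & 63 = 46 bytes are held back as residue
	 * and update_data_len = 64 bytes are hashed in this pass.
	 */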
	/* Copy the new residue to next buffer */
	if (*next_buff_cnt != 0) {
		SSI_LOG_DEBUG(" handle residue: next buff %pK skip data %u "
			      "residue %u\n", next_buff,
			      (update_data_len - *curr_buff_cnt),
			      *next_buff_cnt);
		ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
					(update_data_len - *curr_buff_cnt),
					nbytes, SSI_SG_TO_BUF);
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (*curr_buff_cnt != 0) {
		if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
					      *curr_buff_cnt, &sg_data) != 0) {
			return -ENOMEM;
		}
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (update_data_len > *curr_buff_cnt) {
		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
					(update_data_len - *curr_buff_cnt),
					DMA_TO_DEVICE,
					&areq_ctx->in_nents,
					LLI_MAX_NUM_OF_DATA_ENTRIES,
					&dummy, &mapped_nents))) {
			goto unmap_curr_buff;
		}
		if ((mapped_nents == 1) &&
		    (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
			/* only one entry in the SG and no previous data */
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = update_data_len;
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
		} else {
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
		}
	}

	if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
					areq_ctx->in_nents, src,
					(update_data_len - *curr_buff_cnt), 0,
					true, &areq_ctx->mlli_nents);
		if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
							  mlli_params) != 0)) {
			goto fail_unmap_din;
		}
	}
	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt != 0)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return -ENOMEM;
}
void ssi_buffer_mgr_unmap_hash_request(
	struct device *dev, void *ctx, struct scatterlist *src,
	bool do_revert)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	u32 *prev_len = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
					       &areq_ctx->buff1_cnt;

	/* In case a pool was set, a table was
	 * allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool) {
		SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n",
			      (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
			      areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	if ((src) && likely(areq_ctx->in_nents != 0)) {
		SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=0x%llX len=0x%X\n",
			      sg_virt(src),
			      (unsigned long long)sg_dma_address(src),
			      sg_dma_len(src));
		dma_unmap_sg(dev, src,
			     areq_ctx->in_nents, DMA_TO_DEVICE);
	}

	if (*prev_len != 0) {
		SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK "
			      "dma=0x%llX len 0x%X\n",
			      sg_virt(areq_ctx->buff_sg),
			      (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
			      sg_dma_len(areq_ctx->buff_sg));
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
		if (!do_revert) {
			/* clean the previous data length for update
			 * operation
			 */
			*prev_len = 0;
		} else {
			areq_ctx->buff_index ^= 1;
		}
	}
}
int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle;
	struct device *dev = &drvdata->plat_dev->dev;

	buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
	if (!buff_mgr_handle)
		return -ENOMEM;

	drvdata->buff_mgr_handle = buff_mgr_handle;

	buff_mgr_handle->mlli_buffs_pool = dma_pool_create(
				"dx_single_mlli_tables", dev,
				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);

	if (unlikely(!buff_mgr_handle->mlli_buffs_pool))
		goto error;

	return 0;

error:
	ssi_buffer_mgr_fini(drvdata);
	return -ENOMEM;
}
int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;

	if (buff_mgr_handle) {
		dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
		kfree(drvdata->buff_mgr_handle);
		drvdata->buff_mgr_handle = NULL;
	}
	return 0;
}
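/*
 * Lifecycle sketch (illustration only, not part of the original file): the
 * probe path is expected to call ssi_buffer_mgr_init() once per device to
 * create the MLLI DMA pool, and the remove path to call
 * ssi_buffer_mgr_fini(), e.g.:
 *
 *	rc = ssi_buffer_mgr_init(drvdata);
 *	if (rc)
 *		goto init_failed;
 *	...
 *	ssi_buffer_mgr_fini(drvdata);
 */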