/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/crypto.h>
#include <linux/version.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/hash.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "ssi_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "ssi_cipher.h"

#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
#define MLLI_TABLE_MIN_ALIGNMENT 4 /* Force the MLLI table to be aligned to uint32 */
#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES (2 * LLI_MAX_NUM_OF_DATA_ENTRIES + \
				       LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)

#ifdef CC_DEBUG
#define DUMP_SGL(sg) \
	while (sg) { \
		SSI_LOG_DEBUG("page=%lu offset=%u length=%u (dma_len=%u) " \
			      "dma_addr=%08x\n", (sg)->page_link, (sg)->offset, \
			      (sg)->length, sg_dma_len(sg), (sg)->dma_address); \
		(sg) = sg_next(sg); \
	}
#define DUMP_MLLI_TABLE(mlli_p, nents) \
	do { \
		SSI_LOG_DEBUG("mlli=%pK nents=%u\n", (mlli_p), (nents)); \
		while ((nents)--) { \
			SSI_LOG_DEBUG("addr=0x%08X size=0x%08X\n", \
				      (mlli_p)[LLI_WORD0_OFFSET], \
				      (mlli_p)[LLI_WORD1_OFFSET]); \
			(mlli_p) += LLI_ENTRY_WORD_SIZE; \
		} \
	} while (0)
#define GET_DMA_BUFFER_TYPE(buff_type) ( \
	((buff_type) == SSI_DMA_BUF_NULL) ? "BUF_NULL" : \
	((buff_type) == SSI_DMA_BUF_DLLI) ? "BUF_DLLI" : \
	((buff_type) == SSI_DMA_BUF_MLLI) ? "BUF_MLLI" : "BUF_INVALID")
#else
#define DX_BUFFER_MGR_DUMP_SGL(sg)
#define DX_BUFFER_MGR_DUMP_MLLI_TABLE(mlli_p, nents)
#define GET_DMA_BUFFER_TYPE(buff_type)
#endif

enum dma_buffer_type {
	DMA_NULL_TYPE = -1,
	DMA_SGL_TYPE = 1,
	DMA_BUFF_TYPE = 2,
};

struct buff_mgr_handle {
	struct dma_pool *mlli_buffs_pool;
};

union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};

struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	uint32_t *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};

#ifdef CC_DMA_48BIT_SIM
dma_addr_t ssi_buff_mgr_update_dma_addr(dma_addr_t orig_addr, uint32_t data_len)
{
	dma_addr_t tmp_dma_addr;

#ifdef CC_DMA_48BIT_SIM_FULL
	/* With this code all addresses will be switched to 48 bits. */
	/* The if condition protects against double expansion */
	if ((((orig_addr >> 16) & 0xFFFF) != 0xFFFF) &&
	    (data_len <= CC_MAX_MLLI_ENTRY_SIZE)) {
#else
	if ((!(((orig_addr >> 16) & 0xFF) % 2)) &&
	    (data_len <= CC_MAX_MLLI_ENTRY_SIZE)) {
#endif
		tmp_dma_addr = ((orig_addr << 16) | 0xFFFF0000 |
				(orig_addr & UINT16_MAX));
		SSI_LOG_DEBUG("MAP DMA: orig address=0x%llX "
			      "dma_address=0x%llX\n",
			      orig_addr, tmp_dma_addr);
		return tmp_dma_addr;
	}
	return orig_addr;
}

dma_addr_t ssi_buff_mgr_restore_dma_addr(dma_addr_t orig_addr)
{
	dma_addr_t tmp_dma_addr;

#ifdef CC_DMA_48BIT_SIM_FULL
	/* With this code all addresses will be restored from 48 bits. */
	/* The if condition protects against double restoring */
	if ((orig_addr >> 32) & 0xFFFF) {
#else
	if (((orig_addr >> 32) & 0xFFFF) &&
	    !(((orig_addr >> 32) & 0xFF) % 2)) {
#endif
		/* return high 16 bits */
		tmp_dma_addr = ((orig_addr >> 16));
		/* clean the 0xFFFF in the lower bits (set in the address expansion) */
		tmp_dma_addr &= 0xFFFF0000;
		/* Set the original 16 bits */
		tmp_dma_addr |= (orig_addr & UINT16_MAX);
		SSI_LOG_DEBUG("Release DMA: orig address=0x%llX "
			      "dma_address=0x%llX\n",
			      orig_addr, tmp_dma_addr);
		return tmp_dma_addr;
	}
	return orig_addr;
}
#endif /* CC_DMA_48BIT_SIM */

/**
 * ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries.
 *
 * @sg_list: [IN] SG list to scan.
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the number of bytes in the last entry.
 */
static unsigned int ssi_buffer_mgr_get_sgl_nents(
	struct scatterlist *sg_list, unsigned int nbytes, uint32_t *lbytes, bool *is_chained)
{
	unsigned int nents = 0;

	while (nbytes != 0) {
		if (sg_is_chain(sg_list)) {
			SSI_LOG_ERR("Unexpected chained entry "
				    "in sg (entry=0x%X)\n", nents);
			BUG();
		}
		if (sg_list->length != 0) {
			nents++;
			/* get the number of bytes in the last entry */
			*lbytes = nbytes;
			nbytes -= (sg_list->length > nbytes) ? nbytes : sg_list->length;
			sg_list = sg_next(sg_list);
		} else {
			sg_list = (struct scatterlist *)sg_page(sg_list);
			if (is_chained != NULL) {
				*is_chained = true;
			}
		}
	}
	SSI_LOG_DEBUG("nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}

/**
 * ssi_buffer_mgr_zero_sgl() - Zero scatter list data.
 */
void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len)
{
	struct scatterlist *current_sg = sgl;
	int sg_index = 0;

	while (sg_index <= data_len) {
191 if (current_sg == NULL) {
192 /* reached the end of the sgl --> just return back */
195 memset(sg_virt(current_sg), 0, current_sg->length);
196 sg_index += current_sg->length;
197 current_sg = sg_next(current_sg);
/**
 * ssi_buffer_mgr_copy_scatterlist_portion() - Copy scatter list data,
 * from to_skip to end, to dest and vice versa.
 */
void ssi_buffer_mgr_copy_scatterlist_portion(
	u8 *dest, struct scatterlist *sg,
	uint32_t to_skip, uint32_t end,
	enum ssi_sg_cpy_direct direct)
{
	uint32_t nents, lbytes;

218 nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
219 sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
220 (direct == SSI_SG_TO_BUF));
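/*
 * Render a single contiguous DMA buffer into MLLI table entries, splitting
 * buffers larger than CC_MAX_MLLI_ENTRY_SIZE over several entries and
 * advancing *curr_nents accordingly.
 */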
223 static inline int ssi_buffer_mgr_render_buff_to_mlli(
224 dma_addr_t buff_dma, uint32_t buff_size, uint32_t *curr_nents,
225 uint32_t **mlli_entry_pp)
227 uint32_t *mlli_entry_p = *mlli_entry_pp;
230 /* Verify there is no memory overflow*/
231 new_nents = (*curr_nents + buff_size/CC_MAX_MLLI_ENTRY_SIZE + 1);
232 if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES ) {
236 /*handle buffer longer than 64 kbytes */
237 while (buff_size > CC_MAX_MLLI_ENTRY_SIZE ) {
238 SSI_UPDATE_DMA_ADDR_TO_48BIT(buff_dma, CC_MAX_MLLI_ENTRY_SIZE);
239 LLI_SET_ADDR(mlli_entry_p,buff_dma);
240 LLI_SET_SIZE(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
241 SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",*curr_nents,
242 mlli_entry_p[LLI_WORD0_OFFSET],
243 mlli_entry_p[LLI_WORD1_OFFSET]);
244 SSI_RESTORE_DMA_ADDR_TO_48BIT(buff_dma);
245 buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
246 buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
247 mlli_entry_p = mlli_entry_p + 2;
251 SSI_UPDATE_DMA_ADDR_TO_48BIT(buff_dma, buff_size);
252 LLI_SET_ADDR(mlli_entry_p,buff_dma);
253 LLI_SET_SIZE(mlli_entry_p, buff_size);
254 SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",*curr_nents,
255 mlli_entry_p[LLI_WORD0_OFFSET],
256 mlli_entry_p[LLI_WORD1_OFFSET]);
257 mlli_entry_p = mlli_entry_p + 2;
258 *mlli_entry_pp = mlli_entry_p;
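/*
 * Render a mapped scatterlist into MLLI entries: walk the SGL, starting at
 * sglOffset, and emit one or more entries per SG element via
 * ssi_buffer_mgr_render_buff_to_mlli().
 */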
264 static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
265 struct scatterlist *sgl, uint32_t sgl_data_len, uint32_t sglOffset, uint32_t *curr_nents,
266 uint32_t **mlli_entry_pp)
268 struct scatterlist *curr_sgl = sgl;
269 uint32_t *mlli_entry_p = *mlli_entry_pp;
272 for ( ; (curr_sgl != NULL) && (sgl_data_len != 0);
273 curr_sgl = sg_next(curr_sgl)) {
274 uint32_t entry_data_len =
275 (sgl_data_len > sg_dma_len(curr_sgl) - sglOffset) ?
276 sg_dma_len(curr_sgl) - sglOffset : sgl_data_len ;
277 sgl_data_len -= entry_data_len;
278 rc = ssi_buffer_mgr_render_buff_to_mlli(
279 sg_dma_address(curr_sgl) + sglOffset, entry_data_len, curr_nents,
286 *mlli_entry_pp = mlli_entry_p;
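/*
 * Build a single MLLI table covering all buffers collected in sg_data.
 * The table is allocated from the MLLI DMA pool and each buffer's
 * mlli_nents counter is updated for later descriptor setup.
 */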
290 static int ssi_buffer_mgr_generate_mlli(
292 struct buffer_array *sg_data,
293 struct mlli_params *mlli_params)
296 uint32_t total_nents = 0,prev_total_nents = 0;
299 SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers);
301 /* Allocate memory from the pointed pool */
302 mlli_params->mlli_virt_addr = dma_pool_alloc(
303 mlli_params->curr_pool, GFP_KERNEL,
304 &(mlli_params->mlli_dma_addr));
305 if (unlikely(mlli_params->mlli_virt_addr == NULL)) {
306 SSI_LOG_ERR("dma_pool_alloc() failed\n");
308 goto build_mlli_exit;
310 SSI_UPDATE_DMA_ADDR_TO_48BIT(mlli_params->mlli_dma_addr,
311 (MAX_NUM_OF_TOTAL_MLLI_ENTRIES*
312 LLI_ENTRY_BYTE_SIZE));
313 /* Point to start of MLLI */
314 mlli_p = (uint32_t *)mlli_params->mlli_virt_addr;
	/* go over all SGs and link them into one MLLI table */
316 for (i = 0; i < sg_data->num_of_buffers; i++) {
317 if (sg_data->type[i] == DMA_SGL_TYPE)
318 rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
319 sg_data->entry[i].sgl,
320 sg_data->total_data_len[i], sg_data->offset[i], &total_nents,
322 else /*DMA_BUFF_TYPE*/
323 rc = ssi_buffer_mgr_render_buff_to_mlli(
324 sg_data->entry[i].buffer_dma,
325 sg_data->total_data_len[i], &total_nents,
331 /* set last bit in the current table */
332 if (sg_data->mlli_nents[i] != NULL) {
333 /*Calculate the current MLLI table length for the
334 length field in the descriptor*/
335 *(sg_data->mlli_nents[i]) +=
336 (total_nents - prev_total_nents);
337 prev_total_nents = total_nents;
341 /* Set MLLI size for the bypass operation */
342 mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
344 SSI_LOG_DEBUG("MLLI params: "
345 "virt_addr=%pK dma_addr=0x%llX mlli_len=0x%X\n",
346 mlli_params->mlli_virt_addr,
347 (unsigned long long)mlli_params->mlli_dma_addr,
348 mlli_params->mlli_len);
354 static inline void ssi_buffer_mgr_add_buffer_entry(
355 struct buffer_array *sgl_data,
356 dma_addr_t buffer_dma, unsigned int buffer_len,
357 bool is_last_entry, uint32_t *mlli_nents)
359 unsigned int index = sgl_data->num_of_buffers;
361 SSI_LOG_DEBUG("index=%u single_buff=0x%llX "
362 "buffer_len=0x%08X is_last=%d\n",
363 index, (unsigned long long)buffer_dma, buffer_len, is_last_entry);
364 sgl_data->nents[index] = 1;
365 sgl_data->entry[index].buffer_dma = buffer_dma;
366 sgl_data->offset[index] = 0;
367 sgl_data->total_data_len[index] = buffer_len;
368 sgl_data->type[index] = DMA_BUFF_TYPE;
369 sgl_data->is_last[index] = is_last_entry;
370 sgl_data->mlli_nents[index] = mlli_nents;
371 if (sgl_data->mlli_nents[index] != NULL)
372 *sgl_data->mlli_nents[index] = 0;
373 sgl_data->num_of_buffers++;
376 static inline void ssi_buffer_mgr_add_scatterlist_entry(
377 struct buffer_array *sgl_data,
379 struct scatterlist *sgl,
380 unsigned int data_len,
381 unsigned int data_offset,
383 uint32_t *mlli_nents)
385 unsigned int index = sgl_data->num_of_buffers;
387 SSI_LOG_DEBUG("index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
388 index, nents, sgl, data_len, is_last_table);
389 sgl_data->nents[index] = nents;
390 sgl_data->entry[index].sgl = sgl;
391 sgl_data->offset[index] = data_offset;
392 sgl_data->total_data_len[index] = data_len;
393 sgl_data->type[index] = DMA_SGL_TYPE;
394 sgl_data->is_last[index] = is_last_table;
395 sgl_data->mlli_nents[index] = mlli_nents;
396 if (sgl_data->mlli_nents[index] != NULL)
397 *sgl_data->mlli_nents[index] = 0;
398 sgl_data->num_of_buffers++;
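/*
 * Map a (possibly chained) scatterlist entry by entry; nothing is left
 * mapped if any entry fails to map.
 */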
402 ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, uint32_t nents,
403 enum dma_data_direction direction)
406 struct scatterlist *l_sg = sg;
407 for (i = 0; i < nents; i++) {
411 if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)){
412 SSI_LOG_ERR("dma_map_page() sg buffer failed\n");
415 l_sg = sg_next(l_sg);
420 /* Restore mapped parts */
421 for (j = 0; j < i; j++) {
425 dma_unmap_sg(dev,sg,1,direction);
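/*
 * Map a request scatterlist for DMA. A single-entry SGL is mapped directly
 * (DLLI case); otherwise the entries are counted and the whole list is
 * mapped, entry by entry when it is chained.
 */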
431 static int ssi_buffer_mgr_map_scatterlist(
432 struct device *dev, struct scatterlist *sg,
433 unsigned int nbytes, int direction,
434 uint32_t *nents, uint32_t max_sg_nents,
435 uint32_t *lbytes, uint32_t *mapped_nents)
437 bool is_chained = false;
439 if (sg_is_last(sg)) {
440 /* One entry only case -set to DLLI */
441 if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
442 SSI_LOG_ERR("dma_map_sg() single buffer failed\n");
445 SSI_LOG_DEBUG("Mapped sg: dma_address=0x%llX "
446 "page_link=0x%08lX addr=%pK offset=%u "
448 (unsigned long long)sg_dma_address(sg),
451 sg->offset, sg->length);
455 SSI_UPDATE_DMA_ADDR_TO_48BIT(sg_dma_address(sg), sg_dma_len(sg));
456 } else { /*sg_is_last*/
457 *nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
459 if (*nents > max_sg_nents) {
461 SSI_LOG_ERR("Too many fragments. current %d max %d\n",
462 *nents, max_sg_nents);
		/* In case of an MMU the number of mapped nents might
		 * differ from the original sgl nents */
468 *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
469 if (unlikely(*mapped_nents == 0)){
471 SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
475 /*In this case the driver maps entry by entry so it
476 must have the same nents before and after map */
477 *mapped_nents = ssi_buffer_mgr_dma_map_sg(dev,
481 if (unlikely(*mapped_nents != *nents)){
482 *nents = *mapped_nents;
483 SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
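/*
 * Map the CCM configuration (A0/header) block and add it to the buffer
 * array so it can be linked into the assoc MLLI table when needed.
 */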
493 ssi_aead_handle_config_buf(struct device *dev,
494 struct aead_req_ctx *areq_ctx,
495 uint8_t* config_data,
496 struct buffer_array *sg_data,
497 unsigned int assoclen)
499 SSI_LOG_DEBUG(" handle additional data config set to DLLI \n");
500 /* create sg for the current buffer */
501 sg_init_one(&areq_ctx->ccm_adata_sg, config_data, AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
502 if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
503 DMA_TO_DEVICE) != 1)) {
504 SSI_LOG_ERR("dma_map_sg() "
505 "config buffer failed\n");
508 SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
509 "page_link=0x%08lX addr=%pK "
510 "offset=%u length=%u\n",
511 (unsigned long long)sg_dma_address(&areq_ctx->ccm_adata_sg),
512 areq_ctx->ccm_adata_sg.page_link,
513 sg_virt(&areq_ctx->ccm_adata_sg),
514 areq_ctx->ccm_adata_sg.offset,
515 areq_ctx->ccm_adata_sg.length);
516 /* prepare for case of MLLI */
518 ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1,
519 &areq_ctx->ccm_adata_sg,
521 areq_ctx->ccm_hdr_size), 0,
528 static inline int ssi_ahash_handle_curr_buf(struct device *dev,
529 struct ahash_req_ctx *areq_ctx,
531 uint32_t curr_buff_cnt,
532 struct buffer_array *sg_data)
534 SSI_LOG_DEBUG(" handle curr buff %x set to DLLI \n", curr_buff_cnt);
535 /* create sg for the current buffer */
536 sg_init_one(areq_ctx->buff_sg,curr_buff, curr_buff_cnt);
537 if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
538 DMA_TO_DEVICE) != 1)) {
539 SSI_LOG_ERR("dma_map_sg() "
540 "src buffer failed\n");
543 SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
544 "page_link=0x%08lX addr=%pK "
545 "offset=%u length=%u\n",
546 (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
547 areq_ctx->buff_sg->page_link,
548 sg_virt(areq_ctx->buff_sg),
549 areq_ctx->buff_sg->offset,
550 areq_ctx->buff_sg->length);
551 areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
552 areq_ctx->curr_sg = areq_ctx->buff_sg;
553 areq_ctx->in_nents = 0;
554 /* prepare for case of MLLI */
555 ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, areq_ctx->buff_sg,
556 curr_buff_cnt, 0, false, NULL);
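/*
 * Release the DMA mappings taken by ssi_buffer_mgr_map_blkcipher_request():
 * the IV, the MLLI table (if one was allocated) and the src/dst SGLs.
 */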
560 void ssi_buffer_mgr_unmap_blkcipher_request(
564 struct scatterlist *src,
565 struct scatterlist *dst)
567 struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
569 if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
570 SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n",
571 (unsigned long long)req_ctx->gen_ctx.iv_dma_addr,
573 SSI_RESTORE_DMA_ADDR_TO_48BIT(req_ctx->gen_ctx.iv_dma_addr);
574 dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
576 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
580 if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
581 SSI_RESTORE_DMA_ADDR_TO_48BIT(req_ctx->mlli_params.mlli_dma_addr);
582 dma_pool_free(req_ctx->mlli_params.curr_pool,
583 req_ctx->mlli_params.mlli_virt_addr,
584 req_ctx->mlli_params.mlli_dma_addr);
587 SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(src));
588 dma_unmap_sg(dev, src, req_ctx->in_nents,
590 SSI_LOG_DEBUG("Unmapped req->src=%pK\n",
594 SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(dst));
595 dma_unmap_sg(dev, dst, req_ctx->out_nents,
597 SSI_LOG_DEBUG("Unmapped req->dst=%pK\n",
602 int ssi_buffer_mgr_map_blkcipher_request(
603 struct ssi_drvdata *drvdata,
608 struct scatterlist *src,
609 struct scatterlist *dst)
611 struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
612 struct mlli_params *mlli_params = &req_ctx->mlli_params;
613 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
614 struct device *dev = &drvdata->plat_dev->dev;
615 struct buffer_array sg_data;
618 uint32_t mapped_nents = 0;
620 req_ctx->dma_buf_type = SSI_DMA_BUF_DLLI;
621 mlli_params->curr_pool = NULL;
622 sg_data.num_of_buffers = 0;
625 if (likely(ivsize != 0) ) {
626 dump_byte_array("iv", (uint8_t *)info, ivsize);
627 req_ctx->gen_ctx.iv_dma_addr =
628 dma_map_single(dev, (void *)info,
630 req_ctx->is_giv ? DMA_BIDIRECTIONAL:
632 if (unlikely(dma_mapping_error(dev,
633 req_ctx->gen_ctx.iv_dma_addr))) {
634 SSI_LOG_ERR("Mapping iv %u B at va=%pK "
635 "for DMA failed\n", ivsize, info);
638 SSI_UPDATE_DMA_ADDR_TO_48BIT(req_ctx->gen_ctx.iv_dma_addr,
640 SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
642 (unsigned long long)req_ctx->gen_ctx.iv_dma_addr);
644 req_ctx->gen_ctx.iv_dma_addr = 0;
646 /* Map the src SGL */
647 rc = ssi_buffer_mgr_map_scatterlist(dev, src,
648 nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
649 LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
650 if (unlikely(rc != 0)) {
652 goto ablkcipher_exit;
654 if (mapped_nents > 1)
655 req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
657 if (unlikely(src == dst)) {
658 /* Handle inplace operation */
659 if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
660 req_ctx->out_nents = 0;
661 ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
662 req_ctx->in_nents, src,
663 nbytes, 0, true, &req_ctx->in_mlli_nents);
667 if (unlikely(ssi_buffer_mgr_map_scatterlist(
669 DMA_BIDIRECTIONAL, &req_ctx->out_nents,
670 LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
673 goto ablkcipher_exit;
675 if (mapped_nents > 1)
676 req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
678 if (unlikely((req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI))) {
679 ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
680 req_ctx->in_nents, src,
682 &req_ctx->in_mlli_nents);
683 ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
684 req_ctx->out_nents, dst,
686 &req_ctx->out_mlli_nents);
690 if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
691 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
692 rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
693 if (unlikely(rc!= 0))
694 goto ablkcipher_exit;
698 SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n",
699 GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));
704 ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
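/*
 * Release all DMA mappings taken by ssi_buffer_mgr_map_aead_request(): the
 * MAC buffer, the GCM/CCM helper blocks, the IV, the MLLI table and the
 * src/dst SGLs. For in-place decrypt the backed-up MAC is copied back into
 * the source data.
 */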
708 void ssi_buffer_mgr_unmap_aead_request(
709 struct device *dev, struct aead_request *req)
711 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
712 unsigned int hw_iv_size = areq_ctx->hw_iv_size;
713 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
716 uint32_t size_to_unmap = 0;
718 if (areq_ctx->mac_buf_dma_addr != 0) {
719 SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->mac_buf_dma_addr);
720 dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
721 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
724 #if SSI_CC_HAS_AES_GCM
725 if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
726 if (areq_ctx->hkey_dma_addr != 0) {
727 SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->hkey_dma_addr);
728 dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
729 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
732 if (areq_ctx->gcm_block_len_dma_addr != 0) {
733 SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_block_len_dma_addr);
734 dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
735 AES_BLOCK_SIZE, DMA_TO_DEVICE);
738 if (areq_ctx->gcm_iv_inc1_dma_addr != 0) {
739 SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_iv_inc1_dma_addr);
740 dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
741 AES_BLOCK_SIZE, DMA_TO_DEVICE);
744 if (areq_ctx->gcm_iv_inc2_dma_addr != 0) {
745 SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_iv_inc2_dma_addr);
746 dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
747 AES_BLOCK_SIZE, DMA_TO_DEVICE);
752 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
753 if (areq_ctx->ccm_iv0_dma_addr != 0) {
754 SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->ccm_iv0_dma_addr);
755 dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
756 AES_BLOCK_SIZE, DMA_TO_DEVICE);
759 dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
761 if (areq_ctx->gen_ctx.iv_dma_addr != 0) {
762 SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->gen_ctx.iv_dma_addr);
763 dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
764 hw_iv_size, DMA_BIDIRECTIONAL);
767 /*In case a pool was set, a table was
768 allocated and should be released */
769 if (areq_ctx->mlli_params.curr_pool != NULL) {
770 SSI_LOG_DEBUG("free MLLI buffer: dma=0x%08llX virt=%pK\n",
771 (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
772 areq_ctx->mlli_params.mlli_virt_addr);
773 SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->mlli_params.mlli_dma_addr);
774 dma_pool_free(areq_ctx->mlli_params.curr_pool,
775 areq_ctx->mlli_params.mlli_virt_addr,
776 areq_ctx->mlli_params.mlli_dma_addr);
779 SSI_LOG_DEBUG("Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", sg_virt(req->src),areq_ctx->src.nents,areq_ctx->assoc.nents,req->assoclen,req->cryptlen);
780 SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(req->src));
781 size_to_unmap = req->assoclen+req->cryptlen;
782 if(areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT){
783 size_to_unmap += areq_ctx->req_authsize;
785 if (areq_ctx->is_gcm4543)
786 size_to_unmap += crypto_aead_ivsize(tfm);
788 dma_unmap_sg(dev, req->src, ssi_buffer_mgr_get_sgl_nents(req->src,size_to_unmap,&dummy,&chained) , DMA_BIDIRECTIONAL);
789 if (unlikely(req->src != req->dst)) {
790 SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
792 SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(req->dst));
793 dma_unmap_sg(dev, req->dst, ssi_buffer_mgr_get_sgl_nents(req->dst,size_to_unmap,&dummy,&chained),
797 if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
798 likely(req->src == req->dst))
800 uint32_t size_to_skip = req->assoclen;
801 if (areq_ctx->is_gcm4543) {
802 size_to_skip += crypto_aead_ivsize(tfm);
		/* copy the MAC back from the temporary location into the
		 * request data, to deal with possible data overwriting
		 * caused by a cache coherency problem */
		ssi_buffer_mgr_copy_scatterlist_portion(
			areq_ctx->backup_mac, req->src,
			size_to_skip + req->cryptlen - areq_ctx->req_authsize,
			size_to_skip + req->cryptlen, SSI_SG_FROM_BUF);
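/*
 * Determine how many SG entries at the tail of the list hold the ICV and
 * whether the ICV is fragmented across more than one entry.
 */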
814 static inline int ssi_buffer_mgr_get_aead_icv_nents(
815 struct scatterlist *sgl,
816 unsigned int sgl_nents,
817 unsigned int authsize,
818 uint32_t last_entry_data_size,
819 bool *is_icv_fragmented)
821 unsigned int icv_max_size = 0;
822 unsigned int icv_required_size = authsize > last_entry_data_size ? (authsize - last_entry_data_size) : authsize;
826 if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
827 *is_icv_fragmented = false;
831 for( i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
839 icv_max_size = sgl->length;
842 if (last_entry_data_size > authsize) {
843 nents = 0; /* ICV attached to data in last entry (not fragmented!) */
844 *is_icv_fragmented = false;
845 } else if (last_entry_data_size == authsize) {
846 nents = 1; /* ICV placed in whole last entry (not fragmented!) */
847 *is_icv_fragmented = false;
848 } else if (icv_max_size > icv_required_size) {
850 *is_icv_fragmented = true;
851 } else if (icv_max_size == icv_required_size) {
853 *is_icv_fragmented = true;
855 SSI_LOG_ERR("Unsupported num. of ICV fragments (> %d)\n",
856 MAX_ICV_NENTS_SUPPORTED);
857 nents = -1; /*unsupported*/
859 SSI_LOG_DEBUG("is_frag=%s icv_nents=%u\n",
860 (*is_icv_fragmented ? "true" : "false"), nents);
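/*
 * Map the request IV for DMA and, in the plaintext-authenticate-only case,
 * chain it into the assoc MLLI list right after the associated data.
 */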
865 static inline int ssi_buffer_mgr_aead_chain_iv(
866 struct ssi_drvdata *drvdata,
867 struct aead_request *req,
868 struct buffer_array *sg_data,
869 bool is_last, bool do_chain)
871 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
872 unsigned int hw_iv_size = areq_ctx->hw_iv_size;
873 struct device *dev = &drvdata->plat_dev->dev;
876 if (unlikely(req->iv == NULL)) {
877 areq_ctx->gen_ctx.iv_dma_addr = 0;
881 areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
882 hw_iv_size, DMA_BIDIRECTIONAL);
883 if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
884 SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
885 hw_iv_size, req->iv);
889 SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->gen_ctx.iv_dma_addr, hw_iv_size);
891 SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
893 (unsigned long long)areq_ctx->gen_ctx.iv_dma_addr);
894 if (do_chain == true && areq_ctx->plaintext_authenticate_only == true){ // TODO: what about CTR?? ask Ron
895 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
896 unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
897 unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
898 /* Chain to given list */
899 ssi_buffer_mgr_add_buffer_entry(
900 sg_data, areq_ctx->gen_ctx.iv_dma_addr + iv_ofs,
901 iv_size_to_authenc, is_last,
902 &areq_ctx->assoc.mlli_nents);
903 areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
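/*
 * Account for the associated data within the already-mapped source SGL and,
 * for the MLLI case (or when chaining is forced), add it to the buffer array.
 */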
910 static inline int ssi_buffer_mgr_aead_chain_assoc(
911 struct ssi_drvdata *drvdata,
912 struct aead_request *req,
913 struct buffer_array *sg_data,
914 bool is_last, bool do_chain)
916 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
918 uint32_t mapped_nents = 0;
919 struct scatterlist *current_sg = req->src;
920 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
921 unsigned int sg_index = 0;
922 uint32_t size_of_assoc = req->assoclen;
924 if (areq_ctx->is_gcm4543) {
925 size_of_assoc += crypto_aead_ivsize(tfm);
928 if (sg_data == NULL) {
930 goto chain_assoc_exit;
933 if (unlikely(req->assoclen == 0)) {
934 areq_ctx->assoc_buff_type = SSI_DMA_BUF_NULL;
935 areq_ctx->assoc.nents = 0;
936 areq_ctx->assoc.mlli_nents = 0;
937 SSI_LOG_DEBUG("Chain assoc of length 0: buff_type=%s nents=%u\n",
938 GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
939 areq_ctx->assoc.nents);
940 goto chain_assoc_exit;
	/* Iterate over the sgl to see how many entries hold associated data;
	 * it is assumed that if we reach here the sgl is already mapped */
945 sg_index = current_sg->length;
946 if (sg_index > size_of_assoc) { //the first entry in the scatter list contains all the associated data
950 while (sg_index <= size_of_assoc) {
951 current_sg = sg_next(current_sg);
		/* if we have reached the end of the sgl, this is unexpected */
953 if (current_sg == NULL) {
954 SSI_LOG_ERR("reached end of sg list. unexpected \n");
957 sg_index += current_sg->length;
961 if (unlikely(mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
962 SSI_LOG_ERR("Too many fragments. current %d max %d\n",
963 mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
966 areq_ctx->assoc.nents = mapped_nents;
968 /* in CCM case we have additional entry for
969 * ccm header configurations */
970 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
971 if (unlikely((mapped_nents + 1) >
972 LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
974 SSI_LOG_ERR("CCM case.Too many fragments. "
975 "Current %d max %d\n",
976 (areq_ctx->assoc.nents + 1),
977 LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
979 goto chain_assoc_exit;
983 if (likely(mapped_nents == 1) &&
984 (areq_ctx->ccm_hdr_size == ccm_header_size_null))
985 areq_ctx->assoc_buff_type = SSI_DMA_BUF_DLLI;
987 areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
989 if (unlikely((do_chain == true) ||
990 (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
992 SSI_LOG_DEBUG("Chain assoc: buff_type=%s nents=%u\n",
993 GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
994 areq_ctx->assoc.nents);
995 ssi_buffer_mgr_add_scatterlist_entry(
996 sg_data, areq_ctx->assoc.nents,
997 req->src, req->assoclen, 0, is_last,
998 &areq_ctx->assoc.mlli_nents);
999 areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
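/*
 * DLLI (single entry) data path: derive the ICV DMA and virtual addresses
 * directly from the tail of the relevant src/dst SG entry.
 */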
1006 static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
1007 struct aead_request *req,
1008 uint32_t *src_last_bytes, uint32_t *dst_last_bytes)
1010 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1011 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
1012 unsigned int authsize = areq_ctx->req_authsize;
1014 areq_ctx->is_icv_fragmented = false;
1015 if (likely(req->src == req->dst)) {
1017 areq_ctx->icv_dma_addr = sg_dma_address(
1019 (*src_last_bytes - authsize);
1020 areq_ctx->icv_virt_addr = sg_virt(
1022 (*src_last_bytes - authsize);
1023 } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
1024 /*NON-INPLACE and DECRYPT*/
1025 areq_ctx->icv_dma_addr = sg_dma_address(
1027 (*src_last_bytes - authsize);
1028 areq_ctx->icv_virt_addr = sg_virt(
1030 (*src_last_bytes - authsize);
1032 /*NON-INPLACE and ENCRYPT*/
1033 areq_ctx->icv_dma_addr = sg_dma_address(
1035 (*dst_last_bytes - authsize);
1036 areq_ctx->icv_virt_addr = sg_virt(
1038 (*dst_last_bytes - authsize);
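/*
 * MLLI data path: add the src/dst data to the buffer array and locate the
 * ICV. A fragmented ICV is redirected to mac_buf/backup_mac so that it can
 * be read or written as one contiguous block.
 */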
1042 static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
1043 struct ssi_drvdata *drvdata,
1044 struct aead_request *req,
1045 struct buffer_array *sg_data,
1046 uint32_t *src_last_bytes, uint32_t *dst_last_bytes,
1049 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1050 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
1051 unsigned int authsize = areq_ctx->req_authsize;
1052 int rc = 0, icv_nents;
1053 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1055 if (likely(req->src == req->dst)) {
1057 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1058 areq_ctx->src.nents, areq_ctx->srcSgl,
1059 areq_ctx->cryptlen,areq_ctx->srcOffset, is_last_table,
1060 &areq_ctx->src.mlli_nents);
1062 icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
1063 areq_ctx->src.nents, authsize, *src_last_bytes,
1064 &areq_ctx->is_icv_fragmented);
1065 if (unlikely(icv_nents < 0)) {
1067 goto prepare_data_mlli_exit;
1070 if (unlikely(areq_ctx->is_icv_fragmented == true)) {
			/* Backup happens only when the ICV is fragmented; ICV
			 * verification is then done by a CPU compare in order to
			 * simplify MAC verification upon request completion */
1074 if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
				/* On the ACP platform the ICV is already copied
				 * for any in-place decrypt operation, hence this
				 * code must be skipped there. */
1079 uint32_t size_to_skip = req->assoclen;
1080 if (areq_ctx->is_gcm4543) {
1081 size_to_skip += crypto_aead_ivsize(tfm);
1083 ssi_buffer_mgr_copy_scatterlist_portion(
1084 areq_ctx->backup_mac, req->src,
1085 size_to_skip+ req->cryptlen - areq_ctx->req_authsize,
1086 size_to_skip+ req->cryptlen, SSI_SG_TO_BUF);
1088 areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
1090 areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
1091 areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
		} else { /* Contiguous ICV */
			/* Should handle the case where the SG is not contiguous */
1095 areq_ctx->icv_dma_addr = sg_dma_address(
1096 &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
1097 (*src_last_bytes - authsize);
1098 areq_ctx->icv_virt_addr = sg_virt(
1099 &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
1100 (*src_last_bytes - authsize);
1103 } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
1104 /*NON-INPLACE and DECRYPT*/
1105 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1106 areq_ctx->src.nents, areq_ctx->srcSgl,
1107 areq_ctx->cryptlen, areq_ctx->srcOffset,is_last_table,
1108 &areq_ctx->src.mlli_nents);
1109 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1110 areq_ctx->dst.nents, areq_ctx->dstSgl,
1111 areq_ctx->cryptlen,areq_ctx->dstOffset, is_last_table,
1112 &areq_ctx->dst.mlli_nents);
1114 icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
1115 areq_ctx->src.nents, authsize, *src_last_bytes,
1116 &areq_ctx->is_icv_fragmented);
1117 if (unlikely(icv_nents < 0)) {
1119 goto prepare_data_mlli_exit;
1122 if (unlikely(areq_ctx->is_icv_fragmented == true)) {
			/* Backup happens only when the ICV is fragmented; ICV
			 * verification is then done by a CPU compare in order to
			 * simplify MAC verification upon request completion */
1126 uint32_t size_to_skip = req->assoclen;
1127 if (areq_ctx->is_gcm4543) {
1128 size_to_skip += crypto_aead_ivsize(tfm);
1130 ssi_buffer_mgr_copy_scatterlist_portion(
1131 areq_ctx->backup_mac, req->src,
1132 size_to_skip+ req->cryptlen - areq_ctx->req_authsize,
1133 size_to_skip+ req->cryptlen, SSI_SG_TO_BUF);
1134 areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
		} else { /* Contiguous ICV */
			/* Should handle the case where the SG is not contiguous */
1137 areq_ctx->icv_dma_addr = sg_dma_address(
1138 &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
1139 (*src_last_bytes - authsize);
1140 areq_ctx->icv_virt_addr = sg_virt(
1141 &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
1142 (*src_last_bytes - authsize);
1146 /*NON-INPLACE and ENCRYPT*/
1147 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1148 areq_ctx->dst.nents, areq_ctx->dstSgl,
1149 areq_ctx->cryptlen,areq_ctx->dstOffset, is_last_table,
1150 &areq_ctx->dst.mlli_nents);
1151 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1152 areq_ctx->src.nents, areq_ctx->srcSgl,
1153 areq_ctx->cryptlen, areq_ctx->srcOffset,is_last_table,
1154 &areq_ctx->src.mlli_nents);
1156 icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dstSgl,
1157 areq_ctx->dst.nents, authsize, *dst_last_bytes,
1158 &areq_ctx->is_icv_fragmented);
1159 if (unlikely(icv_nents < 0)) {
1161 goto prepare_data_mlli_exit;
1164 if (likely(areq_ctx->is_icv_fragmented == false)) {
1166 areq_ctx->icv_dma_addr = sg_dma_address(
1167 &areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
1168 (*dst_last_bytes - authsize);
1169 areq_ctx->icv_virt_addr = sg_virt(
1170 &areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
1171 (*dst_last_bytes - authsize);
1173 areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
1174 areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
1178 prepare_data_mlli_exit:
1182 static inline int ssi_buffer_mgr_aead_chain_data(
1183 struct ssi_drvdata *drvdata,
1184 struct aead_request *req,
1185 struct buffer_array *sg_data,
1186 bool is_last_table, bool do_chain)
1188 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1189 struct device *dev = &drvdata->plat_dev->dev;
1190 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
1191 unsigned int authsize = areq_ctx->req_authsize;
1192 int src_last_bytes = 0, dst_last_bytes = 0;
1194 uint32_t src_mapped_nents = 0, dst_mapped_nents = 0;
1195 uint32_t offset = 0;
1196 unsigned int size_for_map = req->assoclen +req->cryptlen; /*non-inplace mode*/
1197 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1198 uint32_t sg_index = 0;
1199 bool chained = false;
1200 bool is_gcm4543 = areq_ctx->is_gcm4543;
1201 uint32_t size_to_skip = req->assoclen;
1203 size_to_skip += crypto_aead_ivsize(tfm);
1205 offset = size_to_skip;
1207 if (sg_data == NULL) {
1209 goto chain_data_exit;
1211 areq_ctx->srcSgl = req->src;
1212 areq_ctx->dstSgl = req->dst;
1215 size_for_map += crypto_aead_ivsize(tfm);
1218 size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize:0;
1219 src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src,size_for_map,&src_last_bytes, &chained);
1220 sg_index = areq_ctx->srcSgl->length;
1221 //check where the data starts
1222 while (sg_index <= size_to_skip) {
1223 offset -= areq_ctx->srcSgl->length;
1224 areq_ctx->srcSgl = sg_next(areq_ctx->srcSgl);
		/* if we have reached the end of the sgl, this is unexpected */
1226 if (areq_ctx->srcSgl == NULL) {
1227 SSI_LOG_ERR("reached end of sg list. unexpected \n");
1230 sg_index += areq_ctx->srcSgl->length;
1233 if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES))
1235 SSI_LOG_ERR("Too many fragments. current %d max %d\n",
1236 src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1240 areq_ctx->src.nents = src_mapped_nents;
1242 areq_ctx->srcOffset = offset;
1244 if (req->src != req->dst) {
1245 size_for_map = req->assoclen +req->cryptlen;
1246 size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
1248 size_for_map += crypto_aead_ivsize(tfm);
1251 rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst, size_for_map,
1252 DMA_BIDIRECTIONAL, &(areq_ctx->dst.nents),
1253 LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
1255 if (unlikely(rc != 0)) {
1257 goto chain_data_exit;
1261 dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst,size_for_map,&dst_last_bytes, &chained);
1262 sg_index = areq_ctx->dstSgl->length;
1263 offset = size_to_skip;
1265 //check where the data starts
1266 while (sg_index <= size_to_skip) {
1268 offset -= areq_ctx->dstSgl->length;
1269 areq_ctx->dstSgl = sg_next(areq_ctx->dstSgl);
			/* if we have reached the end of the sgl, this is unexpected */
1271 if (areq_ctx->dstSgl == NULL) {
1272 SSI_LOG_ERR("reached end of sg list. unexpected \n");
1275 sg_index += areq_ctx->dstSgl->length;
1278 if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES))
1280 SSI_LOG_ERR("Too many fragments. current %d max %d\n",
1281 dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1284 areq_ctx->dst.nents = dst_mapped_nents;
1285 areq_ctx->dstOffset = offset;
1286 if ((src_mapped_nents > 1) ||
1287 (dst_mapped_nents > 1) ||
1288 (do_chain == true)) {
1289 areq_ctx->data_buff_type = SSI_DMA_BUF_MLLI;
1290 rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req, sg_data,
1291 &src_last_bytes, &dst_last_bytes, is_last_table);
1293 areq_ctx->data_buff_type = SSI_DMA_BUF_DLLI;
1294 ssi_buffer_mgr_prepare_aead_data_dlli(
1295 req, &src_last_bytes, &dst_last_bytes);
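/*
 * Assign SRAM addresses to the assoc/src/dst MLLI tables and, when the flow
 * is not single-pass, fold the data MLLI entries into the assoc table count.
 */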
1302 static void ssi_buffer_mgr_update_aead_mlli_nents( struct ssi_drvdata *drvdata,
1303 struct aead_request *req)
1305 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1306 uint32_t curr_mlli_size = 0;
1308 if (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
1309 areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
1310 curr_mlli_size = areq_ctx->assoc.mlli_nents *
1311 LLI_ENTRY_BYTE_SIZE;
1314 if (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI) {
		/* In-place case: dst nents equal src nents */
1316 if (req->src == req->dst) {
1317 areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
1318 areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
1320 areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
1321 if (areq_ctx->is_single_pass == false)
1322 areq_ctx->assoc.mlli_nents +=
1323 areq_ctx->src.mlli_nents;
1325 if (areq_ctx->gen_ctx.op_type ==
1326 DRV_CRYPTO_DIRECTION_DECRYPT) {
1327 areq_ctx->src.sram_addr =
1328 drvdata->mlli_sram_addr +
1330 areq_ctx->dst.sram_addr =
1331 areq_ctx->src.sram_addr +
1332 areq_ctx->src.mlli_nents *
1333 LLI_ENTRY_BYTE_SIZE;
1334 if (areq_ctx->is_single_pass == false)
1335 areq_ctx->assoc.mlli_nents +=
1336 areq_ctx->src.mlli_nents;
1338 areq_ctx->dst.sram_addr =
1339 drvdata->mlli_sram_addr +
1341 areq_ctx->src.sram_addr =
1342 areq_ctx->dst.sram_addr +
1343 areq_ctx->dst.mlli_nents *
1344 LLI_ENTRY_BYTE_SIZE;
1345 if (areq_ctx->is_single_pass == false)
1346 areq_ctx->assoc.mlli_nents +=
1347 areq_ctx->dst.mlli_nents;
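/*
 * Top-level AEAD mapping: map the MAC buffer, the CCM/GCM helper blocks, the
 * IV and the src/dst SGLs, then build the assoc/data MLLI tables as required
 * by the single-pass or double-pass flow.
 */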
1353 int ssi_buffer_mgr_map_aead_request(
1354 struct ssi_drvdata *drvdata, struct aead_request *req)
1356 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1357 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1358 struct device *dev = &drvdata->plat_dev->dev;
1359 struct buffer_array sg_data;
1360 unsigned int authsize = areq_ctx->req_authsize;
1361 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1363 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1364 bool is_gcm4543 = areq_ctx->is_gcm4543;
1366 uint32_t mapped_nents = 0;
1367 uint32_t dummy = 0; /*used for the assoc data fragments */
1368 uint32_t size_to_map = 0;
1370 mlli_params->curr_pool = NULL;
1371 sg_data.num_of_buffers = 0;
1374 if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
1375 likely(req->src == req->dst))
1377 uint32_t size_to_skip = req->assoclen;
1379 size_to_skip += crypto_aead_ivsize(tfm);
		/* copy the MAC to a temporary location to deal with possible
		 * data overwriting caused by a cache coherency problem */
1383 ssi_buffer_mgr_copy_scatterlist_portion(
1384 areq_ctx->backup_mac, req->src,
1385 size_to_skip+ req->cryptlen - areq_ctx->req_authsize,
1386 size_to_skip+ req->cryptlen, SSI_SG_TO_BUF);
	/* calculate the cipher data size; the ICV is removed in decrypt */
1391 areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
1392 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1394 (req->cryptlen - authsize);
1396 areq_ctx->mac_buf_dma_addr = dma_map_single(dev,
1397 areq_ctx->mac_buf, MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
1398 if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) {
1399 SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK for DMA failed\n",
1400 MAX_MAC_SIZE, areq_ctx->mac_buf);
1402 goto aead_map_failure;
1404 SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->mac_buf_dma_addr, MAX_MAC_SIZE);
1406 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
1407 areq_ctx->ccm_iv0_dma_addr = dma_map_single(dev,
1408 (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
1409 AES_BLOCK_SIZE, DMA_TO_DEVICE);
1411 if (unlikely(dma_mapping_error(dev, areq_ctx->ccm_iv0_dma_addr))) {
1412 SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK "
1413 "for DMA failed\n", AES_BLOCK_SIZE,
1414 (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET));
1415 areq_ctx->ccm_iv0_dma_addr = 0;
1417 goto aead_map_failure;
1419 SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->ccm_iv0_dma_addr,
1421 if (ssi_aead_handle_config_buf(dev, areq_ctx,
1422 areq_ctx->ccm_config, &sg_data, req->assoclen) != 0) {
1424 goto aead_map_failure;
1428 #if SSI_CC_HAS_AES_GCM
1429 if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1430 areq_ctx->hkey_dma_addr = dma_map_single(dev,
1431 areq_ctx->hkey, AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
1432 if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) {
1433 SSI_LOG_ERR("Mapping hkey %u B at va=%pK for DMA failed\n",
1434 AES_BLOCK_SIZE, areq_ctx->hkey);
1436 goto aead_map_failure;
1438 SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->hkey_dma_addr, AES_BLOCK_SIZE);
1440 areq_ctx->gcm_block_len_dma_addr = dma_map_single(dev,
1441 &areq_ctx->gcm_len_block, AES_BLOCK_SIZE, DMA_TO_DEVICE);
1442 if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_block_len_dma_addr))) {
1443 SSI_LOG_ERR("Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1444 AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1446 goto aead_map_failure;
1448 SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_block_len_dma_addr, AES_BLOCK_SIZE);
1450 areq_ctx->gcm_iv_inc1_dma_addr = dma_map_single(dev,
1451 areq_ctx->gcm_iv_inc1,
1452 AES_BLOCK_SIZE, DMA_TO_DEVICE);
1454 if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc1_dma_addr))) {
1455 SSI_LOG_ERR("Mapping gcm_iv_inc1 %u B at va=%pK "
1456 "for DMA failed\n", AES_BLOCK_SIZE,
1457 (areq_ctx->gcm_iv_inc1));
1458 areq_ctx->gcm_iv_inc1_dma_addr = 0;
1460 goto aead_map_failure;
1462 SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_iv_inc1_dma_addr,
1465 areq_ctx->gcm_iv_inc2_dma_addr = dma_map_single(dev,
1466 areq_ctx->gcm_iv_inc2,
1467 AES_BLOCK_SIZE, DMA_TO_DEVICE);
1469 if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc2_dma_addr))) {
1470 SSI_LOG_ERR("Mapping gcm_iv_inc2 %u B at va=%pK "
1471 "for DMA failed\n", AES_BLOCK_SIZE,
1472 (areq_ctx->gcm_iv_inc2));
1473 areq_ctx->gcm_iv_inc2_dma_addr = 0;
1475 goto aead_map_failure;
1477 SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_iv_inc2_dma_addr,
1480 #endif /*SSI_CC_HAS_AES_GCM*/
1482 size_to_map = req->cryptlen + req->assoclen;
1483 if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1484 size_to_map += authsize;
1487 size_to_map += crypto_aead_ivsize(tfm);
1488 rc = ssi_buffer_mgr_map_scatterlist(dev, req->src,
1489 size_to_map, DMA_BIDIRECTIONAL, &(areq_ctx->src.nents),
1490 LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES+LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
1491 if (unlikely(rc != 0)) {
1493 goto aead_map_failure;
1496 if (likely(areq_ctx->is_single_pass == true)) {
1498 * Create MLLI table for:
	 * Note: IV is a contiguous buffer (not an SGL)
1503 rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, true, false);
1504 if (unlikely(rc != 0))
1505 goto aead_map_failure;
1506 rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, true, false);
1507 if (unlikely(rc != 0))
1508 goto aead_map_failure;
1509 rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, false);
1510 if (unlikely(rc != 0))
1511 goto aead_map_failure;
1512 } else { /* DOUBLE-PASS flow */
1514 * Prepare MLLI table(s) in this order:
1516 * If ENCRYPT/DECRYPT (inplace):
1517 * (1) MLLI table for assoc
1518 * (2) IV entry (chained right after end of assoc)
1519 * (3) MLLI for src/dst (inplace operation)
1521 * If ENCRYPT (non-inplace)
1522 * (1) MLLI table for assoc
1523 * (2) IV entry (chained right after end of assoc)
1527 * If DECRYPT (non-inplace)
1528 * (1) MLLI table for assoc
1529 * (2) IV entry (chained right after end of assoc)
1533 rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, false, true);
1534 if (unlikely(rc != 0))
1535 goto aead_map_failure;
1536 rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, false, true);
1537 if (unlikely(rc != 0))
1538 goto aead_map_failure;
1539 rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, true);
1540 if (unlikely(rc != 0))
1541 goto aead_map_failure;
1544 /* Mlli support -start building the MLLI according to the above results */
1546 (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
1547 (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI))) {
1549 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1550 rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
1551 if (unlikely(rc != 0)) {
1552 goto aead_map_failure;
1555 ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
1556 SSI_LOG_DEBUG("assoc params mn %d\n",areq_ctx->assoc.mlli_nents);
1557 SSI_LOG_DEBUG("src params mn %d\n",areq_ctx->src.mlli_nents);
1558 SSI_LOG_DEBUG("dst params mn %d\n",areq_ctx->dst.mlli_nents);
1563 ssi_buffer_mgr_unmap_aead_request(dev, req);
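/*
 * Map the data for a final hash operation: any previously buffered bytes
 * plus the new source data, generating an MLLI table when more than one SG
 * entry is involved.
 */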
1567 int ssi_buffer_mgr_map_hash_request_final(
1568 struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update)
1570 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1571 struct device *dev = &drvdata->plat_dev->dev;
1572 uint8_t* curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
1574 uint32_t *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
1575 &areq_ctx->buff0_cnt;
1576 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1577 struct buffer_array sg_data;
1578 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1580 uint32_t mapped_nents = 0;
1582 SSI_LOG_DEBUG(" final params : curr_buff=%pK "
1583 "curr_buff_cnt=0x%X nbytes = 0x%X "
1584 "src=%pK curr_index=%u\n",
1585 curr_buff, *curr_buff_cnt, nbytes,
1586 src, areq_ctx->buff_index);
1587 /* Init the type of the dma buffer */
1588 areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
1589 mlli_params->curr_pool = NULL;
1590 sg_data.num_of_buffers = 0;
1591 areq_ctx->in_nents = 0;
1593 if (unlikely(nbytes == 0 && *curr_buff_cnt == 0)) {
	/* TODO: copy data in case the buffer is large enough for the operation */
1599 /* map the previous buffer */
1600 if (*curr_buff_cnt != 0 ) {
1601 if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
1602 *curr_buff_cnt, &sg_data) != 0) {
1607 if (src && (nbytes > 0) && do_update) {
1608 if ( unlikely( ssi_buffer_mgr_map_scatterlist( dev,src,
1611 &areq_ctx->in_nents,
1612 LLI_MAX_NUM_OF_DATA_ENTRIES,
1613 &dummy, &mapped_nents))){
1614 goto unmap_curr_buff;
1616 if ( src && (mapped_nents == 1)
1617 && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) ) {
1618 memcpy(areq_ctx->buff_sg,src,
1619 sizeof(struct scatterlist));
1620 areq_ctx->buff_sg->length = nbytes;
1621 areq_ctx->curr_sg = areq_ctx->buff_sg;
1622 areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
1624 areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
1630 if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
1631 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1632 /* add the src data to the sg_data */
1633 ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
1637 true, &areq_ctx->mlli_nents);
1638 if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
1639 mlli_params) != 0)) {
1640 goto fail_unmap_din;
1643 /* change the buffer index for the unmap function */
1644 areq_ctx->buff_index = (areq_ctx->buff_index^1);
1645 SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
1646 GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
1650 dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1653 if (*curr_buff_cnt != 0 ) {
1654 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
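/*
 * Map the data for a hash update: input smaller than one block is only
 * buffered, the residue that does not fill a whole block is carried over to
 * the next buffer, and the block-aligned remainder is mapped for DMA.
 */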
1659 int ssi_buffer_mgr_map_hash_request_update(
1660 struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size)
1662 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1663 struct device *dev = &drvdata->plat_dev->dev;
1664 uint8_t* curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
1666 uint32_t *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
1667 &areq_ctx->buff0_cnt;
1668 uint8_t* next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
1670 uint32_t *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
1671 &areq_ctx->buff1_cnt;
1672 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1673 unsigned int update_data_len;
1674 uint32_t total_in_len = nbytes + *curr_buff_cnt;
1675 struct buffer_array sg_data;
1676 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1677 unsigned int swap_index = 0;
1679 uint32_t mapped_nents = 0;
1681 SSI_LOG_DEBUG(" update params : curr_buff=%pK "
1682 "curr_buff_cnt=0x%X nbytes=0x%X "
1683 "src=%pK curr_index=%u \n",
1684 curr_buff, *curr_buff_cnt, nbytes,
1685 src, areq_ctx->buff_index);
1686 /* Init the type of the dma buffer */
1687 areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
1688 mlli_params->curr_pool = NULL;
1689 areq_ctx->curr_sg = NULL;
1690 sg_data.num_of_buffers = 0;
1691 areq_ctx->in_nents = 0;
1693 if (unlikely(total_in_len < block_size)) {
1694 SSI_LOG_DEBUG(" less than one block: curr_buff=%pK "
1695 "*curr_buff_cnt=0x%X copy_to=%pK\n",
1696 curr_buff, *curr_buff_cnt,
1697 &curr_buff[*curr_buff_cnt]);
1698 areq_ctx->in_nents =
1699 ssi_buffer_mgr_get_sgl_nents(src,
1702 sg_copy_to_buffer(src, areq_ctx->in_nents,
1703 &curr_buff[*curr_buff_cnt], nbytes);
1704 *curr_buff_cnt += nbytes;
1708 /* Calculate the residue size*/
1709 *next_buff_cnt = total_in_len & (block_size - 1);
1710 /* update data len */
1711 update_data_len = total_in_len - *next_buff_cnt;
1713 SSI_LOG_DEBUG(" temp length : *next_buff_cnt=0x%X "
1714 "update_data_len=0x%X\n",
1715 *next_buff_cnt, update_data_len);
1717 /* Copy the new residue to next buffer */
1718 if (*next_buff_cnt != 0) {
1719 SSI_LOG_DEBUG(" handle residue: next buff %pK skip data %u"
1720 " residue %u \n", next_buff,
1721 (update_data_len - *curr_buff_cnt),
1723 ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
1724 (update_data_len -*curr_buff_cnt),
1725 nbytes,SSI_SG_TO_BUF);
1726 /* change the buffer index for next operation */
1730 if (*curr_buff_cnt != 0) {
1731 if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
1732 *curr_buff_cnt, &sg_data) != 0) {
1735 /* change the buffer index for next operation */
1739 if ( update_data_len > *curr_buff_cnt ) {
1740 if ( unlikely( ssi_buffer_mgr_map_scatterlist( dev,src,
1741 (update_data_len -*curr_buff_cnt),
1743 &areq_ctx->in_nents,
1744 LLI_MAX_NUM_OF_DATA_ENTRIES,
1745 &dummy, &mapped_nents))){
1746 goto unmap_curr_buff;
1748 if ( (mapped_nents == 1)
1749 && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) ) {
1750 /* only one entry in the SG and no previous data */
1751 memcpy(areq_ctx->buff_sg,src,
1752 sizeof(struct scatterlist));
1753 areq_ctx->buff_sg->length = update_data_len;
1754 areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
1755 areq_ctx->curr_sg = areq_ctx->buff_sg;
1757 areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
1761 if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
1762 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1763 /* add the src data to the sg_data */
1764 ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
1767 (update_data_len - *curr_buff_cnt), 0,
1768 true, &areq_ctx->mlli_nents);
1769 if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
1770 mlli_params) != 0)) {
1771 goto fail_unmap_din;
1775 areq_ctx->buff_index = (areq_ctx->buff_index^swap_index);
1780 dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1783 if (*curr_buff_cnt != 0 ) {
1784 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
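/*
 * Release the hash request DMA mappings: the MLLI table (if allocated), the
 * source SGL and the staging buffer mapping.
 */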
1789 void ssi_buffer_mgr_unmap_hash_request(
1790 struct device *dev, void *ctx, struct scatterlist *src, bool do_revert)
1792 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1793 uint32_t *prev_len = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
1794 &areq_ctx->buff1_cnt;
1796 /*In case a pool was set, a table was
1797 allocated and should be released */
1798 if (areq_ctx->mlli_params.curr_pool != NULL) {
1799 SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n",
1800 (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
1801 areq_ctx->mlli_params.mlli_virt_addr);
1802 SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->mlli_params.mlli_dma_addr);
1803 dma_pool_free(areq_ctx->mlli_params.curr_pool,
1804 areq_ctx->mlli_params.mlli_virt_addr,
1805 areq_ctx->mlli_params.mlli_dma_addr);
1808 if ((src) && likely(areq_ctx->in_nents != 0)) {
1809 SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=0x%llX len=0x%X\n",
1811 (unsigned long long)sg_dma_address(src),
1813 SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(src));
1814 dma_unmap_sg(dev, src,
1815 areq_ctx->in_nents, DMA_TO_DEVICE);
1818 if (*prev_len != 0) {
1819 SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK"
1820 "dma=0x%llX len 0x%X\n",
1821 sg_virt(areq_ctx->buff_sg),
1822 (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
1823 sg_dma_len(areq_ctx->buff_sg));
1824 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1826 /* clean the previous data length for update operation */
1829 areq_ctx->buff_index ^= 1;
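/*
 * Allocate the buffer manager handle and the DMA pool used for MLLI tables.
 */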
1834 int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
1836 struct buff_mgr_handle *buff_mgr_handle;
1837 struct device *dev = &drvdata->plat_dev->dev;
1839 buff_mgr_handle = (struct buff_mgr_handle *)
1840 kmalloc(sizeof(struct buff_mgr_handle), GFP_KERNEL);
1841 if (buff_mgr_handle == NULL)
1844 drvdata->buff_mgr_handle = buff_mgr_handle;
1846 buff_mgr_handle->mlli_buffs_pool = dma_pool_create(
1847 "dx_single_mlli_tables", dev,
1848 MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
1849 LLI_ENTRY_BYTE_SIZE,
1850 MLLI_TABLE_MIN_ALIGNMENT, 0);
1852 if (unlikely(buff_mgr_handle->mlli_buffs_pool == NULL))
1858 ssi_buffer_mgr_fini(drvdata);
1862 int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
1864 struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
1866 if (buff_mgr_handle != NULL) {
1867 dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
1868 kfree(drvdata->buff_mgr_handle);
1869 drvdata->buff_mgr_handle = NULL;