/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/crypto.h>
#include <linux/version.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/hash.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "ssi_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "ssi_cipher.h"
#include "ssi_hash.h"
#include "ssi_aead.h"

#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
#define MLLI_TABLE_MIN_ALIGNMENT 4 /* Force the MLLI table to be aligned to u32 */
#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES (2 * LLI_MAX_NUM_OF_DATA_ENTRIES + \
				       LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)

#ifdef CC_DEBUG
#define DUMP_SGL(sg) \
	while (sg) { \
		SSI_LOG_DEBUG("page=%p offset=%u length=%u (dma_len=%u) " \
			      "dma_addr=%08x\n", sg_page(sg), (sg)->offset, \
			      (sg)->length, sg_dma_len(sg), (sg)->dma_address); \
		(sg) = sg_next(sg); \
	}
#define DUMP_MLLI_TABLE(mlli_p, nents) \
	do { \
		SSI_LOG_DEBUG("mlli=%pK nents=%u\n", (mlli_p), (nents)); \
		while ((nents)--) { \
			SSI_LOG_DEBUG("addr=0x%08X size=0x%08X\n", \
				      (mlli_p)[LLI_WORD0_OFFSET], \
				      (mlli_p)[LLI_WORD1_OFFSET]); \
			(mlli_p) += LLI_ENTRY_WORD_SIZE; \
		} \
	} while (0)
#define GET_DMA_BUFFER_TYPE(buff_type) ( \
	((buff_type) == SSI_DMA_BUF_NULL) ? "BUF_NULL" : \
	((buff_type) == SSI_DMA_BUF_DLLI) ? "BUF_DLLI" : \
	((buff_type) == SSI_DMA_BUF_MLLI) ? "BUF_MLLI" : "BUF_INVALID")
#else
#define DX_BUFFER_MGR_DUMP_SGL(sg)
#define DX_BUFFER_MGR_DUMP_MLLI_TABLE(mlli_p, nents)
#define GET_DMA_BUFFER_TYPE(buff_type)
#endif

enum dma_buffer_type {
	DMA_NULL_TYPE = -1,
	DMA_SGL_TYPE = 1,
	DMA_BUFF_TYPE = 2,
};

struct buff_mgr_handle {
	struct dma_pool *mlli_buffs_pool;
};

union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};

struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};

/**
 * ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries.
 *
 * @sg_list: [IN] SG list to scan.
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 * @is_chained: [OUT] Set to true if the SGL contains chained entries.
 */
static unsigned int ssi_buffer_mgr_get_sgl_nents(
	struct scatterlist *sg_list, unsigned int nbytes, u32 *lbytes,
	bool *is_chained)
{
	unsigned int nents = 0;

	while (nbytes != 0) {
		if (sg_is_chain(sg_list)) {
			SSI_LOG_ERR("Unexpected chained entry in sg (entry=0x%X)\n",
				    nents);
			BUG();
		}
		if (sg_list->length != 0) {
			nents++;
			/* get the number of bytes in the last entry */
			*lbytes = nbytes;
			nbytes -= (sg_list->length > nbytes) ?
					nbytes : sg_list->length;
			sg_list = sg_next(sg_list);
		} else {
			sg_list = (struct scatterlist *)sg_page(sg_list);
			if (is_chained != NULL)
				*is_chained = true;
		}
	}
	SSI_LOG_DEBUG("nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}

/**
 * ssi_buffer_mgr_zero_sgl() - Zero scatter list data.
 *
 * @sgl: [IN] SG list to zero.
 * @data_len: [IN] Number of bytes to zero, starting from the list head.
 */
void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
{
	struct scatterlist *current_sg = sgl;
	u32 sg_index = 0;

	while (sg_index <= data_len) {
		if (current_sg == NULL) {
			/* reached the end of the sgl --> just return back */
			return;
		}
		memset(sg_virt(current_sg), 0, current_sg->length);
		sg_index += current_sg->length;
		current_sg = sg_next(current_sg);
	}
}

/**
 * ssi_buffer_mgr_copy_scatterlist_portion() - Copy scatter list data,
 * from to_skip to end, to dest and vice versa
 *
 * @dest: [IN/OUT] Linear buffer to copy to/from.
 * @sg: [IN/OUT] SG list to copy from/to.
 * @to_skip: [IN] Number of bytes to skip at the head of the SG list.
 * @end: [IN] Offset of the last byte to copy.
 * @direct: [IN] SSI_SG_TO_BUF copies SG->dest, SSI_SG_FROM_BUF dest->SG.
 */
void ssi_buffer_mgr_copy_scatterlist_portion(
	u8 *dest, struct scatterlist *sg,
	u32 to_skip, u32 end,
	enum ssi_sg_cpy_direct direct)
{
	u32 nents, lbytes;

	nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip), to_skip,
		       (direct == SSI_SG_TO_BUF));
}
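
/**
 * ssi_buffer_mgr_render_buff_to_mlli() - Render a contiguous buffer into
 * MLLI entries, splitting it at CC_MAX_MLLI_ENTRY_SIZE boundaries.
 *
 * @buff_dma: [IN] DMA address of the buffer to render.
 * @buff_size: [IN] Buffer size in bytes.
 * @curr_nents: [IN/OUT] Running count of MLLI entries built so far.
 * @mlli_entry_pp: [IN/OUT] Cursor into the MLLI table, advanced past the
 *	entries written here. Returns -ENOMEM on table overflow.
 */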
static inline int ssi_buffer_mgr_render_buff_to_mlli(
	dma_addr_t buff_dma, u32 buff_size, u32 *curr_nents,
	u32 **mlli_entry_pp)
{
	u32 *mlli_entry_p = *mlli_entry_pp;
	u32 new_nents;

	/* Verify there is no memory overflow*/
	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
		return -ENOMEM;

	/*handle buffer longer than 64 kbytes */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		cc_lli_set_addr(mlli_entry_p, buff_dma);
		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
		SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",
			      *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
			      mlli_entry_p[LLI_WORD1_OFFSET]);
		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
		mlli_entry_p = mlli_entry_p + 2;
		(*curr_nents)++;
	}
	/*Last entry */
	cc_lli_set_addr(mlli_entry_p, buff_dma);
	cc_lli_set_size(mlli_entry_p, buff_size);
	SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",
		      *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
		      mlli_entry_p[LLI_WORD1_OFFSET]);
	mlli_entry_p = mlli_entry_p + 2;
	*mlli_entry_pp = mlli_entry_p;
	(*curr_nents)++;
	return 0;
}
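
/**
 * ssi_buffer_mgr_render_scatterlist_to_mlli() - Render a DMA-mapped SG list
 * into MLLI entries, one or more entries per SG element.
 *
 * @sgl: [IN] Mapped SG list to render.
 * @sgl_data_len: [IN] Number of data bytes to cover.
 * @sglOffset: [IN] Offset into the first SG element.
 * @curr_nents: [IN/OUT] Running count of MLLI entries built so far.
 * @mlli_entry_pp: [IN/OUT] Cursor into the MLLI table.
 */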
static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
	struct scatterlist *sgl, u32 sgl_data_len, u32 sglOffset,
	u32 *curr_nents, u32 **mlli_entry_pp)
{
	struct scatterlist *curr_sgl = sgl;
	u32 *mlli_entry_p = *mlli_entry_pp;
	s32 rc = 0;

	for ( ; (curr_sgl != NULL) && (sgl_data_len != 0);
	      curr_sgl = sg_next(curr_sgl)) {
		u32 entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sglOffset) ?
				sg_dma_len(curr_sgl) - sglOffset :
				sgl_data_len;
		sgl_data_len -= entry_data_len;
		rc = ssi_buffer_mgr_render_buff_to_mlli(
			sg_dma_address(curr_sgl) + sglOffset, entry_data_len,
			curr_nents, &mlli_entry_p);
		if (rc != 0)
			return rc;
		sglOffset = 0;
	}
	*mlli_entry_pp = mlli_entry_p;
	return 0;
}
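
/**
 * ssi_buffer_mgr_generate_mlli() - Allocate an MLLI table from the pool and
 * link all accumulated buffer_array entries into it.
 *
 * @dev: [IN] Device handle used for the DMA pool.
 * @sg_data: [IN] Accumulated SG/buffer entries to render.
 * @mlli_params: [IN/OUT] Receives the table's virtual/DMA addresses and length.
 */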
static int ssi_buffer_mgr_generate_mlli(
	struct device *dev,
	struct buffer_array *sg_data,
	struct mlli_params *mlli_params)
{
	u32 *mlli_p;
	u32 total_nents = 0, prev_total_nents = 0;
	int rc = 0, i;

	SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers);

	/* Allocate memory from the pointed pool */
	mlli_params->mlli_virt_addr = dma_pool_alloc(
			mlli_params->curr_pool, GFP_KERNEL,
			&mlli_params->mlli_dma_addr);
	if (unlikely(mlli_params->mlli_virt_addr == NULL)) {
		SSI_LOG_ERR("dma_pool_alloc() failed\n");
		rc = -ENOMEM;
		goto build_mlli_exit;
	}
	/* Point to start of MLLI */
	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
	/* go over all SG's and link it to one MLLI table */
	for (i = 0; i < sg_data->num_of_buffers; i++) {
		if (sg_data->type[i] == DMA_SGL_TYPE)
			rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
				sg_data->entry[i].sgl,
				sg_data->total_data_len[i], sg_data->offset[i],
				&total_nents, &mlli_p);
		else /*DMA_BUFF_TYPE*/
			rc = ssi_buffer_mgr_render_buff_to_mlli(
				sg_data->entry[i].buffer_dma,
				sg_data->total_data_len[i], &total_nents,
				&mlli_p);
		if (rc != 0)
			return rc;

		/* set last bit in the current table */
		if (sg_data->mlli_nents[i] != NULL) {
			/*Calculate the current MLLI table length for the
			 *length field in the descriptor
			 */
			*(sg_data->mlli_nents[i]) +=
				(total_nents - prev_total_nents);
			prev_total_nents = total_nents;
		}
	}

	/* Set MLLI size for the bypass operation */
	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);

	SSI_LOG_DEBUG("MLLI params: virt_addr=%pK dma_addr=0x%llX mlli_len=0x%X\n",
		      mlli_params->mlli_virt_addr,
		      (unsigned long long)mlli_params->mlli_dma_addr,
		      mlli_params->mlli_len);

build_mlli_exit:
	return rc;
}
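
/**
 * ssi_buffer_mgr_add_buffer_entry() - Queue a contiguous DMA buffer in the
 * buffer_array for later rendering into an MLLI table.
 */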
static inline void ssi_buffer_mgr_add_buffer_entry(
	struct buffer_array *sgl_data,
	dma_addr_t buffer_dma, unsigned int buffer_len,
	bool is_last_entry, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	SSI_LOG_DEBUG("index=%u single_buff=0x%llX buffer_len=0x%08X is_last=%d\n",
		      index, (unsigned long long)buffer_dma, buffer_len,
		      is_last_entry);
	sgl_data->nents[index] = 1;
	sgl_data->entry[index].buffer_dma = buffer_dma;
	sgl_data->offset[index] = 0;
	sgl_data->total_data_len[index] = buffer_len;
	sgl_data->type[index] = DMA_BUFF_TYPE;
	sgl_data->is_last[index] = is_last_entry;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index] != NULL)
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}
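
/**
 * ssi_buffer_mgr_add_scatterlist_entry() - Queue a mapped SG list in the
 * buffer_array for later rendering into an MLLI table.
 */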
static inline void ssi_buffer_mgr_add_scatterlist_entry(
	struct buffer_array *sgl_data,
	unsigned int nents,
	struct scatterlist *sgl,
	unsigned int data_len,
	unsigned int data_offset,
	bool is_last_table,
	u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	SSI_LOG_DEBUG("index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
		      index, nents, sgl, data_len, is_last_table);
	sgl_data->nents[index] = nents;
	sgl_data->entry[index].sgl = sgl;
	sgl_data->offset[index] = data_offset;
	sgl_data->total_data_len[index] = data_len;
	sgl_data->type[index] = DMA_SGL_TYPE;
	sgl_data->is_last[index] = is_last_table;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index] != NULL)
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}
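
/**
 * ssi_buffer_mgr_dma_map_sg() - Map a (possibly chained) SG list entry by
 * entry. On failure, unmaps the entries mapped so far and returns 0;
 * returns nents on success.
 */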
static int
ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
			  enum dma_data_direction direction)
{
	u32 i, j;
	struct scatterlist *l_sg = sg;

	for (i = 0; i < nents; i++) {
		if (l_sg == NULL)
			break;
		if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
			SSI_LOG_ERR("dma_map_page() sg buffer failed\n");
			goto err;
		}
		l_sg = sg_next(l_sg);
	}
	return nents;

err:
	/* Restore mapped parts */
	for (j = 0; j < i; j++) {
		if (sg == NULL)
			break;
		dma_unmap_sg(dev, sg, 1, direction);
		sg = sg_next(sg);
	}
	return 0;
}
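
/**
 * ssi_buffer_mgr_map_scatterlist() - DMA-map an SG list, either as a single
 * DLLI entry or, for multi-entry lists, via dma_map_sg() (entry-by-entry for
 * chained lists). Reports the entry count, the mapped count and the byte
 * count of the last entry.
 */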
static int ssi_buffer_mgr_map_scatterlist(
	struct device *dev, struct scatterlist *sg,
	unsigned int nbytes, int direction,
	u32 *nents, u32 max_sg_nents,
	u32 *lbytes, u32 *mapped_nents)
{
	bool is_chained = false;

	if (sg_is_last(sg)) {
		/* One entry only case - set to DLLI */
		if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
			SSI_LOG_ERR("dma_map_sg() single buffer failed\n");
			return -ENOMEM;
		}
		SSI_LOG_DEBUG("Mapped sg: dma_address=0x%llX page=%p addr=%pK offset=%u length=%u\n",
			      (unsigned long long)sg_dma_address(sg),
			      sg_page(sg), sg_virt(sg),
			      sg->offset, sg->length);
		*lbytes = nbytes;
		*nents = 1;
		*mapped_nents = 1;
	} else {  /*sg_is_last*/
		*nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
						      &is_chained);
		if (*nents > max_sg_nents) {
			SSI_LOG_ERR("Too many fragments. current %d max %d\n",
				    *nents, max_sg_nents);
			*nents = 0;
			return -ENOMEM;
		}
		if (!is_chained) {
			/* In case of mmu the number of mapped nents might
			 * be changed from the original sgl nents
			 */
			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
			if (unlikely(*mapped_nents == 0)) {
				*nents = 0;
				SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
				return -ENOMEM;
			}
		} else {
			/*In this case the driver maps entry by entry so it
			 * must have the same nents before and after map
			 */
			*mapped_nents = ssi_buffer_mgr_dma_map_sg(dev, sg,
								  *nents,
								  direction);
			if (unlikely(*mapped_nents != *nents)) {
				*nents = *mapped_nents;
				SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
				return -ENOMEM;
			}
		}
	}

	return 0;
}
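
/**
 * ssi_aead_handle_config_buf() - Map the CCM configuration (A0) block and,
 * when there is associated data, queue it for a possible MLLI table.
 */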
static inline int
ssi_aead_handle_config_buf(struct device *dev,
			   struct aead_req_ctx *areq_ctx,
			   u8 *config_data,
			   struct buffer_array *sg_data,
			   unsigned int assoclen)
{
	SSI_LOG_DEBUG(" handle additional data config set to DLLI\n");
	/* create sg for the current buffer */
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
				DMA_TO_DEVICE) != 1)) {
		SSI_LOG_ERR("dma_map_sg() config buffer failed\n");
		return -ENOMEM;
	}
	SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX page=%p addr=%pK offset=%u length=%u\n",
		      (unsigned long long)sg_dma_address(&areq_ctx->ccm_adata_sg),
		      sg_page(&areq_ctx->ccm_adata_sg),
		      sg_virt(&areq_ctx->ccm_adata_sg),
		      areq_ctx->ccm_adata_sg.offset,
		      areq_ctx->ccm_adata_sg.length);
	/* prepare for case of MLLI */
	if (assoclen > 0) {
		ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1,
						     &areq_ctx->ccm_adata_sg,
						     (AES_BLOCK_SIZE +
						      areq_ctx->ccm_hdr_size),
						     0, false, NULL);
	}
	return 0;
}
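
/**
 * ssi_ahash_handle_curr_buf() - Map the previously accumulated hash block
 * buffer as a single-entry SG (DLLI) and queue it for a possible MLLI table.
 */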
static inline int ssi_ahash_handle_curr_buf(struct device *dev,
					    struct ahash_req_ctx *areq_ctx,
					    u8 *curr_buff,
					    u32 curr_buff_cnt,
					    struct buffer_array *sg_data)
{
	SSI_LOG_DEBUG(" handle curr buff %x set to DLLI\n", curr_buff_cnt);
	/* create sg for the current buffer */
	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
	if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
				DMA_TO_DEVICE) != 1)) {
		SSI_LOG_ERR("dma_map_sg() src buffer failed\n");
		return -ENOMEM;
	}
	SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX page=%p addr=%pK offset=%u length=%u\n",
		      (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
		      sg_page(areq_ctx->buff_sg),
		      sg_virt(areq_ctx->buff_sg),
		      areq_ctx->buff_sg->offset,
		      areq_ctx->buff_sg->length);
	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
	areq_ctx->curr_sg = areq_ctx->buff_sg;
	areq_ctx->in_nents = 0;
	/* prepare for case of MLLI */
	ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, areq_ctx->buff_sg,
					     curr_buff_cnt, 0, false, NULL);
	return 0;
}
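
/**
 * ssi_buffer_mgr_unmap_blkcipher_request() - Release the IV, MLLI table and
 * src/dst SG mappings taken by ssi_buffer_mgr_map_blkcipher_request().
 */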
void ssi_buffer_mgr_unmap_blkcipher_request(
	struct device *dev,
	void *ctx,
	unsigned int ivsize,
	struct scatterlist *src,
	struct scatterlist *dst)
{
	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;

	if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
		SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n",
			      (unsigned long long)req_ctx->gen_ctx.iv_dma_addr,
			      ivsize);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize,
				 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
				 DMA_TO_DEVICE);
	}
	/* Release pool */
	if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
	SSI_LOG_DEBUG("Unmapped req->src=%pK\n", sg_virt(src));

	if (src != dst) {
		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
		SSI_LOG_DEBUG("Unmapped req->dst=%pK\n", sg_virt(dst));
	}
}
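
/**
 * ssi_buffer_mgr_map_blkcipher_request() - DMA-map the IV and src/dst SG
 * lists of a blkcipher request, generating an MLLI table when any of them
 * spans more than one entry.
 */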
int ssi_buffer_mgr_map_blkcipher_request(
	struct ssi_drvdata *drvdata,
	void *ctx,
	unsigned int ivsize,
	unsigned int nbytes,
	void *info,
	struct scatterlist *src,
	struct scatterlist *dst)
{
	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
	struct mlli_params *mlli_params = &req_ctx->mlli_params;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	struct device *dev = &drvdata->plat_dev->dev;
	struct buffer_array sg_data;
	u32 dummy = 0;
	int rc = 0;
	u32 mapped_nents = 0;

	req_ctx->dma_buf_type = SSI_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Map IV buffer */
	if (likely(ivsize != 0)) {
		dump_byte_array("iv", (u8 *)info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, (void *)info,
				       ivsize,
				       req_ctx->is_giv ? DMA_BIDIRECTIONAL :
				       DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev,
					       req_ctx->gen_ctx.iv_dma_addr))) {
			SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
				    ivsize, info);
			return -ENOMEM;
		}
		SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
			      ivsize, info,
			      (unsigned long long)req_ctx->gen_ctx.iv_dma_addr);
	} else {
		req_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src SGL */
	rc = ssi_buffer_mgr_map_scatterlist(dev, src,
					    nbytes, DMA_BIDIRECTIONAL,
					    &req_ctx->in_nents,
					    LLI_MAX_NUM_OF_DATA_ENTRIES,
					    &dummy, &mapped_nents);
	if (unlikely(rc != 0)) {
		rc = -ENOMEM;
		goto ablkcipher_exit;
	}
	if (mapped_nents > 1)
		req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;

	if (unlikely(src == dst)) {
		/* Handle inplace operation */
		if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
			req_ctx->out_nents = 0;
			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
				req_ctx->in_nents, src,
				nbytes, 0, true, &req_ctx->in_mlli_nents);
		}
	} else {
		/* Map the dst sg */
		if (unlikely(ssi_buffer_mgr_map_scatterlist(
				dev, dst, nbytes,
				DMA_BIDIRECTIONAL, &req_ctx->out_nents,
				LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
				&mapped_nents))) {
			rc = -ENOMEM;
			goto ablkcipher_exit;
		}
		if (mapped_nents > 1)
			req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;

		if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
				req_ctx->in_nents, src,
				nbytes, 0, true,
				&req_ctx->in_mlli_nents);
			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
				req_ctx->out_nents, dst,
				nbytes, 0, true,
				&req_ctx->out_mlli_nents);
		}
	}

	if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
		if (unlikely(rc != 0))
			goto ablkcipher_exit;
	}

	SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n",
		      GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));

	return 0;

ablkcipher_exit:
	ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
	return rc;
}
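
/**
 * ssi_buffer_mgr_unmap_aead_request() - Release all DMA mappings taken for
 * an AEAD request (MAC, GCM/CCM blocks, IV, MLLI table and src/dst SGLs).
 * For in-place decrypt, the saved ICV is copied back into the source SGL.
 */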
void ssi_buffer_mgr_unmap_aead_request(
	struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 dummy;
	bool chained;
	u32 size_to_unmap = 0;

	if (areq_ctx->mac_buf_dma_addr != 0) {
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
	}

#if SSI_CC_HAS_AES_GCM
	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		if (areq_ctx->hkey_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
		}

		if (areq_ctx->gcm_block_len_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc1_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc2_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}
	}
#endif

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (areq_ctx->ccm_iv0_dma_addr != 0) {
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
	}
	if (areq_ctx->gen_ctx.iv_dma_addr != 0) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);
	}

	/*In case a pool was set, a table was
	 *allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool != NULL) {
		SSI_LOG_DEBUG("free MLLI buffer: dma=0x%08llX virt=%pK\n",
			      (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
			      areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	SSI_LOG_DEBUG("Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
		      sg_virt(req->src), areq_ctx->src.nents,
		      areq_ctx->assoc.nents, req->assoclen, req->cryptlen);
	size_to_unmap = req->assoclen + req->cryptlen;
	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		size_to_unmap += areq_ctx->req_authsize;
	}
	if (areq_ctx->is_gcm4543)
		size_to_unmap += crypto_aead_ivsize(tfm);

	dma_unmap_sg(dev, req->src,
		     ssi_buffer_mgr_get_sgl_nents(req->src, size_to_unmap,
						  &dummy, &chained),
		     DMA_BIDIRECTIONAL);
	if (unlikely(req->src != req->dst)) {
		SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
			      sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst,
			     ssi_buffer_mgr_get_sgl_nents(req->dst,
							  size_to_unmap,
							  &dummy, &chained),
			     DMA_BIDIRECTIONAL);
	}
	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
	    likely(req->src == req->dst)) {
		u32 size_to_skip = req->assoclen;

		if (areq_ctx->is_gcm4543)
			size_to_skip += crypto_aead_ivsize(tfm);

		/* copy mac to a temporary location to deal with possible
		 * data memory overriding that caused by cache coherence problem.
		 */
		ssi_buffer_mgr_copy_scatterlist_portion(
			areq_ctx->backup_mac, req->src,
			size_to_skip + req->cryptlen - areq_ctx->req_authsize,
			size_to_skip + req->cryptlen, SSI_SG_FROM_BUF);
	}
}
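
/**
 * ssi_buffer_mgr_get_aead_icv_nents() - Determine how many SG entries hold
 * the ICV and whether it is fragmented across entries. Returns the ICV
 * entry count, or -1 if the fragmentation is unsupported.
 */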
static inline int ssi_buffer_mgr_get_aead_icv_nents(
	struct scatterlist *sgl,
	unsigned int sgl_nents,
	unsigned int authsize,
	u32 last_entry_data_size,
	bool *is_icv_fragmented)
{
	unsigned int icv_max_size = 0;
	unsigned int icv_required_size = authsize > last_entry_data_size ?
					 (authsize - last_entry_data_size) :
					 authsize;
	int nents;
	unsigned int i;

	if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
		*is_icv_fragmented = false;
		return 0;
	}

	for (i = 0; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED); i++) {
		if (sgl == NULL)
			break;
		sgl = sg_next(sgl);
	}

	if (sgl != NULL)
		icv_max_size = sgl->length;

	if (last_entry_data_size > authsize) {
		nents = 0; /* ICV attached to data in last entry (not fragmented!) */
		*is_icv_fragmented = false;
	} else if (last_entry_data_size == authsize) {
		nents = 1; /* ICV placed in whole last entry (not fragmented!) */
		*is_icv_fragmented = false;
	} else if (icv_max_size > icv_required_size) {
		nents = 1;
		*is_icv_fragmented = true;
	} else if (icv_max_size == icv_required_size) {
		nents = 2;
		*is_icv_fragmented = true;
	} else {
		SSI_LOG_ERR("Unsupported num. of ICV fragments (> %d)\n",
			    MAX_ICV_NENTS_SUPPORTED);
		nents = -1; /*unsupported*/
	}
	SSI_LOG_DEBUG("is_frag=%s icv_nents=%u\n",
		      (*is_icv_fragmented ? "true" : "false"), nents);

	return nents;
}
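
/**
 * ssi_buffer_mgr_aead_chain_iv() - DMA-map the request IV and, for
 * plaintext-authenticate-only flows, chain it into the assoc MLLI table.
 */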
static inline int ssi_buffer_mgr_aead_chain_iv(
	struct ssi_drvdata *drvdata,
	struct aead_request *req,
	struct buffer_array *sg_data,
	bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct device *dev = &drvdata->plat_dev->dev;
	int rc = 0;

	if (unlikely(req->iv == NULL)) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
						       hw_iv_size,
						       DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
		SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
			    hw_iv_size, req->iv);
		rc = -ENOMEM;
		goto chain_iv_exit;
	}

	SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
		      hw_iv_size, req->iv,
		      (unsigned long long)areq_ctx->gen_ctx.iv_dma_addr);
	if (do_chain && areq_ctx->plaintext_authenticate_only) {
		/* TODO: what about CTR? */
		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
		/* Chain to given list */
		ssi_buffer_mgr_add_buffer_entry(
			sg_data, areq_ctx->gen_ctx.iv_dma_addr + iv_ofs,
			iv_size_to_authenc, is_last,
			&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
	}

chain_iv_exit:
	return rc;
}
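
/**
 * ssi_buffer_mgr_aead_chain_assoc() - Count the associated data entries of
 * the (already mapped) source SGL and chain them as DLLI or MLLI.
 */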
static inline int ssi_buffer_mgr_aead_chain_assoc(
	struct ssi_drvdata *drvdata,
	struct aead_request *req,
	struct buffer_array *sg_data,
	bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = 0;
	u32 mapped_nents = 0;
	struct scatterlist *current_sg = req->src;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int sg_index = 0;
	u32 size_of_assoc = req->assoclen;

	if (areq_ctx->is_gcm4543)
		size_of_assoc += crypto_aead_ivsize(tfm);

	if (sg_data == NULL) {
		rc = -EINVAL;
		goto chain_assoc_exit;
	}

	if (unlikely(req->assoclen == 0)) {
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_NULL;
		areq_ctx->assoc.nents = 0;
		areq_ctx->assoc.mlli_nents = 0;
		SSI_LOG_DEBUG("Chain assoc of length 0: buff_type=%s nents=%u\n",
			      GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
			      areq_ctx->assoc.nents);
		goto chain_assoc_exit;
	}

	/* Iterate over the sgl to see how many entries are for associated
	 * data; it is assumed that if we reach here, the sgl is already
	 * mapped.
	 */
	sg_index = current_sg->length;
	if (sg_index > size_of_assoc) {
		/* the first entry in the scatter list contains all the
		 * associated data
		 */
		mapped_nents++;
	} else {
		while (sg_index <= size_of_assoc) {
			current_sg = sg_next(current_sg);
			/* if have reached the end of the sgl, then this is
			 * unexpected
			 */
			if (current_sg == NULL) {
				SSI_LOG_ERR("reached end of sg list. unexpected\n");
				BUG();
			}
			sg_index += current_sg->length;
			mapped_nents++;
		}
	}
	if (unlikely(mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
		SSI_LOG_ERR("Too many fragments. current %d max %d\n",
			    mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->assoc.nents = mapped_nents;

	/* in CCM case we have additional entry for
	 * ccm header configurations
	 */
	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (unlikely((mapped_nents + 1) >
			LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
			SSI_LOG_ERR("CCM case. Too many fragments. Current %d max %d\n",
				    (areq_ctx->assoc.nents + 1),
				    LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
			rc = -ENOMEM;
			goto chain_assoc_exit;
		}
	}

	if (likely(mapped_nents == 1) &&
	    (areq_ctx->ccm_hdr_size == ccm_header_size_null))
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_DLLI;
	else
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;

	if (unlikely(do_chain ||
		     (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
		SSI_LOG_DEBUG("Chain assoc: buff_type=%s nents=%u\n",
			      GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
			      areq_ctx->assoc.nents);
		ssi_buffer_mgr_add_scatterlist_entry(
			sg_data, areq_ctx->assoc.nents,
			req->src, req->assoclen, 0, is_last,
			&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
	}

chain_assoc_exit:
	return rc;
}
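
/**
 * ssi_buffer_mgr_prepare_aead_data_dlli() - Compute the ICV virtual/DMA
 * addresses for the single-entry (DLLI) data case.
 */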
static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
	struct aead_request *req,
	u32 *src_last_bytes, u32 *dst_last_bytes)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;

	areq_ctx->is_icv_fragmented = false;
	if (likely(req->src == req->dst)) {
		/*INPLACE*/
		areq_ctx->icv_dma_addr = sg_dma_address(
			areq_ctx->srcSgl) +
			(*src_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(
			areq_ctx->srcSgl) +
			(*src_last_bytes - authsize);
	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/*NON-INPLACE and DECRYPT*/
		areq_ctx->icv_dma_addr = sg_dma_address(
			areq_ctx->srcSgl) +
			(*src_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(
			areq_ctx->srcSgl) +
			(*src_last_bytes - authsize);
	} else {
		/*NON-INPLACE and ENCRYPT*/
		areq_ctx->icv_dma_addr = sg_dma_address(
			areq_ctx->dstSgl) +
			(*dst_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(
			areq_ctx->dstSgl) +
			(*dst_last_bytes - authsize);
	}
}
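
/**
 * ssi_buffer_mgr_prepare_aead_data_mlli() - Chain the src/dst data SGLs into
 * MLLI tables and resolve the ICV location, backing it up to mac_buf /
 * backup_mac when it is fragmented across SG entries.
 */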
static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
	struct ssi_drvdata *drvdata,
	struct aead_request *req,
	struct buffer_array *sg_data,
	u32 *src_last_bytes, u32 *dst_last_bytes,
	bool is_last_table)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	int rc = 0, icv_nents;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	if (likely(req->src == req->dst)) {
		/*INPLACE*/
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
			areq_ctx->src.nents, areq_ctx->srcSgl,
			areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
			&areq_ctx->src.mlli_nents);

		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
			areq_ctx->src.nents, authsize, *src_last_bytes,
			&areq_ctx->is_icv_fragmented);
		if (unlikely(icv_nents < 0)) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}

		if (unlikely(areq_ctx->is_icv_fragmented)) {
			/* Backup happens only when ICV is fragmented, ICV
			 * verification is made by CPU compare in order to simplify
			 * MAC verification upon request completion
			 */
			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
#if !DX_HAS_ACP
				/* On ACP platforms the ICV is already copied
				 * for any INPLACE-DECRYPT operation, hence
				 * this code must be skipped there.
				 */
				u32 size_to_skip = req->assoclen;

				if (areq_ctx->is_gcm4543)
					size_to_skip += crypto_aead_ivsize(tfm);

				ssi_buffer_mgr_copy_scatterlist_portion(
					areq_ctx->backup_mac, req->src,
					size_to_skip + req->cryptlen -
						areq_ctx->req_authsize,
					size_to_skip + req->cryptlen,
					SSI_SG_TO_BUF);
#endif
				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
			} else {
				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
				areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
			}
		} else { /* Contig. ICV */
			/*Should handle if the sg is not contig.*/
			areq_ctx->icv_dma_addr = sg_dma_address(
				&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(
				&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
				(*src_last_bytes - authsize);
		}

	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/*NON-INPLACE and DECRYPT*/
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
			areq_ctx->src.nents, areq_ctx->srcSgl,
			areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
			&areq_ctx->src.mlli_nents);
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
			areq_ctx->dst.nents, areq_ctx->dstSgl,
			areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table,
			&areq_ctx->dst.mlli_nents);

		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
			areq_ctx->src.nents, authsize, *src_last_bytes,
			&areq_ctx->is_icv_fragmented);
		if (unlikely(icv_nents < 0)) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}

		if (unlikely(areq_ctx->is_icv_fragmented)) {
			/* Backup happens only when ICV is fragmented, ICV
			 * verification is made by CPU compare in order to simplify
			 * MAC verification upon request completion
			 */
			u32 size_to_skip = req->assoclen;

			if (areq_ctx->is_gcm4543)
				size_to_skip += crypto_aead_ivsize(tfm);

			ssi_buffer_mgr_copy_scatterlist_portion(
				areq_ctx->backup_mac, req->src,
				size_to_skip + req->cryptlen -
					areq_ctx->req_authsize,
				size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
		} else { /* Contig. ICV */
			/*Should handle if the sg is not contig.*/
			areq_ctx->icv_dma_addr = sg_dma_address(
				&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(
				&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
				(*src_last_bytes - authsize);
		}

	} else {
		/*NON-INPLACE and ENCRYPT*/
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
			areq_ctx->dst.nents, areq_ctx->dstSgl,
			areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table,
			&areq_ctx->dst.mlli_nents);
		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
			areq_ctx->src.nents, areq_ctx->srcSgl,
			areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
			&areq_ctx->src.mlli_nents);

		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dstSgl,
			areq_ctx->dst.nents, authsize, *dst_last_bytes,
			&areq_ctx->is_icv_fragmented);
		if (unlikely(icv_nents < 0)) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}

		if (likely(!areq_ctx->is_icv_fragmented)) {
			/* Contig. ICV */
			areq_ctx->icv_dma_addr = sg_dma_address(
				&areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
				(*dst_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(
				&areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
				(*dst_last_bytes - authsize);
		} else {
			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
		}
	}

prepare_data_mlli_exit:
	return rc;
}
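
/**
 * ssi_buffer_mgr_aead_chain_data() - Locate the data portion past the assoc
 * data in the src/dst SGLs (mapping dst if not in-place) and prepare the
 * DLLI or MLLI data descriptors.
 */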
static inline int ssi_buffer_mgr_aead_chain_data(
	struct ssi_drvdata *drvdata,
	struct aead_request *req,
	struct buffer_array *sg_data,
	bool is_last_table, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = &drvdata->plat_dev->dev;
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	u32 src_last_bytes = 0, dst_last_bytes = 0;
	int rc = 0;
	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
	u32 offset = 0;
	unsigned int size_for_map = req->assoclen + req->cryptlen; /*non-inplace mode*/
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 sg_index = 0;
	bool chained = false;
	bool is_gcm4543 = areq_ctx->is_gcm4543;
	u32 size_to_skip = req->assoclen;

	if (is_gcm4543)
		size_to_skip += crypto_aead_ivsize(tfm);

	offset = size_to_skip;

	if (sg_data == NULL) {
		rc = -EINVAL;
		goto chain_data_exit;
	}
	areq_ctx->srcSgl = req->src;
	areq_ctx->dstSgl = req->dst;

	if (is_gcm4543)
		size_for_map += crypto_aead_ivsize(tfm);

	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			authsize : 0;
	src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src, size_for_map,
							&src_last_bytes,
							&chained);
	sg_index = areq_ctx->srcSgl->length;
	/* check where the data starts */
	while (sg_index <= size_to_skip) {
		offset -= areq_ctx->srcSgl->length;
		areq_ctx->srcSgl = sg_next(areq_ctx->srcSgl);
		/* if have reached the end of the sgl, then this is unexpected */
		if (areq_ctx->srcSgl == NULL) {
			SSI_LOG_ERR("reached end of sg list. unexpected\n");
			BUG();
		}
		sg_index += areq_ctx->srcSgl->length;
		src_mapped_nents--;
	}
	if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
		SSI_LOG_ERR("Too many fragments. current %d max %d\n",
			    src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}

	areq_ctx->src.nents = src_mapped_nents;

	areq_ctx->srcOffset = offset;

	if (req->src != req->dst) {
		size_for_map = req->assoclen + req->cryptlen;
		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				authsize : 0;
		if (is_gcm4543)
			size_for_map += crypto_aead_ivsize(tfm);

		rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst, size_for_map,
						    DMA_BIDIRECTIONAL,
						    &areq_ctx->dst.nents,
						    LLI_MAX_NUM_OF_DATA_ENTRIES,
						    &dst_last_bytes,
						    &dst_mapped_nents);
		if (unlikely(rc != 0)) {
			rc = -ENOMEM;
			goto chain_data_exit;
		}
	}

	dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst, size_for_map,
							&dst_last_bytes,
							&chained);
	sg_index = areq_ctx->dstSgl->length;
	offset = size_to_skip;

	/* check where the data starts */
	while (sg_index <= size_to_skip) {
		offset -= areq_ctx->dstSgl->length;
		areq_ctx->dstSgl = sg_next(areq_ctx->dstSgl);
		/* if have reached the end of the sgl, then this is unexpected */
		if (areq_ctx->dstSgl == NULL) {
			SSI_LOG_ERR("reached end of sg list. unexpected\n");
			BUG();
		}
		sg_index += areq_ctx->dstSgl->length;
		dst_mapped_nents--;
	}
	if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
		SSI_LOG_ERR("Too many fragments. current %d max %d\n",
			    dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->dst.nents = dst_mapped_nents;
	areq_ctx->dstOffset = offset;
	if ((src_mapped_nents > 1) ||
	    (dst_mapped_nents > 1) ||
	    do_chain) {
		areq_ctx->data_buff_type = SSI_DMA_BUF_MLLI;
		rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req,
				sg_data, &src_last_bytes, &dst_last_bytes,
				is_last_table);
	} else {
		areq_ctx->data_buff_type = SSI_DMA_BUF_DLLI;
		ssi_buffer_mgr_prepare_aead_data_dlli(
			req, &src_last_bytes, &dst_last_bytes);
	}

chain_data_exit:
	return rc;
}
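
/**
 * ssi_buffer_mgr_update_aead_mlli_nents() - Assign SRAM addresses to the
 * assoc/src/dst MLLI tables and fold the data nents into the assoc table
 * for double-pass (non-single-pass) flows.
 */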
static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
						  struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 curr_mlli_size = 0;

	if (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
		curr_mlli_size = areq_ctx->assoc.mlli_nents *
				 LLI_ENTRY_BYTE_SIZE;
	}

	if (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI) {
		/*Inplace case dst nents equal to src nents*/
		if (req->src == req->dst) {
			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
						  curr_mlli_size;
			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
			if (!areq_ctx->is_single_pass)
				areq_ctx->assoc.mlli_nents +=
					areq_ctx->src.mlli_nents;
		} else {
			if (areq_ctx->gen_ctx.op_type ==
					DRV_CRYPTO_DIRECTION_DECRYPT) {
				areq_ctx->src.sram_addr =
						drvdata->mlli_sram_addr +
						curr_mlli_size;
				areq_ctx->dst.sram_addr =
						areq_ctx->src.sram_addr +
						areq_ctx->src.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->src.mlli_nents;
			} else {
				areq_ctx->dst.sram_addr =
						drvdata->mlli_sram_addr +
						curr_mlli_size;
				areq_ctx->src.sram_addr =
						areq_ctx->dst.sram_addr +
						areq_ctx->dst.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->dst.mlli_nents;
			}
		}
	}
}
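
/**
 * ssi_buffer_mgr_map_aead_request() - DMA-map all buffers of an AEAD request
 * (MAC, CCM/GCM config blocks, IV, assoc and src/dst data) and build the
 * MLLI table(s) for single- or double-pass processing.
 */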
int ssi_buffer_mgr_map_aead_request(
	struct ssi_drvdata *drvdata, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct device *dev = &drvdata->plat_dev->dev;
	struct buffer_array sg_data;
	unsigned int authsize = areq_ctx->req_authsize;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	int rc = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool is_gcm4543 = areq_ctx->is_gcm4543;

	u32 mapped_nents = 0;
	u32 dummy = 0; /*used for the assoc data fragments */
	u32 size_to_map = 0;

	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

#if DX_HAS_ACP
	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
	    likely(req->src == req->dst)) {
		u32 size_to_skip = req->assoclen;

		if (is_gcm4543)
			size_to_skip += crypto_aead_ivsize(tfm);

		/* copy mac to a temporary location to deal with possible
		 * data memory overriding that caused by cache coherence problem.
		 */
		ssi_buffer_mgr_copy_scatterlist_portion(
			areq_ctx->backup_mac, req->src,
			size_to_skip + req->cryptlen - areq_ctx->req_authsize,
			size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
	}
#endif

	/* calculate the size for cipher; remove ICV in decrypt */
	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
			      DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - authsize);

	areq_ctx->mac_buf_dma_addr = dma_map_single(dev,
		areq_ctx->mac_buf, MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) {
		SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK for DMA failed\n",
			    MAX_MAC_SIZE, areq_ctx->mac_buf);
		rc = -ENOMEM;
		goto aead_map_failure;
	}

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		areq_ctx->ccm_iv0_dma_addr = dma_map_single(dev,
			(areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
			AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(dev, areq_ctx->ccm_iv0_dma_addr))) {
			SSI_LOG_ERR("Mapping ccm_iv0 %u B at va=%pK for DMA failed\n",
				    AES_BLOCK_SIZE,
				    (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET));
			areq_ctx->ccm_iv0_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		if (ssi_aead_handle_config_buf(dev, areq_ctx,
			areq_ctx->ccm_config, &sg_data, req->assoclen) != 0) {
			rc = -ENOMEM;
			goto aead_map_failure;
		}
	}

#if SSI_CC_HAS_AES_GCM
	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		areq_ctx->hkey_dma_addr = dma_map_single(dev,
			areq_ctx->hkey, AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) {
			SSI_LOG_ERR("Mapping hkey %u B at va=%pK for DMA failed\n",
				    AES_BLOCK_SIZE, areq_ctx->hkey);
			rc = -ENOMEM;
			goto aead_map_failure;
		}

		areq_ctx->gcm_block_len_dma_addr = dma_map_single(dev,
			&areq_ctx->gcm_len_block, AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_block_len_dma_addr))) {
			SSI_LOG_ERR("Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
				    AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
			rc = -ENOMEM;
			goto aead_map_failure;
		}

		areq_ctx->gcm_iv_inc1_dma_addr = dma_map_single(dev,
			areq_ctx->gcm_iv_inc1,
			AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc1_dma_addr))) {
			SSI_LOG_ERR("Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
				    AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
			areq_ctx->gcm_iv_inc1_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}

		areq_ctx->gcm_iv_inc2_dma_addr = dma_map_single(dev,
			areq_ctx->gcm_iv_inc2,
			AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc2_dma_addr))) {
			SSI_LOG_ERR("Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
				    AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
			areq_ctx->gcm_iv_inc2_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
	}
#endif /*SSI_CC_HAS_AES_GCM*/

	size_to_map = req->cryptlen + req->assoclen;
	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		size_to_map += authsize;
	}
	if (is_gcm4543)
		size_to_map += crypto_aead_ivsize(tfm);
	rc = ssi_buffer_mgr_map_scatterlist(dev, req->src,
					    size_to_map, DMA_BIDIRECTIONAL,
					    &areq_ctx->src.nents,
					    LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
					    LLI_MAX_NUM_OF_DATA_ENTRIES,
					    &dummy, &mapped_nents);
	if (unlikely(rc != 0)) {
		rc = -ENOMEM;
		goto aead_map_failure;
	}

	if (likely(areq_ctx->is_single_pass)) {
		/*
		 * Create MLLI table for:
		 *   (1) Assoc. data
		 *   (2) Src/Dst SGLs
		 *   Note: IV is contg. buffer (not an SGL)
		 */
		rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, true, false);
		if (unlikely(rc != 0))
			goto aead_map_failure;
		rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, true, false);
		if (unlikely(rc != 0))
			goto aead_map_failure;
		rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, false);
		if (unlikely(rc != 0))
			goto aead_map_failure;
	} else { /* DOUBLE-PASS flow */
		/*
		 * Prepare MLLI table(s) in this order:
		 *
		 * If ENCRYPT/DECRYPT (inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src/dst (inplace operation)
		 *
		 * If ENCRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for dst
		 *   (4) MLLI for src
		 *
		 * If DECRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src
		 *   (4) MLLI for dst
		 */
		rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, false, true);
		if (unlikely(rc != 0))
			goto aead_map_failure;
		rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, false, true);
		if (unlikely(rc != 0))
			goto aead_map_failure;
		rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, true);
		if (unlikely(rc != 0))
			goto aead_map_failure;
	}

	/* MLLI support - start building the MLLI according to the above results */
	if (unlikely(
		(areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
		(areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI))) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
		if (unlikely(rc != 0)) {
			goto aead_map_failure;
		}

		ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
		SSI_LOG_DEBUG("assoc params mn %d\n", areq_ctx->assoc.mlli_nents);
		SSI_LOG_DEBUG("src params mn %d\n", areq_ctx->src.mlli_nents);
		SSI_LOG_DEBUG("dst params mn %d\n", areq_ctx->dst.mlli_nents);
	}

	return 0;

aead_map_failure:
	ssi_buffer_mgr_unmap_aead_request(dev, req);
	return rc;
}
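
/**
 * ssi_buffer_mgr_map_hash_request_final() - Map the accumulated block buffer
 * and the remaining source data for the final hash operation, as DLLI or
 * via an MLLI table.
 */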
int ssi_buffer_mgr_map_hash_request_final(
	struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src,
	unsigned int nbytes, bool do_update)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = &drvdata->plat_dev->dev;
	u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
			areq_ctx->buff0;
	u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
			&areq_ctx->buff0_cnt;
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	SSI_LOG_DEBUG(" final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
		      curr_buff, *curr_buff_cnt, nbytes,
		      src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (unlikely(nbytes == 0 && *curr_buff_cnt == 0)) {
		/* nothing to do */
		return 0;
	}

	/*TODO: copy data in case that buffer is enough for operation */
	/* map the previous buffer */
	if (*curr_buff_cnt != 0) {
		if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
					      *curr_buff_cnt, &sg_data) != 0) {
			return -ENOMEM;
		}
	}

	if (src && (nbytes > 0) && do_update) {
		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src, nbytes,
					DMA_TO_DEVICE, &areq_ctx->in_nents,
					LLI_MAX_NUM_OF_DATA_ENTRIES,
					&dummy, &mapped_nents))) {
			goto unmap_curr_buff;
		}
		if (src && (mapped_nents == 1) &&
		    (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = nbytes;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
		} else {
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
		}
	}

	/* build MLLI */
	if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
						     areq_ctx->in_nents,
						     src, nbytes, 0,
						     true, &areq_ctx->mlli_nents);
		if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
							  mlli_params) != 0)) {
			goto fail_unmap_din;
		}
	}
	/* change the buffer index for the unmap function */
	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
	SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
		      GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt != 0) {
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
	}
	return -ENOMEM;
}
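
/**
 * ssi_buffer_mgr_map_hash_request_update() - Map source data for a hash
 * update: data below one block is cached, a partial trailing block is saved
 * to the spare buffer, and whole blocks are mapped as DLLI or MLLI.
 * Returns 1 if all data was cached and no HW operation is needed.
 */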
int ssi_buffer_mgr_map_hash_request_update(
	struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src,
	unsigned int nbytes, unsigned int block_size)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = &drvdata->plat_dev->dev;
	u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
			areq_ctx->buff0;
	u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
			&areq_ctx->buff0_cnt;
	u8 *next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
			areq_ctx->buff1;
	u32 *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
			&areq_ctx->buff1_cnt;
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	unsigned int update_data_len;
	u32 total_in_len = nbytes + *curr_buff_cnt;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	unsigned int swap_index = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	SSI_LOG_DEBUG(" update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
		      curr_buff, *curr_buff_cnt, nbytes,
		      src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	areq_ctx->curr_sg = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (unlikely(total_in_len < block_size)) {
		SSI_LOG_DEBUG(" less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
			      curr_buff, *curr_buff_cnt,
			      &curr_buff[*curr_buff_cnt]);
		areq_ctx->in_nents =
			ssi_buffer_mgr_get_sgl_nents(src, nbytes, &dummy,
						     NULL);
		sg_copy_to_buffer(src, areq_ctx->in_nents,
				  &curr_buff[*curr_buff_cnt], nbytes);
		*curr_buff_cnt += nbytes;
		return 1;
	}

	/* Calculate the residue size*/
	*next_buff_cnt = total_in_len & (block_size - 1);
	/* update data len */
	update_data_len = total_in_len - *next_buff_cnt;

	SSI_LOG_DEBUG(" temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
		      *next_buff_cnt, update_data_len);

	/* Copy the new residue to next buffer */
	if (*next_buff_cnt != 0) {
		SSI_LOG_DEBUG(" handle residue: next buff %pK skip data %u residue %u\n",
			      next_buff, (update_data_len - *curr_buff_cnt),
			      *next_buff_cnt);
		ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
					(update_data_len - *curr_buff_cnt),
					nbytes, SSI_SG_TO_BUF);
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (*curr_buff_cnt != 0) {
		if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
					      *curr_buff_cnt, &sg_data) != 0) {
			return -ENOMEM;
		}
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (update_data_len > *curr_buff_cnt) {
		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
					(update_data_len - *curr_buff_cnt),
					DMA_TO_DEVICE, &areq_ctx->in_nents,
					LLI_MAX_NUM_OF_DATA_ENTRIES,
					&dummy, &mapped_nents))) {
			goto unmap_curr_buff;
		}
		if ((mapped_nents == 1) &&
		    (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
			/* only one entry in the SG and no previous data */
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = update_data_len;
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
		} else {
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
		}
	}

	if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
					areq_ctx->in_nents, src,
					(update_data_len - *curr_buff_cnt), 0,
					true, &areq_ctx->mlli_nents);
		if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
							  mlli_params) != 0)) {
			goto fail_unmap_din;
		}
	}
	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt != 0) {
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
	}
	return -ENOMEM;
}
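
/**
 * ssi_buffer_mgr_unmap_hash_request() - Release the MLLI table and the src
 * and block-buffer mappings; on revert, switch back to the previous block
 * buffer instead of clearing its length.
 */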
void ssi_buffer_mgr_unmap_hash_request(
	struct device *dev, void *ctx, struct scatterlist *src, bool do_revert)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	u32 *prev_len = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
					       &areq_ctx->buff1_cnt;

	/*In case a pool was set, a table was
	 *allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool != NULL) {
		SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n",
			      (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
			      areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	if ((src) && likely(areq_ctx->in_nents != 0)) {
		SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=0x%llX len=0x%X\n",
			      sg_virt(src),
			      (unsigned long long)sg_dma_address(src),
			      sg_dma_len(src));
		dma_unmap_sg(dev, src,
			     areq_ctx->in_nents, DMA_TO_DEVICE);
	}

	if (*prev_len != 0) {
		SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK dma=0x%llX len 0x%X\n",
			      sg_virt(areq_ctx->buff_sg),
			      (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
			      sg_dma_len(areq_ctx->buff_sg));
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
		if (!do_revert) {
			/* clean the previous data length for update operation */
			*prev_len = 0;
		} else {
			areq_ctx->buff_index ^= 1;
		}
	}
}
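
/**
 * ssi_buffer_mgr_init() - Allocate the buffer manager handle and create the
 * DMA pool used for MLLI tables.
 */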
int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle;
	struct device *dev = &drvdata->plat_dev->dev;

	buff_mgr_handle = (struct buff_mgr_handle *)
		kmalloc(sizeof(struct buff_mgr_handle), GFP_KERNEL);
	if (buff_mgr_handle == NULL)
		return -ENOMEM;

	drvdata->buff_mgr_handle = buff_mgr_handle;

	buff_mgr_handle->mlli_buffs_pool = dma_pool_create(
				"dx_single_mlli_tables", dev,
				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);

	if (unlikely(buff_mgr_handle->mlli_buffs_pool == NULL))
		goto error;

	return 0;

error:
	ssi_buffer_mgr_fini(drvdata);
	return -ENOMEM;
}
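
/**
 * ssi_buffer_mgr_fini() - Destroy the MLLI DMA pool and free the buffer
 * manager handle.
 */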
int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;

	if (buff_mgr_handle != NULL) {
		dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
		kfree(drvdata->buff_mgr_handle);
		drvdata->buff_mgr_handle = NULL;
	}
	return 0;
}