drivers/staging/ccree/ssi_buffer_mgr.c
1 /*
2  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, see <http://www.gnu.org/licenses/>.
15  */
16
17 #include <linux/crypto.h>
18 #include <linux/version.h>
19 #include <crypto/algapi.h>
20 #include <crypto/internal/aead.h>
21 #include <crypto/hash.h>
22 #include <crypto/authenc.h>
23 #include <crypto/scatterwalk.h>
24 #include <linux/dmapool.h>
25 #include <linux/dma-mapping.h>
27 #include <linux/module.h>
28 #include <linux/platform_device.h>
29
30 #include "ssi_buffer_mgr.h"
31 #include "cc_lli_defs.h"
32 #include "ssi_cipher.h"
33 #include "ssi_hash.h"
34 #include "ssi_aead.h"
35
36 #ifdef CC_DEBUG
37 #define DUMP_SGL(sg) \
38         while (sg) { \
39                 SSI_LOG_DEBUG("page=%p offset=%u length=%u (dma_len=%u) " \
40                              "dma_addr=%08x\n", sg_page(sg), (sg)->offset, \
41                         (sg)->length, sg_dma_len(sg), (sg)->dma_address); \
42                 (sg) = sg_next(sg); \
43         }
44 #define DUMP_MLLI_TABLE(mlli_p, nents) \
45         do { \
46                 SSI_LOG_DEBUG("mlli=%pK nents=%u\n", (mlli_p), (nents)); \
47                 while ((nents)--) { \
48                         SSI_LOG_DEBUG("addr=0x%08X size=0x%08X\n", \
49                              (mlli_p)[LLI_WORD0_OFFSET], \
50                              (mlli_p)[LLI_WORD1_OFFSET]); \
51                         (mlli_p) += LLI_ENTRY_WORD_SIZE; \
52                 } \
53         } while (0)
54 #define GET_DMA_BUFFER_TYPE(buff_type) ( \
55         ((buff_type) == SSI_DMA_BUF_NULL) ? "BUF_NULL" : \
56         ((buff_type) == SSI_DMA_BUF_DLLI) ? "BUF_DLLI" : \
57         ((buff_type) == SSI_DMA_BUF_MLLI) ? "BUF_MLLI" : "BUF_INVALID")
58 #else
59 #define DUMP_SGL(sg)
60 #define DUMP_MLLI_TABLE(mlli_p, nents)
61 #define GET_DMA_BUFFER_TYPE(buff_type)
62 #endif
63
64
65 enum dma_buffer_type {
66         DMA_NULL_TYPE = -1,
67         DMA_SGL_TYPE = 1,
68         DMA_BUFF_TYPE = 2,
69 };
70
71 struct buff_mgr_handle {
72         struct dma_pool *mlli_buffs_pool;
73 };
74
75 union buffer_array_entry {
76         struct scatterlist *sgl;
77         dma_addr_t buffer_dma;
78 };
79
80 struct buffer_array {
81         unsigned int num_of_buffers;
82         union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
83         unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
84         int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
85         int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
86         enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
87         bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
88         u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
89 };
90
91 /**
92  * ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries.
93  * @sg_list: SG list
94  * @nbytes: [IN] Total SGL data bytes.
95  * @lbytes: [OUT] Returns the number of bytes in the last entry
96  * @is_chained: [OUT] Set to true if a chained (page-linked) entry is found
97  */
98 static unsigned int ssi_buffer_mgr_get_sgl_nents(
99         struct scatterlist *sg_list, unsigned int nbytes, u32 *lbytes, bool *is_chained)
100 {
101         unsigned int nents = 0;
102         while (nbytes != 0) {
103                 if (sg_is_chain(sg_list)) {
104                         SSI_LOG_ERR("Unexpected chained entry "
105                                     "in sg (entry=0x%X)\n", nents);
106                         BUG();
107                 }
108                 if (sg_list->length != 0) {
109                         nents++;
110                         /* get the number of bytes in the last entry */
111                         *lbytes = nbytes;
112                         nbytes -= (sg_list->length > nbytes) ? nbytes : sg_list->length;
113                         sg_list = sg_next(sg_list);
114                 } else {
115                         sg_list = (struct scatterlist *)sg_page(sg_list);
116                         if (is_chained != NULL) {
117                                 *is_chained = true;
118                         }
119                 }
120         }
121         SSI_LOG_DEBUG("nents %d last bytes %d\n", nents, *lbytes);
122         return nents;
123 }
124
125 /**
126  * ssi_buffer_mgr_zero_sgl() - Zero scatterlist data.
127  * @sgl: SG list to zero
128  * @data_len: number of bytes to zero, starting from the head of the list
129  */
130 void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
131 {
132         struct scatterlist *current_sg = sgl;
133         int sg_index = 0;
134
135         while (sg_index <= data_len) {
136                 if (current_sg == NULL) {
137                         /* reached the end of the sgl --> just return back */
138                         return;
139                 }
140                 memset(sg_virt(current_sg), 0, current_sg->length);
141                 sg_index += current_sg->length;
142                 current_sg = sg_next(current_sg);
143         }
144 }
145
146 /**
147  * ssi_buffer_mgr_copy_scatterlist_portion() - Copy scatter list data,
148  * from to_skip to end, to dest and vice versa
149  *
150  * @dest: linear buffer to copy to/from
151  * @sg: SG list to copy from/to
152  * @to_skip: offset in the SG data where the copied region starts
153  * @end: offset in the SG data where the copied region ends
154  * @direct: SSI_SG_TO_BUF copies SG data into @dest; otherwise vice versa
155  */
156 void ssi_buffer_mgr_copy_scatterlist_portion(
157         u8 *dest, struct scatterlist *sg,
158         u32 to_skip,  u32 end,
159         enum ssi_sg_cpy_direct direct)
160 {
161         u32 nents, lbytes;
162
163         nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
164         sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip), 0, (direct == SSI_SG_TO_BUF));
165 }
166
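/*
 * ssi_buffer_mgr_render_buff_to_mlli() - Add a contiguous DMA buffer to the
 * MLLI table, splitting it into entries of at most CC_MAX_MLLI_ENTRY_SIZE.
 * Returns -ENOMEM if the table would exceed MAX_NUM_OF_TOTAL_MLLI_ENTRIES.
 */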
167 static inline int ssi_buffer_mgr_render_buff_to_mlli(
168         dma_addr_t buff_dma, u32 buff_size, u32 *curr_nents,
169         u32 **mlli_entry_pp)
170 {
171         u32 *mlli_entry_p = *mlli_entry_pp;
172         u32 new_nents;
173
174         /* Verify there is no memory overflow */
175         new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
176         if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
177                 return -ENOMEM;
178         }
179
180         /* handle buffer longer than 64 kbytes */
181         while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
182                 cc_lli_set_addr(mlli_entry_p, buff_dma);
183                 cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
184                 SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",*curr_nents,
185                            mlli_entry_p[LLI_WORD0_OFFSET],
186                            mlli_entry_p[LLI_WORD1_OFFSET]);
187                 buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
188                 buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
189                 mlli_entry_p = mlli_entry_p + 2;
190                 (*curr_nents)++;
191         }
192         /*Last entry */
193         cc_lli_set_addr(mlli_entry_p, buff_dma);
194         cc_lli_set_size(mlli_entry_p, buff_size);
195         SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",*curr_nents,
196                    mlli_entry_p[LLI_WORD0_OFFSET],
197                    mlli_entry_p[LLI_WORD1_OFFSET]);
198         mlli_entry_p = mlli_entry_p + 2;
199         *mlli_entry_pp = mlli_entry_p;
200         (*curr_nents)++;
201         return 0;
202 }
203
204
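/*
 * ssi_buffer_mgr_render_scatterlist_to_mlli() - Walk a DMA-mapped scatterlist
 * and render sgl_data_len bytes of it, starting at sglOffset, into MLLI
 * entries, updating *curr_nents and the MLLI write pointer.
 */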
205 static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
206         struct scatterlist *sgl, u32 sgl_data_len, u32 sglOffset, u32 *curr_nents,
207         u32 **mlli_entry_pp)
208 {
209         struct scatterlist *curr_sgl = sgl;
210         u32 *mlli_entry_p = *mlli_entry_pp;
211         s32 rc = 0;
212
213         for ( ; (curr_sgl != NULL) && (sgl_data_len != 0);
214               curr_sgl = sg_next(curr_sgl)) {
215                 u32 entry_data_len =
216                         (sgl_data_len > sg_dma_len(curr_sgl) - sglOffset) ?
217                                 sg_dma_len(curr_sgl) - sglOffset : sgl_data_len;
218                 sgl_data_len -= entry_data_len;
219                 rc = ssi_buffer_mgr_render_buff_to_mlli(
220                         sg_dma_address(curr_sgl) + sglOffset, entry_data_len, curr_nents,
221                         &mlli_entry_p);
222                 if (rc != 0) {
223                         return rc;
224                 }
225                 sglOffset = 0;
226         }
227         *mlli_entry_pp = mlli_entry_p;
228         return 0;
229 }
230
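/*
 * ssi_buffer_mgr_generate_mlli() - Allocate an MLLI table from the DMA pool
 * and render every buffer accumulated in sg_data into it, recording the
 * resulting table length in mlli_params.
 */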
231 static int ssi_buffer_mgr_generate_mlli(
232         struct device *dev,
233         struct buffer_array *sg_data,
234         struct mlli_params *mlli_params)
235 {
236         u32 *mlli_p;
237         u32 total_nents = 0, prev_total_nents = 0;
238         int rc = 0, i;
239
240         SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers);
241
242         /* Allocate memory from the pointed pool */
243         mlli_params->mlli_virt_addr = dma_pool_alloc(
244                         mlli_params->curr_pool, GFP_KERNEL,
245                         &(mlli_params->mlli_dma_addr));
246         if (unlikely(mlli_params->mlli_virt_addr == NULL)) {
247                 SSI_LOG_ERR("dma_pool_alloc() failed\n");
248                 rc = -ENOMEM;
249                 goto build_mlli_exit;
250         }
251         /* Point to start of MLLI */
252         mlli_p = (u32 *)mlli_params->mlli_virt_addr;
253         /* go over all SG's and link it to one MLLI table */
254         for (i = 0; i < sg_data->num_of_buffers; i++) {
255                 if (sg_data->type[i] == DMA_SGL_TYPE)
256                         rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
257                                 sg_data->entry[i].sgl,
258                                 sg_data->total_data_len[i], sg_data->offset[i], &total_nents,
259                                 &mlli_p);
260                 else /*DMA_BUFF_TYPE*/
261                         rc = ssi_buffer_mgr_render_buff_to_mlli(
262                                 sg_data->entry[i].buffer_dma,
263                                 sg_data->total_data_len[i], &total_nents,
264                                 &mlli_p);
265                 if (rc != 0) {
266                         return rc;
267                 }
268
269                 /* set last bit in the current table */
270                 if (sg_data->mlli_nents[i] != NULL) {
271                         /*Calculate the current MLLI table length for the
272                          *length field in the descriptor
273                          */
274                         *(sg_data->mlli_nents[i]) +=
275                                 (total_nents - prev_total_nents);
276                         prev_total_nents = total_nents;
277                 }
278         }
279
280         /* Set MLLI size for the bypass operation */
281         mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
282
283         SSI_LOG_DEBUG("MLLI params: "
284                      "virt_addr=%pK dma_addr=0x%llX mlli_len=0x%X\n",
285                    mlli_params->mlli_virt_addr,
286                    (unsigned long long)mlli_params->mlli_dma_addr,
287                    mlli_params->mlli_len);
288
289 build_mlli_exit:
290         return rc;
291 }
292
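/*
 * Queue a single contiguous DMA buffer in the buffer_array for later
 * MLLI table generation.
 */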
293 static inline void ssi_buffer_mgr_add_buffer_entry(
294         struct buffer_array *sgl_data,
295         dma_addr_t buffer_dma, unsigned int buffer_len,
296         bool is_last_entry, u32 *mlli_nents)
297 {
298         unsigned int index = sgl_data->num_of_buffers;
299
300         SSI_LOG_DEBUG("index=%u single_buff=0x%llX "
301                      "buffer_len=0x%08X is_last=%d\n",
302                      index, (unsigned long long)buffer_dma, buffer_len, is_last_entry);
303         sgl_data->nents[index] = 1;
304         sgl_data->entry[index].buffer_dma = buffer_dma;
305         sgl_data->offset[index] = 0;
306         sgl_data->total_data_len[index] = buffer_len;
307         sgl_data->type[index] = DMA_BUFF_TYPE;
308         sgl_data->is_last[index] = is_last_entry;
309         sgl_data->mlli_nents[index] = mlli_nents;
310         if (sgl_data->mlli_nents[index] != NULL)
311                 *sgl_data->mlli_nents[index] = 0;
312         sgl_data->num_of_buffers++;
313 }
314
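/*
 * Queue a DMA-mapped scatterlist (data_len bytes starting at data_offset)
 * in the buffer_array for later MLLI table generation.
 */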
315 static inline void ssi_buffer_mgr_add_scatterlist_entry(
316         struct buffer_array *sgl_data,
317         unsigned int nents,
318         struct scatterlist *sgl,
319         unsigned int data_len,
320         unsigned int data_offset,
321         bool is_last_table,
322         u32 *mlli_nents)
323 {
324         unsigned int index = sgl_data->num_of_buffers;
325
326         SSI_LOG_DEBUG("index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
327                      index, nents, sgl, data_len, is_last_table);
328         sgl_data->nents[index] = nents;
329         sgl_data->entry[index].sgl = sgl;
330         sgl_data->offset[index] = data_offset;
331         sgl_data->total_data_len[index] = data_len;
332         sgl_data->type[index] = DMA_SGL_TYPE;
333         sgl_data->is_last[index] = is_last_table;
334         sgl_data->mlli_nents[index] = mlli_nents;
335         if (sgl_data->mlli_nents[index] != NULL)
336                 *sgl_data->mlli_nents[index] = 0;
337         sgl_data->num_of_buffers++;
338 }
339
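/*
 * ssi_buffer_mgr_dma_map_sg() - DMA-map a (possibly page-chained) scatterlist
 * entry by entry. On failure the already-mapped entries are unmapped and 0 is
 * returned; on success the number of entries is returned.
 */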
340 static int
341 ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
342                          enum dma_data_direction direction)
343 {
344         u32 i, j;
345         struct scatterlist *l_sg = sg;
346         for (i = 0; i < nents; i++) {
347                 if (l_sg == NULL) {
348                         break;
349                 }
350                 if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
351                         SSI_LOG_ERR("dma_map_page() sg buffer failed\n");
352                         goto err;
353                 }
354                 l_sg = sg_next(l_sg);
355         }
356         return nents;
357
358 err:
359         /* Restore mapped parts */
360         for (j = 0; j < i; j++) {
361                 if (sg == NULL) {
362                         break;
363                 }
364                 dma_unmap_sg(dev, sg, 1, direction);
365                 sg = sg_next(sg);
366         }
367         return 0;
368 }
369
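/*
 * ssi_buffer_mgr_map_scatterlist() - DMA-map an SG list holding nbytes of
 * data. A single-entry list is mapped directly (DLLI candidate); otherwise
 * the entries are counted, checked against max_sg_nents and mapped either in
 * one call or entry by entry for chained lists.
 */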
370 static int ssi_buffer_mgr_map_scatterlist(
371         struct device *dev, struct scatterlist *sg,
372         unsigned int nbytes, int direction,
373         u32 *nents, u32 max_sg_nents,
374         u32 *lbytes, u32 *mapped_nents)
375 {
376         bool is_chained = false;
377
378         if (sg_is_last(sg)) {
379                 /* One entry only case -set to DLLI */
380                 if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
381                         SSI_LOG_ERR("dma_map_sg() single buffer failed\n");
382                         return -ENOMEM;
383                 }
384                 SSI_LOG_DEBUG("Mapped sg: dma_address=0x%llX "
385                              "page=%p addr=%pK offset=%u "
386                              "length=%u\n",
387                              (unsigned long long)sg_dma_address(sg),
388                              sg_page(sg),
389                              sg_virt(sg),
390                              sg->offset, sg->length);
391                 *lbytes = nbytes;
392                 *nents = 1;
393                 *mapped_nents = 1;
394         } else {  /* !sg_is_last */
395                 *nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
396                                                      &is_chained);
397                 if (*nents > max_sg_nents) {
398                         SSI_LOG_ERR("Too many fragments. current %d max %d\n",
399                                     *nents, max_sg_nents);
400                         *nents = 0;
401                         return -ENOMEM;
402                 }
403                 if (!is_chained) {
404                         /* In case of mmu the number of mapped nents might
405                          * be changed from the original sgl nents
406                          */
407                         *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
408                         if (unlikely(*mapped_nents == 0)){
409                                 *nents = 0;
410                                 SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
411                                 return -ENOMEM;
412                         }
413                 } else {
414                         /*In this case the driver maps entry by entry so it
415                          * must have the same nents before and after map
416                          */
417                         *mapped_nents = ssi_buffer_mgr_dma_map_sg(dev,
418                                                                  sg,
419                                                                  *nents,
420                                                                  direction);
421                         if (unlikely(*mapped_nents != *nents)){
422                                 *nents = *mapped_nents;
423                                 SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
424                                 return -ENOMEM;
425                         }
426                 }
427         }
428
429         return 0;
430 }
431
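/*
 * Map the CCM header configuration buffer as a single-entry scatterlist and,
 * when associated data is present, queue it for the assoc MLLI table.
 */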
432 static inline int
433 ssi_aead_handle_config_buf(struct device *dev,
434         struct aead_req_ctx *areq_ctx,
435         u8 *config_data,
436         struct buffer_array *sg_data,
437         unsigned int assoclen)
438 {
439         SSI_LOG_DEBUG("handle additional data config set to DLLI\n");
440         /* create sg for the current buffer */
441         sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
442                     AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
443         if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
444                                 DMA_TO_DEVICE) != 1)) {
445                 SSI_LOG_ERR("dma_map_sg() config buffer failed\n");
446                 return -ENOMEM;
447         }
448         SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
449                      "page=%p addr=%pK "
450                      "offset=%u length=%u\n",
451                      (unsigned long long)sg_dma_address(&areq_ctx->ccm_adata_sg),
452                      sg_page(&areq_ctx->ccm_adata_sg),
453                      sg_virt(&areq_ctx->ccm_adata_sg),
454                      areq_ctx->ccm_adata_sg.offset,
455                      areq_ctx->ccm_adata_sg.length);
456         /* prepare for case of MLLI */
457         if (assoclen > 0) {
458                 ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1,
459                                                     &areq_ctx->ccm_adata_sg,
460                                                     (AES_BLOCK_SIZE +
461                                                     areq_ctx->ccm_hdr_size), 0,
462                                                     false, NULL);
463         }
464         return 0;
465 }
466
467
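/*
 * Map the hash request's pending data buffer as a single-entry scatterlist,
 * mark the request data as DLLI and queue the entry in case an MLLI table
 * is later required.
 */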
468 static inline int ssi_ahash_handle_curr_buf(struct device *dev,
469                                            struct ahash_req_ctx *areq_ctx,
470                                            u8* curr_buff,
471                                            u32 curr_buff_cnt,
472                                            struct buffer_array *sg_data)
473 {
474         SSI_LOG_DEBUG("handle curr buff %x set to DLLI\n", curr_buff_cnt);
475         /* create sg for the current buffer */
476         sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
477         if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
478                                 DMA_TO_DEVICE) != 1)) {
479                 SSI_LOG_ERR("dma_map_sg() src buffer failed\n");
480                 return -ENOMEM;
481         }
483         SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
484                      "page=%p addr=%pK "
485                      "offset=%u length=%u\n",
486                      (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
487                      sg_page(areq_ctx->buff_sg),
488                      sg_virt(areq_ctx->buff_sg),
489                      areq_ctx->buff_sg->offset,
490                      areq_ctx->buff_sg->length);
491         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
492         areq_ctx->curr_sg = areq_ctx->buff_sg;
493         areq_ctx->in_nents = 0;
494         /* prepare for case of MLLI */
495         ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, areq_ctx->buff_sg,
496                                 curr_buff_cnt, 0, false, NULL);
497         return 0;
498 }
499
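/*
 * ssi_buffer_mgr_unmap_blkcipher_request() - Release the DMA resources taken
 * by ssi_buffer_mgr_map_blkcipher_request(): the IV mapping, the MLLI table
 * (if one was allocated) and the src/dst scatterlist mappings.
 */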
500 void ssi_buffer_mgr_unmap_blkcipher_request(
501         struct device *dev,
502         void *ctx,
503         unsigned int ivsize,
504         struct scatterlist *src,
505         struct scatterlist *dst)
506 {
507         struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
508
509         if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
510                 SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n",
511                         (unsigned long long)req_ctx->gen_ctx.iv_dma_addr,
512                         ivsize);
513                 dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
514                                  ivsize,
515                                  req_ctx->is_giv ? DMA_BIDIRECTIONAL :
516                                  DMA_TO_DEVICE);
517         }
518         /* Release pool */
519         if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
520                 dma_pool_free(req_ctx->mlli_params.curr_pool,
521                               req_ctx->mlli_params.mlli_virt_addr,
522                               req_ctx->mlli_params.mlli_dma_addr);
523         }
524
525         dma_unmap_sg(dev, src, req_ctx->in_nents,
526                 DMA_BIDIRECTIONAL);
527         SSI_LOG_DEBUG("Unmapped req->src=%pK\n",
528                      sg_virt(src));
529
530         if (src != dst) {
531                 dma_unmap_sg(dev, dst, req_ctx->out_nents,
532                         DMA_BIDIRECTIONAL);
533                 SSI_LOG_DEBUG("Unmapped req->dst=%pK\n",
534                         sg_virt(dst));
535         }
536 }
537
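/*
 * ssi_buffer_mgr_map_blkcipher_request() - DMA-map the IV and the src/dst
 * scatterlists of a cipher request and, when more than one entry is involved,
 * generate the MLLI table describing them. All mappings are released on error.
 */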
538 int ssi_buffer_mgr_map_blkcipher_request(
539         struct ssi_drvdata *drvdata,
540         void *ctx,
541         unsigned int ivsize,
542         unsigned int nbytes,
543         void *info,
544         struct scatterlist *src,
545         struct scatterlist *dst)
546 {
547         struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
548         struct mlli_params *mlli_params = &req_ctx->mlli_params;
549         struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
550         struct device *dev = &drvdata->plat_dev->dev;
551         struct buffer_array sg_data;
552         u32 dummy = 0;
553         int rc = 0;
554         u32 mapped_nents = 0;
555
556         req_ctx->dma_buf_type = SSI_DMA_BUF_DLLI;
557         mlli_params->curr_pool = NULL;
558         sg_data.num_of_buffers = 0;
559
560         /* Map IV buffer */
561         if (likely(ivsize != 0)) {
562                 dump_byte_array("iv", (u8 *)info, ivsize);
563                 req_ctx->gen_ctx.iv_dma_addr =
564                         dma_map_single(dev, (void *)info,
565                                        ivsize,
566                                        req_ctx->is_giv ? DMA_BIDIRECTIONAL:
567                                        DMA_TO_DEVICE);
568                 if (unlikely(dma_mapping_error(dev,
569                                         req_ctx->gen_ctx.iv_dma_addr))) {
570                         SSI_LOG_ERR("Mapping iv %u B at va=%pK "
571                                    "for DMA failed\n", ivsize, info);
572                         return -ENOMEM;
573                 }
574                 SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
575                         ivsize, info,
576                         (unsigned long long)req_ctx->gen_ctx.iv_dma_addr);
577         } else
578                 req_ctx->gen_ctx.iv_dma_addr = 0;
579
580         /* Map the src SGL */
581         rc = ssi_buffer_mgr_map_scatterlist(dev, src,
582                 nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
583                 LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
584         if (unlikely(rc != 0)) {
585                 rc = -ENOMEM;
586                 goto ablkcipher_exit;
587         }
588         if (mapped_nents > 1)
589                 req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
590
591         if (unlikely(src == dst)) {
592                 /* Handle inplace operation */
593                 if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
594                         req_ctx->out_nents = 0;
595                         ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
596                                 req_ctx->in_nents, src,
597                                 nbytes, 0, true, &req_ctx->in_mlli_nents);
598                 }
599         } else {
600                 /* Map the dst sg */
601                 if (unlikely(ssi_buffer_mgr_map_scatterlist(
602                         dev, dst, nbytes,
603                         DMA_BIDIRECTIONAL, &req_ctx->out_nents,
604                         LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
605                         &mapped_nents))) {
606                         rc = -ENOMEM;
607                         goto ablkcipher_exit;
608                 }
609                 if (mapped_nents > 1)
610                         req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
611
612                 if (unlikely((req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI))) {
613                         ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
614                                 req_ctx->in_nents, src,
615                                 nbytes, 0, true,
616                                 &req_ctx->in_mlli_nents);
617                         ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
618                                 req_ctx->out_nents, dst,
619                                 nbytes, 0, true,
620                                 &req_ctx->out_mlli_nents);
621                 }
622         }
623
624         if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
625                 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
626                 rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
627                 if (unlikely(rc != 0))
628                         goto ablkcipher_exit;
629
630         }
631
632         SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n",
633                 GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));
634
635         return 0;
636
637 ablkcipher_exit:
638         ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
639         return rc;
640 }
641
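/*
 * ssi_buffer_mgr_unmap_aead_request() - Release the DMA resources taken when
 * the AEAD request was mapped: MAC and GCM/CCM helper buffers, the IV, the
 * MLLI table (if allocated) and the src/dst scatterlist mappings.
 */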
642 void ssi_buffer_mgr_unmap_aead_request(
643         struct device *dev, struct aead_request *req)
644 {
645         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
646         unsigned int hw_iv_size = areq_ctx->hw_iv_size;
647         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
648         u32 dummy;
649         bool chained;
650         u32 size_to_unmap = 0;
651
652         if (areq_ctx->mac_buf_dma_addr != 0) {
653                 dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
654                         MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
655         }
656
657 #if SSI_CC_HAS_AES_GCM
658         if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
659                 if (areq_ctx->hkey_dma_addr != 0) {
660                         dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
661                                          AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
662                 }
663
664                 if (areq_ctx->gcm_block_len_dma_addr != 0) {
665                         dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
666                                          AES_BLOCK_SIZE, DMA_TO_DEVICE);
667                 }
668
669                 if (areq_ctx->gcm_iv_inc1_dma_addr != 0) {
670                         dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
671                                 AES_BLOCK_SIZE, DMA_TO_DEVICE);
672                 }
673
674                 if (areq_ctx->gcm_iv_inc2_dma_addr != 0) {
675                         dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
676                                 AES_BLOCK_SIZE, DMA_TO_DEVICE);
677                 }
678         }
679 #endif
680
681         if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
682                 if (areq_ctx->ccm_iv0_dma_addr != 0) {
683                         dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
684                                 AES_BLOCK_SIZE, DMA_TO_DEVICE);
685                 }
686
687                 dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
688         }
689         if (areq_ctx->gen_ctx.iv_dma_addr != 0) {
690                 dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
691                                  hw_iv_size, DMA_BIDIRECTIONAL);
692         }
693
694         /*In case a pool was set, a table was
695          *allocated and should be released
696          */
697         if (areq_ctx->mlli_params.curr_pool != NULL) {
698                 SSI_LOG_DEBUG("free MLLI buffer: dma=0x%08llX virt=%pK\n",
699                         (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
700                         areq_ctx->mlli_params.mlli_virt_addr);
701                 dma_pool_free(areq_ctx->mlli_params.curr_pool,
702                               areq_ctx->mlli_params.mlli_virt_addr,
703                               areq_ctx->mlli_params.mlli_dma_addr);
704         }
705
706         SSI_LOG_DEBUG("Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, req->assoclen, req->cryptlen);
707         size_to_unmap = req->assoclen + req->cryptlen;
708         if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) {
709                 size_to_unmap += areq_ctx->req_authsize;
710         }
711         if (areq_ctx->is_gcm4543)
712                 size_to_unmap += crypto_aead_ivsize(tfm);
713
714         dma_unmap_sg(dev, req->src, ssi_buffer_mgr_get_sgl_nents(req->src, size_to_unmap, &dummy, &chained), DMA_BIDIRECTIONAL);
715         if (unlikely(req->src != req->dst)) {
716                 SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
717                         sg_virt(req->dst));
718                 dma_unmap_sg(dev, req->dst, ssi_buffer_mgr_get_sgl_nents(req->dst, size_to_unmap, &dummy, &chained),
719                         DMA_BIDIRECTIONAL);
720         }
721 #if DX_HAS_ACP
722         if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
723             likely(req->src == req->dst)) {
724                 u32 size_to_skip = req->assoclen;
725
726                 if (areq_ctx->is_gcm4543) {
727                         size_to_skip += crypto_aead_ivsize(tfm);
728                 }
729                 /* copy mac to a temporary location to deal with possible
730                  * data memory overwrite caused by cache coherence problems
731                  */
732                 ssi_buffer_mgr_copy_scatterlist_portion(
733                         areq_ctx->backup_mac, req->src,
734                         size_to_skip + req->cryptlen - areq_ctx->req_authsize,
735                         size_to_skip + req->cryptlen, SSI_SG_FROM_BUF);
736         }
737 #endif
738 }
739
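/*
 * ssi_buffer_mgr_get_aead_icv_nents() - Determine over how many scatterlist
 * entries the ICV is spread, based on the size of the last entry, and report
 * whether it is fragmented. Returns -1 for unsupported ICV layouts.
 */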
740 static inline int ssi_buffer_mgr_get_aead_icv_nents(
741         struct scatterlist *sgl,
742         unsigned int sgl_nents,
743         unsigned int authsize,
744         u32 last_entry_data_size,
745         bool *is_icv_fragmented)
746 {
747         unsigned int icv_max_size = 0;
748         unsigned int icv_required_size = authsize > last_entry_data_size ? (authsize - last_entry_data_size) : authsize;
749         unsigned int nents;
750         unsigned int i;
751
752         if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
753                 *is_icv_fragmented = false;
754                 return 0;
755         }
756
757         for (i = 0; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED); i++) {
758                 if (sgl == NULL) {
759                         break;
760                 }
761                 sgl = sg_next(sgl);
762         }
763
764         if (sgl != NULL) {
765                 icv_max_size = sgl->length;
766         }
767
768         if (last_entry_data_size > authsize) {
769                 nents = 0; /* ICV attached to data in last entry (not fragmented!) */
770                 *is_icv_fragmented = false;
771         } else if (last_entry_data_size == authsize) {
772                 nents = 1; /* ICV placed in whole last entry (not fragmented!) */
773                 *is_icv_fragmented = false;
774         } else if (icv_max_size > icv_required_size) {
775                 nents = 1;
776                 *is_icv_fragmented = true;
777         } else if (icv_max_size == icv_required_size) {
778                 nents = 2;
779                 *is_icv_fragmented = true;
780         } else {
781                 SSI_LOG_ERR("Unsupported num. of ICV fragments (> %d)\n",
782                         MAX_ICV_NENTS_SUPPORTED);
783                 nents = -1; /*unsupported*/
784         }
785         SSI_LOG_DEBUG("is_frag=%s icv_nents=%u\n",
786                 (*is_icv_fragmented ? "true" : "false"), nents);
787
788         return nents;
789 }
790
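/*
 * ssi_buffer_mgr_aead_chain_iv() - DMA-map the request IV and, when chaining
 * is requested for a plaintext-authentication-only flow, add it to the assoc
 * MLLI chain.
 */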
791 static inline int ssi_buffer_mgr_aead_chain_iv(
792         struct ssi_drvdata *drvdata,
793         struct aead_request *req,
794         struct buffer_array *sg_data,
795         bool is_last, bool do_chain)
796 {
797         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
798         unsigned int hw_iv_size = areq_ctx->hw_iv_size;
799         struct device *dev = &drvdata->plat_dev->dev;
800         int rc = 0;
801
802         if (unlikely(req->iv == NULL)) {
803                 areq_ctx->gen_ctx.iv_dma_addr = 0;
804                 goto chain_iv_exit;
805         }
806
807         areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
808                 hw_iv_size, DMA_BIDIRECTIONAL);
809         if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
810                 SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
811                         hw_iv_size, req->iv);
812                 rc = -ENOMEM;
813                 goto chain_iv_exit;
814         }
815
816         SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
817                 hw_iv_size, req->iv,
818                 (unsigned long long)areq_ctx->gen_ctx.iv_dma_addr);
819         if (do_chain && areq_ctx->plaintext_authenticate_only) { /* TODO: what about CTR? */
820                 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
821                 unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
822                 unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
823                 /* Chain to given list */
824                 ssi_buffer_mgr_add_buffer_entry(
825                         sg_data, areq_ctx->gen_ctx.iv_dma_addr + iv_ofs,
826                         iv_size_to_authenc, is_last,
827                         &areq_ctx->assoc.mlli_nents);
828                 areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
829         }
830
831 chain_iv_exit:
832         return rc;
833 }
834
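/*
 * ssi_buffer_mgr_aead_chain_assoc() - Count the (already mapped) scatterlist
 * entries covering the associated data, select DLLI or MLLI accordingly and,
 * in the MLLI case, queue the assoc data for table generation.
 */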
835 static inline int ssi_buffer_mgr_aead_chain_assoc(
836         struct ssi_drvdata *drvdata,
837         struct aead_request *req,
838         struct buffer_array *sg_data,
839         bool is_last, bool do_chain)
840 {
841         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
842         int rc = 0;
843         u32 mapped_nents = 0;
844         struct scatterlist *current_sg = req->src;
845         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
846         unsigned int sg_index = 0;
847         u32 size_of_assoc = req->assoclen;
848
849         if (areq_ctx->is_gcm4543) {
850                 size_of_assoc += crypto_aead_ivsize(tfm);
851         }
852
853         if (sg_data == NULL) {
854                 rc = -EINVAL;
855                 goto chain_assoc_exit;
856         }
857
858         if (unlikely(req->assoclen == 0)) {
859                 areq_ctx->assoc_buff_type = SSI_DMA_BUF_NULL;
860                 areq_ctx->assoc.nents = 0;
861                 areq_ctx->assoc.mlli_nents = 0;
862                 SSI_LOG_DEBUG("Chain assoc of length 0: buff_type=%s nents=%u\n",
863                         GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
864                         areq_ctx->assoc.nents);
865                 goto chain_assoc_exit;
866         }
867
868         /* iterate over the sgl to see how many entries are for associated data;
869          * it is assumed that if we reach here, the sgl is already mapped */
870         sg_index = current_sg->length;
871         /* check whether the first entry contains all the associated data */
872         if (sg_index > size_of_assoc) {
873                 mapped_nents++;
874         } else {
875                 while (sg_index <= size_of_assoc) {
876                         current_sg = sg_next(current_sg);
877                         /* reaching the end of the sgl here is unexpected */
878                         if (current_sg == NULL) {
879                                 SSI_LOG_ERR("reached end of sg list. unexpected\n");
880                                 BUG();
881                         }
882                         sg_index += current_sg->length;
883                         mapped_nents++;
884                 }
885         }
886         if (unlikely(mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
887                 SSI_LOG_ERR("Too many fragments. current %d max %d\n",
888                             mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
889                 return -ENOMEM;
890         }
891         areq_ctx->assoc.nents = mapped_nents;
892
893         /* in CCM case we have additional entry for
894          * ccm header configurations
895          */
896         if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
897                 if (unlikely((mapped_nents + 1) >
898                         LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
899                         SSI_LOG_ERR("CCM case. Too many fragments. "
900                                 "Current %d max %d\n",
902                                 (areq_ctx->assoc.nents + 1),
903                                 LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
904                         rc = -ENOMEM;
905                         goto chain_assoc_exit;
906                 }
907         }
908
909         if (likely(mapped_nents == 1) &&
910             (areq_ctx->ccm_hdr_size == ccm_header_size_null))
911                 areq_ctx->assoc_buff_type = SSI_DMA_BUF_DLLI;
912         else
913                 areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
914
915         if (unlikely(do_chain ||
916                      (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
918                 SSI_LOG_DEBUG("Chain assoc: buff_type=%s nents=%u\n",
919                         GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
920                         areq_ctx->assoc.nents);
921                 ssi_buffer_mgr_add_scatterlist_entry(
922                         sg_data, areq_ctx->assoc.nents,
923                         req->src, req->assoclen, 0, is_last,
924                         &areq_ctx->assoc.mlli_nents);
925                 areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
926         }
927
928 chain_assoc_exit:
929         return rc;
930 }
931
932 static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
933         struct aead_request *req,
934         u32 *src_last_bytes, u32 *dst_last_bytes)
935 {
936         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
937         enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
938         unsigned int authsize = areq_ctx->req_authsize;
939
940         areq_ctx->is_icv_fragmented = false;
941         if (likely(req->src == req->dst)) {
942                 /*INPLACE*/
943                 areq_ctx->icv_dma_addr = sg_dma_address(
944                         areq_ctx->srcSgl)+
945                         (*src_last_bytes - authsize);
946                 areq_ctx->icv_virt_addr = sg_virt(
947                         areq_ctx->srcSgl) +
948                         (*src_last_bytes - authsize);
949         } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
950                 /*NON-INPLACE and DECRYPT*/
951                 areq_ctx->icv_dma_addr = sg_dma_address(
952                         areq_ctx->srcSgl) +
953                         (*src_last_bytes - authsize);
954                 areq_ctx->icv_virt_addr = sg_virt(
955                         areq_ctx->srcSgl) +
956                         (*src_last_bytes - authsize);
957         } else {
958                 /*NON-INPLACE and ENCRYPT*/
959                 areq_ctx->icv_dma_addr = sg_dma_address(
960                         areq_ctx->dstSgl) +
961                         (*dst_last_bytes - authsize);
962                 areq_ctx->icv_virt_addr = sg_virt(
963                         areq_ctx->dstSgl)+
964                         (*dst_last_bytes - authsize);
965         }
966 }
967
968 static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
969         struct ssi_drvdata *drvdata,
970         struct aead_request *req,
971         struct buffer_array *sg_data,
972         u32 *src_last_bytes, u32 *dst_last_bytes,
973         bool is_last_table)
974 {
975         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
976         enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
977         unsigned int authsize = areq_ctx->req_authsize;
978         int rc = 0, icv_nents;
979         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
980
981         if (likely(req->src == req->dst)) {
982                 /*INPLACE*/
983                 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
984                         areq_ctx->src.nents, areq_ctx->srcSgl,
985                         areq_ctx->cryptlen,areq_ctx->srcOffset, is_last_table,
986                         &areq_ctx->src.mlli_nents);
987
988                 icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
989                         areq_ctx->src.nents, authsize, *src_last_bytes,
990                         &areq_ctx->is_icv_fragmented);
991                 if (unlikely(icv_nents < 0)) {
992                         rc = -ENOTSUPP;
993                         goto prepare_data_mlli_exit;
994                 }
995
996                 if (unlikely(areq_ctx->is_icv_fragmented == true)) {
997                         /* Backup happens only when ICV is fragmented, ICV
998                          * verification is made by CPU compare in order to simplify
999                          * MAC verification upon request completion
1000                          */
1001                         if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
1002 #if !DX_HAS_ACP
1003                                 /* In ACP platform we already copying ICV
1004                                  * for any INPLACE-DECRYPT operation, hence
1005                                  * we must neglect this code.
1006                                  */
1007                                 u32 size_to_skip = req->assoclen;
1008                                 if (areq_ctx->is_gcm4543) {
1009                                         size_to_skip += crypto_aead_ivsize(tfm);
1010                                 }
1011                                 ssi_buffer_mgr_copy_scatterlist_portion(
1012                                         areq_ctx->backup_mac, req->src,
1013                                         size_to_skip + req->cryptlen - areq_ctx->req_authsize,
1014                                         size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
1015 #endif
1016                                 areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
1017                         } else {
1018                                 areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
1019                                 areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
1020                         }
1021                 } else { /* Contig. ICV */
1022                         /* Should handle if the sg is not contig. */
1023                         areq_ctx->icv_dma_addr = sg_dma_address(
1024                                 &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
1025                                 (*src_last_bytes - authsize);
1026                         areq_ctx->icv_virt_addr = sg_virt(
1027                                 &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
1028                                 (*src_last_bytes - authsize);
1029                 }
1030
1031         } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
1032                 /*NON-INPLACE and DECRYPT*/
1033                 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1034                         areq_ctx->src.nents, areq_ctx->srcSgl,
1035                         areq_ctx->cryptlen, areq_ctx->srcOffset,is_last_table,
1036                         &areq_ctx->src.mlli_nents);
1037                 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1038                         areq_ctx->dst.nents, areq_ctx->dstSgl,
1039                         areq_ctx->cryptlen,areq_ctx->dstOffset, is_last_table,
1040                         &areq_ctx->dst.mlli_nents);
1041
1042                 icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
1043                         areq_ctx->src.nents, authsize, *src_last_bytes,
1044                         &areq_ctx->is_icv_fragmented);
1045                 if (unlikely(icv_nents < 0)) {
1046                         rc = -ENOTSUPP;
1047                         goto prepare_data_mlli_exit;
1048                 }
1049
1050                 if (unlikely(areq_ctx->is_icv_fragmented == true)) {
1051                         /* Backup happens only when ICV is fragmented, ICV
1052                          * verification is made by CPU compare in order to simplify
1053                          * MAC verification upon request completion
1054                          */
1055                         u32 size_to_skip = req->assoclen;
1056                         if (areq_ctx->is_gcm4543) {
1057                                 size_to_skip += crypto_aead_ivsize(tfm);
1058                         }
1059                         ssi_buffer_mgr_copy_scatterlist_portion(
1060                                 areq_ctx->backup_mac, req->src,
1061                                 size_to_skip + req->cryptlen - areq_ctx->req_authsize,
1062                                 size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
1063                         areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
1064                 } else { /* Contig. ICV */
1065                 } else { /* Contig. ICV */
1066                         /* Should handle if the sg is not contig. */
1066                         areq_ctx->icv_dma_addr = sg_dma_address(
1067                                 &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
1068                                 (*src_last_bytes - authsize);
1069                         areq_ctx->icv_virt_addr = sg_virt(
1070                                 &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
1071                                 (*src_last_bytes - authsize);
1072                 }
1073
1074         } else {
1075                 /*NON-INPLACE and ENCRYPT*/
1076                 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1077                         areq_ctx->dst.nents, areq_ctx->dstSgl,
1078                         areq_ctx->cryptlen,areq_ctx->dstOffset, is_last_table,
1079                         &areq_ctx->dst.mlli_nents);
1080                 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1081                         areq_ctx->src.nents, areq_ctx->srcSgl,
1082                         areq_ctx->cryptlen, areq_ctx->srcOffset,is_last_table,
1083                         &areq_ctx->src.mlli_nents);
1084
1085                 icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dstSgl,
1086                         areq_ctx->dst.nents, authsize, *dst_last_bytes,
1087                         &areq_ctx->is_icv_fragmented);
1088                 if (unlikely(icv_nents < 0)) {
1089                         rc = -ENOTSUPP;
1090                         goto prepare_data_mlli_exit;
1091                 }
1092
1093                 if (likely(areq_ctx->is_icv_fragmented == false)) {
1094                         /* Contig. ICV */
1095                         areq_ctx->icv_dma_addr = sg_dma_address(
1096                                 &areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
1097                                 (*dst_last_bytes - authsize);
1098                         areq_ctx->icv_virt_addr = sg_virt(
1099                                 &areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
1100                                 (*dst_last_bytes - authsize);
1101                 } else {
1102                         areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
1103                         areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
1104                 }
1105         }
1106
1107 prepare_data_mlli_exit:
1108         return rc;
1109 }
1110
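/*
 * ssi_buffer_mgr_aead_chain_data() - Locate the cipher data region in the
 * src/dst scatterlists (skipping the associated data), map the dst list when
 * it differs from src, and set up DLLI or MLLI descriptors for the data.
 */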
1111 static inline int ssi_buffer_mgr_aead_chain_data(
1112         struct ssi_drvdata *drvdata,
1113         struct aead_request *req,
1114         struct buffer_array *sg_data,
1115         bool is_last_table, bool do_chain)
1116 {
1117         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1118         struct device *dev = &drvdata->plat_dev->dev;
1119         enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
1120         unsigned int authsize = areq_ctx->req_authsize;
1121         int src_last_bytes = 0, dst_last_bytes = 0;
1122         int rc = 0;
1123         u32 src_mapped_nents = 0, dst_mapped_nents = 0;
1124         u32 offset = 0;
1125         unsigned int size_for_map = req->assoclen + req->cryptlen; /* non-inplace mode */
1126         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1127         u32 sg_index = 0;
1128         bool chained = false;
1129         bool is_gcm4543 = areq_ctx->is_gcm4543;
1130         u32 size_to_skip = req->assoclen;
1131         if (is_gcm4543) {
1132                 size_to_skip += crypto_aead_ivsize(tfm);
1133         }
1134         offset = size_to_skip;
1135
1136         if (sg_data == NULL) {
1137                 rc = -EINVAL;
1138                 goto chain_data_exit;
1139         }
1140         areq_ctx->srcSgl = req->src;
1141         areq_ctx->dstSgl = req->dst;
1142
1143         if (is_gcm4543) {
1144                 size_for_map += crypto_aead_ivsize(tfm);
1145         }
1146
1147         size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
1148         src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src, size_for_map, &src_last_bytes, &chained);
1149         sg_index = areq_ctx->srcSgl->length;
1150         /* check where the data starts */
1151         while (sg_index <= size_to_skip) {
1152                 offset -= areq_ctx->srcSgl->length;
1153                 areq_ctx->srcSgl = sg_next(areq_ctx->srcSgl);
1154                 /* reaching the end of the sgl here is unexpected */
1155                 if (areq_ctx->srcSgl == NULL) {
1156                         SSI_LOG_ERR("reached end of sg list. unexpected\n");
1157                         BUG();
1158                 }
1159                 sg_index += areq_ctx->srcSgl->length;
1160                 src_mapped_nents--;
1161         }
1162         if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
1163                 SSI_LOG_ERR("Too many fragments. current %d max %d\n",
1164                             src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1165                 return -ENOMEM;
1166         }
1168
1169         areq_ctx->src.nents = src_mapped_nents;
1170
1171         areq_ctx->srcOffset = offset;
1172
1173         if (req->src != req->dst) {
1174                 size_for_map = req->assoclen + req->cryptlen;
1175                 size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
1176                 if (is_gcm4543) {
1177                         size_for_map += crypto_aead_ivsize(tfm);
1178                 }
1179
1180                 rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst, size_for_map,
1181                          DMA_BIDIRECTIONAL, &(areq_ctx->dst.nents),
1182                          LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
1183                                                    &dst_mapped_nents);
1184                 if (unlikely(rc != 0)) {
1185                         rc = -ENOMEM;
1186                         goto chain_data_exit;
1187                 }
1188         }
1189
1190         dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst, size_for_map, &dst_last_bytes, &chained);
1191         sg_index = areq_ctx->dstSgl->length;
1192         offset = size_to_skip;
1193
1194         /* check where the data starts */
1195         while (sg_index <= size_to_skip) {
1196                 offset -= areq_ctx->dstSgl->length;
1197                 areq_ctx->dstSgl = sg_next(areq_ctx->dstSgl);
1198                 /* reaching the end of the sgl here is unexpected */
1199                 if (areq_ctx->dstSgl == NULL) {
1200                         SSI_LOG_ERR("reached end of sg list. unexpected\n");
1201                         BUG();
1202                 }
1204                 sg_index += areq_ctx->dstSgl->length;
1205                 dst_mapped_nents--;
1206         }
1207         if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
1208                 SSI_LOG_ERR("Too many fragments. current %d max %d\n",
1209                             dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1210                 return -ENOMEM;
1211         }
1213         areq_ctx->dst.nents = dst_mapped_nents;
1214         areq_ctx->dstOffset = offset;
1215         if ((src_mapped_nents > 1) ||
1216             (dst_mapped_nents > 1) ||
1217             do_chain) {
1218                 areq_ctx->data_buff_type = SSI_DMA_BUF_MLLI;
1219                 rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req, sg_data,
1220                         &src_last_bytes, &dst_last_bytes, is_last_table);
1221         } else {
1222                 areq_ctx->data_buff_type = SSI_DMA_BUF_DLLI;
1223                 ssi_buffer_mgr_prepare_aead_data_dlli(
1224                                 req, &src_last_bytes, &dst_last_bytes);
1225         }
1226
1227 chain_data_exit:
1228         return rc;
1229 }
1230
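/*
 * ssi_buffer_mgr_update_aead_mlli_nents() - Assign MLLI SRAM addresses to the
 * assoc/src/dst tables and, for non-single-pass flows, fold the data table
 * entry counts into the assoc table count.
 */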
1231 static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
1232                                            struct aead_request *req)
1233 {
1234         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1235         u32 curr_mlli_size = 0;
1236
1237         if (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
1238                 areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
1239                 curr_mlli_size = areq_ctx->assoc.mlli_nents *
1240                                                 LLI_ENTRY_BYTE_SIZE;
1241         }
1242
1243         if (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI) {
1244                 /*Inplace case dst nents equal to src nents*/
1245                 if (req->src == req->dst) {
1246                         areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
1247                         areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
1248                                                                 curr_mlli_size;
1249                         areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
1250                         if (areq_ctx->is_single_pass == false)
1251                                 areq_ctx->assoc.mlli_nents +=
1252                                         areq_ctx->src.mlli_nents;
1253                 } else {
1254                         if (areq_ctx->gen_ctx.op_type ==
1255                                         DRV_CRYPTO_DIRECTION_DECRYPT) {
1256                                 areq_ctx->src.sram_addr =
1257                                                 drvdata->mlli_sram_addr +
1258                                                                 curr_mlli_size;
1259                                 areq_ctx->dst.sram_addr =
1260                                                 areq_ctx->src.sram_addr +
1261                                                 areq_ctx->src.mlli_nents *
1262                                                 LLI_ENTRY_BYTE_SIZE;
1263                                 if (!areq_ctx->is_single_pass)
1264                                         areq_ctx->assoc.mlli_nents +=
1265                                                 areq_ctx->src.mlli_nents;
1266                         } else {
1267                                 areq_ctx->dst.sram_addr =
1268                                                 drvdata->mlli_sram_addr +
1269                                                                 curr_mlli_size;
1270                                 areq_ctx->src.sram_addr =
1271                                                 areq_ctx->dst.sram_addr +
1272                                                 areq_ctx->dst.mlli_nents *
1273                                                 LLI_ENTRY_BYTE_SIZE;
1274                                 if (!areq_ctx->is_single_pass)
1275                                         areq_ctx->assoc.mlli_nents +=
1276                                                 areq_ctx->dst.mlli_nents;
1277                         }
1278                 }
1279         }
1280 }
1281
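/**
 * ssi_buffer_mgr_map_aead_request() - DMA-map all buffers of an AEAD request.
 * @drvdata: Driver private context.
 * @req: AEAD request to map.
 *
 * Maps the MAC, CCM and GCM helper buffers as needed, maps the src/dst
 * scatterlists, builds the assoc./IV/data chains and, when more than one
 * fragment is involved, generates the MLLI table(s) from the MLLI DMA pool.
 *
 * Return: 0 on success, -ENOMEM on failure; on failure all mappings done so
 * far are released via ssi_buffer_mgr_unmap_aead_request().
 */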
1282 int ssi_buffer_mgr_map_aead_request(struct ssi_drvdata *drvdata,
1283                                     struct aead_request *req)
1284 {
1285         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1286         struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1287         struct device *dev = &drvdata->plat_dev->dev;
1288         struct buffer_array sg_data;
1289         unsigned int authsize = areq_ctx->req_authsize;
1290         struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1291         int rc = 0;
1292         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1293         bool is_gcm4543 = areq_ctx->is_gcm4543;
1294
1295         u32 mapped_nents = 0;
1296         u32 dummy = 0; /* used for the assoc data fragments */
1297         u32 size_to_map = 0;
1298
1299         mlli_params->curr_pool = NULL;
1300         sg_data.num_of_buffers = 0;
1301
1302 #if DX_HAS_ACP
1303         if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
1304             likely(req->src == req->dst)) {
1306                 u32 size_to_skip = req->assoclen;
1307                 if (is_gcm4543)
1308                         size_to_skip += crypto_aead_ivsize(tfm);
1310                 /* copy the MAC to a temporary location to deal with possible
1311                  * data memory overwrites caused by a cache coherency problem.
1312                  */
1313                 ssi_buffer_mgr_copy_scatterlist_portion(
1314                         areq_ctx->backup_mac, req->src,
1315                         size_to_skip + req->cryptlen - areq_ctx->req_authsize,
1316                         size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
1317         }
1318 #endif
1319
1320         /* calculate the size of the cipher data; remove the ICV in decrypt */
1321         areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
1322                                  DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1323                                 req->cryptlen :
1324                                 (req->cryptlen - authsize);
1325
1326         areq_ctx->mac_buf_dma_addr = dma_map_single(dev,
1327                 areq_ctx->mac_buf, MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
1328         if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) {
1329                 SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK for DMA failed\n",
1330                         MAX_MAC_SIZE, areq_ctx->mac_buf);
1331                 rc = -ENOMEM;
1332                 goto aead_map_failure;
1333         }
1334
1335         if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
1336                 areq_ctx->ccm_iv0_dma_addr = dma_map_single(dev,
1337                         (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
1338                         AES_BLOCK_SIZE, DMA_TO_DEVICE);
1339
1340                 if (unlikely(dma_mapping_error(dev, areq_ctx->ccm_iv0_dma_addr))) {
1341                         SSI_LOG_ERR("Mapping ccm_iv0 %u B at va=%pK for DMA failed\n",
1342                                     AES_BLOCK_SIZE,
1343                                     (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET));
1344                         areq_ctx->ccm_iv0_dma_addr = 0;
1345                         rc = -ENOMEM;
1346                         goto aead_map_failure;
1347                 }
1348                 if (ssi_aead_handle_config_buf(dev, areq_ctx,
1349                         areq_ctx->ccm_config, &sg_data, req->assoclen) != 0) {
1350                         rc = -ENOMEM;
1351                         goto aead_map_failure;
1352                 }
1353         }
1354
1355 #if SSI_CC_HAS_AES_GCM
1356         if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1357                 areq_ctx->hkey_dma_addr = dma_map_single(dev,
1358                         areq_ctx->hkey, AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
1359                 if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) {
1360                         SSI_LOG_ERR("Mapping hkey %u B at va=%pK for DMA failed\n",
1361                                 AES_BLOCK_SIZE, areq_ctx->hkey);
1362                         rc = -ENOMEM;
1363                         goto aead_map_failure;
1364                 }
1365
1366                 areq_ctx->gcm_block_len_dma_addr = dma_map_single(dev,
1367                         &areq_ctx->gcm_len_block, AES_BLOCK_SIZE, DMA_TO_DEVICE);
1368                 if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_block_len_dma_addr))) {
1369                         SSI_LOG_ERR("Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1370                                 AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1371                         rc = -ENOMEM;
1372                         goto aead_map_failure;
1373                 }
1374
1375                 areq_ctx->gcm_iv_inc1_dma_addr = dma_map_single(dev,
1376                         areq_ctx->gcm_iv_inc1,
1377                         AES_BLOCK_SIZE, DMA_TO_DEVICE);
1378
1379                 if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc1_dma_addr))) {
1380                         SSI_LOG_ERR("Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
1381                                     AES_BLOCK_SIZE, areq_ctx->gcm_iv_inc1);
1383                         areq_ctx->gcm_iv_inc1_dma_addr = 0;
1384                         rc = -ENOMEM;
1385                         goto aead_map_failure;
1386                 }
1387
1388                 areq_ctx->gcm_iv_inc2_dma_addr = dma_map_single(dev,
1389                         areq_ctx->gcm_iv_inc2,
1390                         AES_BLOCK_SIZE, DMA_TO_DEVICE);
1391
1392                 if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc2_dma_addr))) {
1393                         SSI_LOG_ERR("Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
1394                                     AES_BLOCK_SIZE, areq_ctx->gcm_iv_inc2);
1396                         areq_ctx->gcm_iv_inc2_dma_addr = 0;
1397                         rc = -ENOMEM;
1398                         goto aead_map_failure;
1399                 }
1400         }
1401 #endif /*SSI_CC_HAS_AES_GCM*/
1402
1403         size_to_map = req->cryptlen + req->assoclen;
1404         if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
1405                 size_to_map += authsize;
1407         if (is_gcm4543)
1408                 size_to_map += crypto_aead_ivsize(tfm);
1409         rc = ssi_buffer_mgr_map_scatterlist(dev, req->src, size_to_map,
1410                                             DMA_BIDIRECTIONAL, &areq_ctx->src.nents,
1411                                             LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
1412         if (unlikely(rc != 0)) {
1413                 rc = -ENOMEM;
1414                 goto aead_map_failure;
1415         }
1416
1417         if (likely(areq_ctx->is_single_pass)) {
1418                 /*
1419                  * Create MLLI table for:
1420                  *   (1) Assoc. data
1421                  *   (2) Src/Dst SGLs
1422                  *   Note: IV is a contiguous buffer (not an SGL)
1423                  */
1424                 rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, true, false);
1425                 if (unlikely(rc != 0))
1426                         goto aead_map_failure;
1427                 rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, true, false);
1428                 if (unlikely(rc != 0))
1429                         goto aead_map_failure;
1430                 rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, false);
1431                 if (unlikely(rc != 0))
1432                         goto aead_map_failure;
1433         } else { /* DOUBLE-PASS flow */
1434                 /*
1435                  * Prepare MLLI table(s) in this order:
1436                  *
1437                  * If ENCRYPT/DECRYPT (inplace):
1438                  *   (1) MLLI table for assoc
1439                  *   (2) IV entry (chained right after end of assoc)
1440                  *   (3) MLLI for src/dst (inplace operation)
1441                  *
1442                  * If ENCRYPT (non-inplace)
1443                  *   (1) MLLI table for assoc
1444                  *   (2) IV entry (chained right after end of assoc)
1445                  *   (3) MLLI for dst
1446                  *   (4) MLLI for src
1447                  *
1448                  * If DECRYPT (non-inplace)
1449                  *   (1) MLLI table for assoc
1450                  *   (2) IV entry (chained right after end of assoc)
1451                  *   (3) MLLI for src
1452                  *   (4) MLLI for dst
1453                  */
1454                 rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, false, true);
1455                 if (unlikely(rc != 0))
1456                         goto aead_map_failure;
1457                 rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, false, true);
1458                 if (unlikely(rc != 0))
1459                         goto aead_map_failure;
1460                 rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, true);
1461                 if (unlikely(rc != 0))
1462                         goto aead_map_failure;
1463         }
1464
1465         /* MLLI support - start building the MLLI according to the above results */
1466         if (unlikely((areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
1467                      (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI))) {
1470                 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1471                 rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
1472                 if (unlikely(rc != 0))
1473                         goto aead_map_failure;
1475
1476                 ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
1477                 SSI_LOG_DEBUG("assoc params mn %d\n", areq_ctx->assoc.mlli_nents);
1478                 SSI_LOG_DEBUG("src params mn %d\n", areq_ctx->src.mlli_nents);
1479                 SSI_LOG_DEBUG("dst params mn %d\n", areq_ctx->dst.mlli_nents);
1480         }
1481         return 0;
1482
1483 aead_map_failure:
1484         ssi_buffer_mgr_unmap_aead_request(dev, req);
1485         return rc;
1486 }
1487
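/**
 * ssi_buffer_mgr_map_hash_request_final() - Map the data for a final/finup
 * hash operation.
 * @drvdata: Driver private context.
 * @ctx: ahash request context (struct ahash_req_ctx).
 * @src: Input scatterlist.
 * @nbytes: Number of bytes to hash from @src.
 * @do_update: When false only the previously buffered data is mapped.
 *
 * Maps the buffered partial block and, if requested, the source scatterlist;
 * a single mapped entry is handled as DLLI, otherwise an MLLI table is built.
 *
 * Return: 0 on success, -ENOMEM on mapping failure.
 */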
1488 int ssi_buffer_mgr_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx,
1489         struct scatterlist *src, unsigned int nbytes, bool do_update)
1490 {
1491         struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1492         struct device *dev = &drvdata->plat_dev->dev;
1493         u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
1494                         areq_ctx->buff0;
1495         u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
1496                         &areq_ctx->buff0_cnt;
1497         struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1498         struct buffer_array sg_data;
1499         struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1500         u32 dummy = 0;
1501         u32 mapped_nents = 0;
1502
1503         SSI_LOG_DEBUG("final params: curr_buff=%pK "
1504                      "curr_buff_cnt=0x%X nbytes=0x%X "
1505                      "src=%pK curr_index=%u\n",
1506                      curr_buff, *curr_buff_cnt, nbytes,
1507                      src, areq_ctx->buff_index);
1508         /* Init the type of the dma buffer */
1509         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
1510         mlli_params->curr_pool = NULL;
1511         sg_data.num_of_buffers = 0;
1512         areq_ctx->in_nents = 0;
1513
1514         if (unlikely(nbytes == 0 && *curr_buff_cnt == 0)) {
1515                 /* nothing to do */
1516                 return 0;
1517         }
1518
1519         /* TODO: copy the data in case the buffer is large enough for the operation */
1520         /* map the previous buffer */
1521         if (*curr_buff_cnt != 0) {
1522                 if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
1523                                             *curr_buff_cnt, &sg_data) != 0) {
1524                         return -ENOMEM;
1525                 }
1526         }
1527
1528         if (src && (nbytes > 0) && do_update) {
1529                 if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src, nbytes,
1531                                           DMA_TO_DEVICE,
1532                                           &areq_ctx->in_nents,
1533                                           LLI_MAX_NUM_OF_DATA_ENTRIES,
1534                                           &dummy, &mapped_nents))) {
1535                         goto unmap_curr_buff;
1536                 }
1537                 if (src && (mapped_nents == 1) &&
1538                     (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
1539                         memcpy(areq_ctx->buff_sg, src,
1540                                sizeof(struct scatterlist));
1541                         areq_ctx->buff_sg->length = nbytes;
1542                         areq_ctx->curr_sg = areq_ctx->buff_sg;
1543                         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
1544                 } else {
1545                         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
1546                 }
1547
1548         }
1549
1550         /* build MLLI */
1551         if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
1552                 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1553                 /* add the src data to the sg_data */
1554                 ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
1555                                         areq_ctx->in_nents,
1556                                         src,
1557                                         nbytes, 0,
1558                                         true, &areq_ctx->mlli_nents);
1559                 if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
1560                                                           mlli_params) != 0))
1561                         goto fail_unmap_din;
1563         }
1564         /* change the buffer index for the unmap function */
1565         areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
1566         SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
1567                 GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
1568         return 0;
1569
1570 fail_unmap_din:
1571         dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1572
1573 unmap_curr_buff:
1574         if (*curr_buff_cnt != 0)
1575                 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1577         return -ENOMEM;
1578 }
1579
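/**
 * ssi_buffer_mgr_map_hash_request_update() - Map the data for a hash update.
 * @drvdata: Driver private context.
 * @ctx: ahash request context (struct ahash_req_ctx).
 * @src: Input scatterlist.
 * @nbytes: Number of bytes added by this update.
 * @block_size: Hash block size; only whole blocks are mapped, the residue is
 *              copied into the spare buffer for the next call.
 *
 * Return: 0 if data was mapped for processing, 1 if everything fits in the
 * buffer (less than one block in total), -ENOMEM on mapping failure.
 */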
1580 int ssi_buffer_mgr_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx,
1581         struct scatterlist *src, unsigned int nbytes, unsigned int block_size)
1582 {
1583         struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1584         struct device *dev = &drvdata->plat_dev->dev;
1585         u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
1586                         areq_ctx->buff0;
1587         u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
1588                         &areq_ctx->buff0_cnt;
1589         u8 *next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
1590                         areq_ctx->buff1;
1591         u32 *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
1592                         &areq_ctx->buff1_cnt;
1593         struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1594         unsigned int update_data_len;
1595         u32 total_in_len = nbytes + *curr_buff_cnt;
1596         struct buffer_array sg_data;
1597         struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1598         unsigned int swap_index = 0;
1599         u32 dummy = 0;
1600         u32 mapped_nents = 0;
1601
1602         SSI_LOG_DEBUG("update params: curr_buff=%pK "
1603                      "curr_buff_cnt=0x%X nbytes=0x%X "
1604                      "src=%pK curr_index=%u\n",
1605                      curr_buff, *curr_buff_cnt, nbytes,
1606                      src, areq_ctx->buff_index);
1607         /* Init the type of the dma buffer */
1608         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
1609         mlli_params->curr_pool = NULL;
1610         areq_ctx->curr_sg = NULL;
1611         sg_data.num_of_buffers = 0;
1612         areq_ctx->in_nents = 0;
1613
1614         if (unlikely(total_in_len < block_size)) {
1615                 SSI_LOG_DEBUG(" less than one block: curr_buff=%pK "
1616                              "*curr_buff_cnt=0x%X copy_to=%pK\n",
1617                         curr_buff, *curr_buff_cnt,
1618                         &curr_buff[*curr_buff_cnt]);
1619                 areq_ctx->in_nents =
1620                         ssi_buffer_mgr_get_sgl_nents(src,
1621                                                     nbytes,
1622                                                     &dummy, NULL);
1623                 sg_copy_to_buffer(src, areq_ctx->in_nents,
1624                                   &curr_buff[*curr_buff_cnt], nbytes);
1625                 *curr_buff_cnt += nbytes;
1626                 return 1;
1627         }
1628
1629         /* Calculate the residue size */
1630         *next_buff_cnt = total_in_len & (block_size - 1);
1631         /* update data len */
1632         update_data_len = total_in_len - *next_buff_cnt;
1633
1634         SSI_LOG_DEBUG(" temp length : *next_buff_cnt=0x%X "
1635                      "update_data_len=0x%X\n",
1636                 *next_buff_cnt, update_data_len);
1637
1638         /* Copy the new residue to next buffer */
1639         if (*next_buff_cnt != 0) {
1640                 SSI_LOG_DEBUG(" handle residue: next buff %pK skip data %u"
1641                              " residue %u\n", next_buff,
1642                              (update_data_len - *curr_buff_cnt),
1643                              *next_buff_cnt);
1644                 ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
1645                              (update_data_len - *curr_buff_cnt),
1646                              nbytes, SSI_SG_TO_BUF);
1647                 /* change the buffer index for next operation */
1648                 swap_index = 1;
1649         }
1650
1651         if (*curr_buff_cnt != 0) {
1652                 if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
1653                                             *curr_buff_cnt, &sg_data) != 0) {
1654                         return -ENOMEM;
1655                 }
1656                 /* change the buffer index for next operation */
1657                 swap_index = 1;
1658         }
1659
1660         if (update_data_len > *curr_buff_cnt) {
1661                 if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
1662                                           (update_data_len - *curr_buff_cnt),
1663                                           DMA_TO_DEVICE, &areq_ctx->in_nents,
1665                                           LLI_MAX_NUM_OF_DATA_ENTRIES,
1666                                           &dummy, &mapped_nents))) {
1667                         goto unmap_curr_buff;
1668                 }
1669                 if ((mapped_nents == 1) &&
1670                     (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
1671                         /* only one entry in the SG and no previous data */
1672                         memcpy(areq_ctx->buff_sg, src,
1673                                sizeof(struct scatterlist));
1674                         areq_ctx->buff_sg->length = update_data_len;
1675                         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
1676                         areq_ctx->curr_sg = areq_ctx->buff_sg;
1677                 } else {
1678                         areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
1679                 }
1680         }
1681
1682         if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
1683                 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1684                 /* add the src data to the sg_data */
1685                 ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
1686                                         areq_ctx->in_nents,
1687                                         src,
1688                                         (update_data_len - *curr_buff_cnt), 0,
1689                                         true, &areq_ctx->mlli_nents);
1690                 if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
1691                                                           mlli_params) != 0))
1692                         goto fail_unmap_din;
1695         }
1696         areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
1697
1698         return 0;
1699
1700 fail_unmap_din:
1701         dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1702
1703 unmap_curr_buff:
1704         if (*curr_buff_cnt != 0)
1705                 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1707         return -ENOMEM;
1708 }
1709
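/**
 * ssi_buffer_mgr_unmap_hash_request() - Release the DMA resources of a hash
 * request.
 * @dev: Device that performed the mapping.
 * @ctx: ahash request context (struct ahash_req_ctx).
 * @src: Input scatterlist that was mapped, if any.
 * @do_revert: True when rolling back; the buffered length is kept and the
 *             buffer index is restored.
 *
 * Frees the MLLI table (if one was allocated) and unmaps the source
 * scatterlist and the temporary block buffer.
 */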
1710 void ssi_buffer_mgr_unmap_hash_request(
1711         struct device *dev, void *ctx, struct scatterlist *src, bool do_revert)
1712 {
1713         struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1714         u32 *prev_len = areq_ctx->buff_index ?  &areq_ctx->buff0_cnt :
1715                                                 &areq_ctx->buff1_cnt;
1716
1717         /*In case a pool was set, a table was
1718          *allocated and should be released
1719          */
1720         if (areq_ctx->mlli_params.curr_pool) {
1721                 SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n",
1722                              (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
1723                              areq_ctx->mlli_params.mlli_virt_addr);
1724                 dma_pool_free(areq_ctx->mlli_params.curr_pool,
1725                               areq_ctx->mlli_params.mlli_virt_addr,
1726                               areq_ctx->mlli_params.mlli_dma_addr);
1727         }
1728
1729         if (src && likely(areq_ctx->in_nents != 0)) {
1730                 SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=0x%llX len=0x%X\n",
1731                              sg_virt(src),
1732                              (unsigned long long)sg_dma_address(src),
1733                              sg_dma_len(src));
1734                 dma_unmap_sg(dev, src,
1735                              areq_ctx->in_nents, DMA_TO_DEVICE);
1736         }
1737
1738         if (*prev_len != 0) {
1739                 SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK "
1740                              "dma=0x%llX len 0x%X\n",
1741                                 sg_virt(areq_ctx->buff_sg),
1742                                 (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
1743                                 sg_dma_len(areq_ctx->buff_sg));
1744                 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1745                 if (!do_revert) {
1746                         /* clean the previous data length for update operation */
1747                         *prev_len = 0;
1748                 } else {
1749                         areq_ctx->buff_index ^= 1;
1750                 }
1751         }
1752 }
1753
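/**
 * ssi_buffer_mgr_init() - Allocate the buffer manager handle and its DMA pool
 * of MLLI tables.
 * @drvdata: Driver private context.
 *
 * Return: 0 on success, -ENOMEM if the handle or the pool cannot be allocated.
 */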
1754 int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
1755 {
1756         struct buff_mgr_handle *buff_mgr_handle;
1757         struct device *dev = &drvdata->plat_dev->dev;
1758
1759         buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
1761         if (!buff_mgr_handle)
1762                 return -ENOMEM;
1763
1764         drvdata->buff_mgr_handle = buff_mgr_handle;
1765
1766         buff_mgr_handle->mlli_buffs_pool = dma_pool_create(
1767                                 "dx_single_mlli_tables", dev,
1768                                 MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
1769                                 LLI_ENTRY_BYTE_SIZE,
1770                                 MLLI_TABLE_MIN_ALIGNMENT, 0);
1771
1772         if (unlikely(!buff_mgr_handle->mlli_buffs_pool))
1773                 goto error;
1774
1775         return 0;
1776
1777 error:
1778         ssi_buffer_mgr_fini(drvdata);
1779         return -ENOMEM;
1780 }
1781
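/**
 * ssi_buffer_mgr_fini() - Destroy the MLLI DMA pool and free the buffer
 * manager handle.
 * @drvdata: Driver private context.
 *
 * Return: 0 in all cases.
 */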
1782 int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
1783 {
1784         struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
1785
1786         if (buff_mgr_handle) {
1787                 dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
1788                 kfree(drvdata->buff_mgr_handle);
1789                 drvdata->buff_mgr_handle = NULL;
1791         }
1792         return 0;
1793 }