/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/crypto.h>
#include <linux/version.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "ssi_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "ssi_cipher.h"
#include "ssi_hash.h"

#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
#define MLLI_TABLE_MIN_ALIGNMENT 4 /* Force the MLLI table to be aligned to a uint32_t boundary */
#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES (2 * LLI_MAX_NUM_OF_DATA_ENTRIES + \
				       LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)

#ifdef CC_DEBUG
#define DUMP_SGL(sg) \
	while (sg) { \
		SSI_LOG_DEBUG("page=%lu offset=%u length=%u (dma_len=%u) " \
			      "dma_addr=%08x\n", (sg)->page_link, (sg)->offset, \
			      (sg)->length, sg_dma_len(sg), (sg)->dma_address); \
		(sg) = sg_next(sg); \
	}
#define DUMP_MLLI_TABLE(mlli_p, nents) \
	do { \
		SSI_LOG_DEBUG("mlli=%pK nents=%u\n", (mlli_p), (nents)); \
		while ((nents)--) { \
			SSI_LOG_DEBUG("addr=0x%08X size=0x%08X\n", \
				      (mlli_p)[LLI_WORD0_OFFSET], \
				      (mlli_p)[LLI_WORD1_OFFSET]); \
			(mlli_p) += LLI_ENTRY_WORD_SIZE; \
		} \
	} while (0)
#define GET_DMA_BUFFER_TYPE(buff_type) ( \
	((buff_type) == SSI_DMA_BUF_NULL) ? "BUF_NULL" : \
	((buff_type) == SSI_DMA_BUF_DLLI) ? "BUF_DLLI" : \
	((buff_type) == SSI_DMA_BUF_MLLI) ? "BUF_MLLI" : "BUF_INVALID")
#else
#define DUMP_SGL(sg)
#define DUMP_MLLI_TABLE(mlli_p, nents)
#define GET_DMA_BUFFER_TYPE(buff_type)
#endif

enum dma_buffer_type {
	DMA_NULL_TYPE = -1,
	DMA_SGL_TYPE = 1,
	DMA_BUFF_TYPE = 2,
};

struct buff_mgr_handle {
	struct dma_pool *mlli_buffs_pool;
};

union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};

struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	uint32_t *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};

#ifdef CC_DMA_48BIT_SIM
dma_addr_t ssi_buff_mgr_update_dma_addr(dma_addr_t orig_addr, uint32_t data_len)
{
	dma_addr_t tmp_dma_addr;
#ifdef CC_DMA_48BIT_SIM_FULL
	/* With this code all addresses will be switched to 48 bits. */
	/* The if condition protects from double expansion. */
	if ((((orig_addr >> 16) & 0xFFFF) != 0xFFFF) &&
	    (data_len <= CC_MAX_MLLI_ENTRY_SIZE)) {
#else
	if ((!(((orig_addr >> 16) & 0xFF) % 2)) &&
	    (data_len <= CC_MAX_MLLI_ENTRY_SIZE)) {
#endif
		tmp_dma_addr = ((orig_addr << 16) | 0xFFFF0000 |
				(orig_addr & UINT16_MAX));
		SSI_LOG_DEBUG("MAP DMA: orig address=0x%llX "
			      "dma_address=0x%llX\n",
			      orig_addr, tmp_dma_addr);
		return tmp_dma_addr;
	}
	return orig_addr;
}

dma_addr_t ssi_buff_mgr_restore_dma_addr(dma_addr_t orig_addr)
{
	dma_addr_t tmp_dma_addr;
#ifdef CC_DMA_48BIT_SIM_FULL
	/* With this code all addresses will be restored from 48 bits. */
	/* The if condition protects from double restoring. */
	if ((orig_addr >> 32) & 0xFFFF) {
#else
	if (((orig_addr >> 32) & 0xFFFF) &&
	    !(((orig_addr >> 32) & 0xFF) % 2)) {
#endif
		/* Return the high 16 bits */
		tmp_dma_addr = (orig_addr >> 16);
		/* Clear the 0xFFFF set in the low bits by the address expansion */
		tmp_dma_addr &= 0xFFFF0000;
		/* Set the original low 16 bits */
		tmp_dma_addr |= (orig_addr & UINT16_MAX);
		SSI_LOG_DEBUG("Release DMA: orig address=0x%llX "
			      "dma_address=0x%llX\n",
			      orig_addr, tmp_dma_addr);
		return tmp_dma_addr;
	}
	return orig_addr;
}
#endif

/**
 * ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries.
 *
 * @sg_list: SG list
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 * @is_chained: [OUT] Set to true if a chained SG entry is encountered.
 */
static unsigned int ssi_buffer_mgr_get_sgl_nents(
	struct scatterlist *sg_list, unsigned int nbytes, uint32_t *lbytes, bool *is_chained)
{
	unsigned int nents = 0;

	while (nbytes != 0) {
		if (sg_is_chain(sg_list)) {
			SSI_LOG_ERR("Unexpected chained entry "
				    "in sg (entry=0x%X)\n", nents);
			BUG();
		}
		if (sg_list->length != 0) {
			nents++;
			/* get the number of bytes in the last entry */
			*lbytes = nbytes;
			nbytes -= (sg_list->length > nbytes) ? nbytes : sg_list->length;
			sg_list = sg_next(sg_list);
		} else {
			sg_list = (struct scatterlist *)sg_page(sg_list);
			if (is_chained != NULL)
				*is_chained = true;
		}
	}
	SSI_LOG_DEBUG("nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}

/**
 * ssi_buffer_mgr_zero_sgl() - Zero scatter list data.
 *
 * @sgl: SG list to zero
 * @data_len: Number of bytes to zero, starting from the head of the list.
 */
void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len)
{
	struct scatterlist *current_sg = sgl;
	int sg_index = 0;

	while (sg_index <= data_len) {
		if (current_sg == NULL) {
			/* reached the end of the sgl --> just return back */
			return;
		}
		memset(sg_virt(current_sg), 0, current_sg->length);
		sg_index += current_sg->length;
		current_sg = sg_next(current_sg);
	}
}

/**
 * ssi_buffer_mgr_copy_scatterlist_portion() - Copy scatter list data,
 * from to_skip to end, to dest and vice versa
 *
 * @dest: Buffer to copy to/from
 * @sg: SG list
 * @to_skip: Number of bytes to skip before copying
 * @end: Offset of the last byte to copy
 * @direct: Copy direction (SG to buffer or buffer to SG)
 */
void ssi_buffer_mgr_copy_scatterlist_portion(
	u8 *dest, struct scatterlist *sg,
	uint32_t to_skip, uint32_t end,
	enum ssi_sg_cpy_direct direct)
{
	uint32_t nents, lbytes;

	nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip), to_skip,
		       (direct == SSI_SG_TO_BUF));
}

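/**
 * ssi_buffer_mgr_render_buff_to_mlli() - Render a contiguous DMA buffer into
 * MLLI table entries, splitting buffers larger than CC_MAX_MLLI_ENTRY_SIZE.
 *
 * @buff_dma: DMA address of the buffer
 * @buff_size: Buffer size in bytes
 * @curr_nents: [IN/OUT] Running count of MLLI entries rendered so far
 * @mlli_entry_pp: [IN/OUT] Pointer to the next free MLLI entry
 */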
static inline int ssi_buffer_mgr_render_buff_to_mlli(
	dma_addr_t buff_dma, uint32_t buff_size, uint32_t *curr_nents,
	uint32_t **mlli_entry_pp)
{
	uint32_t *mlli_entry_p = *mlli_entry_pp;
	uint32_t new_nents;

	/* Verify there is no memory overflow */
	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
		return -ENOMEM;

	/* Handle buffers longer than 64 Kbytes */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		SSI_UPDATE_DMA_ADDR_TO_48BIT(buff_dma, CC_MAX_MLLI_ENTRY_SIZE);
		LLI_SET_ADDR(mlli_entry_p, buff_dma);
		LLI_SET_SIZE(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
		SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
			      mlli_entry_p[LLI_WORD0_OFFSET],
			      mlli_entry_p[LLI_WORD1_OFFSET]);
		SSI_RESTORE_DMA_ADDR_TO_48BIT(buff_dma);
		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
		mlli_entry_p = mlli_entry_p + 2;
		(*curr_nents)++;
	}
	/* Last entry */
	SSI_UPDATE_DMA_ADDR_TO_48BIT(buff_dma, buff_size);
	LLI_SET_ADDR(mlli_entry_p, buff_dma);
	LLI_SET_SIZE(mlli_entry_p, buff_size);
	SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
		      mlli_entry_p[LLI_WORD0_OFFSET],
		      mlli_entry_p[LLI_WORD1_OFFSET]);
	mlli_entry_p = mlli_entry_p + 2;
	*mlli_entry_pp = mlli_entry_p;
	(*curr_nents)++;
	return 0;
}

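/**
 * ssi_buffer_mgr_render_scatterlist_to_mlli() - Render a mapped scatterlist
 * into MLLI table entries, one or more entries per SG element.
 *
 * @sgl: Source scatterlist
 * @sgl_data_len: Number of data bytes to render
 * @sglOffset: Offset into the first SG element
 * @curr_nents: [IN/OUT] Running count of MLLI entries rendered so far
 * @mlli_entry_pp: [IN/OUT] Pointer to the next free MLLI entry
 */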
static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
	struct scatterlist *sgl, uint32_t sgl_data_len, uint32_t sglOffset, uint32_t *curr_nents,
	uint32_t **mlli_entry_pp)
{
	struct scatterlist *curr_sgl = sgl;
	uint32_t *mlli_entry_p = *mlli_entry_pp;
	int32_t rc = 0;

	for ( ; (curr_sgl != NULL) && (sgl_data_len != 0);
	      curr_sgl = sg_next(curr_sgl)) {
		uint32_t entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sglOffset) ?
				sg_dma_len(curr_sgl) - sglOffset : sgl_data_len;
		sgl_data_len -= entry_data_len;
		rc = ssi_buffer_mgr_render_buff_to_mlli(
			sg_dma_address(curr_sgl) + sglOffset, entry_data_len, curr_nents,
			&mlli_entry_p);
		if (rc != 0)
			return rc;
		sglOffset = 0;
	}
	*mlli_entry_pp = mlli_entry_p;
	return 0;
}

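/**
 * ssi_buffer_mgr_generate_mlli() - Allocate an MLLI table from the DMA pool
 * and render all buffers gathered in @sg_data into it.
 *
 * @dev: Device object
 * @sg_data: Accumulated buffer/scatterlist entries to link into one table
 * @mlli_params: [OUT] MLLI table virtual/DMA addresses and length
 */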
static int ssi_buffer_mgr_generate_mlli(
	struct device *dev,
	struct buffer_array *sg_data,
	struct mlli_params *mlli_params)
{
	uint32_t *mlli_p;
	uint32_t total_nents = 0, prev_total_nents = 0;
	int rc = 0, i;

	SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers);

	/* Allocate memory from the pointed pool */
	mlli_params->mlli_virt_addr = dma_pool_alloc(
			mlli_params->curr_pool, GFP_KERNEL,
			&(mlli_params->mlli_dma_addr));
	if (unlikely(mlli_params->mlli_virt_addr == NULL)) {
		SSI_LOG_ERR("dma_pool_alloc() failed\n");
		rc = -ENOMEM;
		goto build_mlli_exit;
	}
	SSI_UPDATE_DMA_ADDR_TO_48BIT(mlli_params->mlli_dma_addr,
				     (MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				      LLI_ENTRY_BYTE_SIZE));
	/* Point to start of MLLI */
	mlli_p = (uint32_t *)mlli_params->mlli_virt_addr;
	/* Go over all SG's and link them into one MLLI table */
	for (i = 0; i < sg_data->num_of_buffers; i++) {
		if (sg_data->type[i] == DMA_SGL_TYPE)
			rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
				sg_data->entry[i].sgl,
				sg_data->total_data_len[i], sg_data->offset[i], &total_nents,
				&mlli_p);
		else /* DMA_BUFF_TYPE */
			rc = ssi_buffer_mgr_render_buff_to_mlli(
				sg_data->entry[i].buffer_dma,
				sg_data->total_data_len[i], &total_nents,
				&mlli_p);
		if (rc != 0)
			return rc;

		/* set last bit in the current table */
		if (sg_data->mlli_nents[i] != NULL) {
			/* Calculate the current MLLI table length for the
			 * length field in the descriptor
			 */
			*(sg_data->mlli_nents[i]) +=
				(total_nents - prev_total_nents);
			prev_total_nents = total_nents;
		}
	}

	/* Set MLLI size for the bypass operation */
	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);

	SSI_LOG_DEBUG("MLLI params: "
		      "virt_addr=%pK dma_addr=0x%llX mlli_len=0x%X\n",
		      mlli_params->mlli_virt_addr,
		      (unsigned long long)mlli_params->mlli_dma_addr,
		      mlli_params->mlli_len);

build_mlli_exit:
	return rc;
}

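/**
 * ssi_buffer_mgr_add_buffer_entry() - Add a single contiguous DMA buffer to
 * the buffer array used for MLLI construction.
 */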
static inline void ssi_buffer_mgr_add_buffer_entry(
	struct buffer_array *sgl_data,
	dma_addr_t buffer_dma, unsigned int buffer_len,
	bool is_last_entry, uint32_t *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	SSI_LOG_DEBUG("index=%u single_buff=0x%llX "
		      "buffer_len=0x%08X is_last=%d\n",
		      index, (unsigned long long)buffer_dma, buffer_len, is_last_entry);
	sgl_data->nents[index] = 1;
	sgl_data->entry[index].buffer_dma = buffer_dma;
	sgl_data->offset[index] = 0;
	sgl_data->total_data_len[index] = buffer_len;
	sgl_data->type[index] = DMA_BUFF_TYPE;
	sgl_data->is_last[index] = is_last_entry;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index] != NULL)
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}

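/**
 * ssi_buffer_mgr_add_scatterlist_entry() - Add a mapped scatterlist segment to
 * the buffer array used for MLLI construction.
 */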
static inline void ssi_buffer_mgr_add_scatterlist_entry(
	struct buffer_array *sgl_data,
	unsigned int nents,
	struct scatterlist *sgl,
	unsigned int data_len,
	unsigned int data_offset,
	bool is_last_table,
	uint32_t *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	SSI_LOG_DEBUG("index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
		      index, nents, sgl, data_len, is_last_table);
	sgl_data->nents[index] = nents;
	sgl_data->entry[index].sgl = sgl;
	sgl_data->offset[index] = data_offset;
	sgl_data->total_data_len[index] = data_len;
	sgl_data->type[index] = DMA_SGL_TYPE;
	sgl_data->is_last[index] = is_last_table;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index] != NULL)
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}

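/**
 * ssi_buffer_mgr_dma_map_sg() - Map a (possibly chained) scatterlist entry by
 * entry. Returns the number of mapped entries, or 0 on failure after undoing
 * any partial mapping.
 */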
static int
ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, uint32_t nents,
			  enum dma_data_direction direction)
{
	uint32_t i, j;
	struct scatterlist *l_sg = sg;

	for (i = 0; i < nents; i++) {
		if (l_sg == NULL)
			break;
		if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
			SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
			goto err;
		}
		l_sg = sg_next(l_sg);
	}
	return nents;

err:
	/* Restore mapped parts */
	for (j = 0; j < i; j++) {
		if (sg == NULL)
			break;
		dma_unmap_sg(dev, sg, 1, direction);
		sg = sg_next(sg);
	}
	return 0;
}

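/**
 * ssi_buffer_mgr_map_scatterlist() - DMA-map a scatterlist, either as a single
 * DLLI entry or as multiple entries bounded by @max_sg_nents.
 */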
static int ssi_buffer_mgr_map_scatterlist(
	struct device *dev, struct scatterlist *sg,
	unsigned int nbytes, int direction,
	uint32_t *nents, uint32_t max_sg_nents,
	uint32_t *lbytes, uint32_t *mapped_nents)
{
	bool is_chained = false;

	if (sg_is_last(sg)) {
		/* One entry only case - set to DLLI */
		if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
			SSI_LOG_ERR("dma_map_sg() single buffer failed\n");
			return -ENOMEM;
		}
		SSI_LOG_DEBUG("Mapped sg: dma_address=0x%llX "
			      "page_link=0x%08lX addr=%pK offset=%u "
			      "length=%u\n",
			      (unsigned long long)sg_dma_address(sg),
			      sg->page_link,
			      sg_virt(sg),
			      sg->offset, sg->length);
		*lbytes = nbytes;
		*nents = 1;
		*mapped_nents = 1;
		SSI_UPDATE_DMA_ADDR_TO_48BIT(sg_dma_address(sg), sg_dma_len(sg));
	} else {  /* sg_is_last */
		*nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
						      &is_chained);
		if (*nents > max_sg_nents) {
			SSI_LOG_ERR("Too many fragments. current %d max %d\n",
				    *nents, max_sg_nents);
			*nents = 0;
			return -ENOMEM;
		}
		if (!is_chained) {
			/* In case of mmu the number of mapped nents might
			 * be changed from the original sgl nents
			 */
			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
			if (unlikely(*mapped_nents == 0)) {
				*nents = 0;
				SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
				return -ENOMEM;
			}
		} else {
			/* In this case the driver maps entry by entry so it
			 * must have the same nents before and after map
			 */
			*mapped_nents = ssi_buffer_mgr_dma_map_sg(dev,
								  sg,
								  *nents,
								  direction);
			if (unlikely(*mapped_nents != *nents)) {
				*nents = *mapped_nents;
				SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
				return -ENOMEM;
			}
		}
	}

	return 0;
}

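/**
 * ssi_ahash_handle_curr_buf() - Map the hash request's pending data buffer as
 * a single-entry scatterlist and add it to @sg_data for a possible MLLI.
 */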
static inline int ssi_ahash_handle_curr_buf(struct device *dev,
					    struct ahash_req_ctx *areq_ctx,
					    uint8_t *curr_buff,
					    uint32_t curr_buff_cnt,
					    struct buffer_array *sg_data)
{
	SSI_LOG_DEBUG(" handle curr buff %x set to DLLI\n", curr_buff_cnt);
	/* create sg for the current buffer */
	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
	if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
				DMA_TO_DEVICE) != 1)) {
		SSI_LOG_ERR("dma_map_sg() src buffer failed\n");
		return -ENOMEM;
	}
	SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
		      "page_link=0x%08lX addr=%pK "
		      "offset=%u length=%u\n",
		      (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
		      areq_ctx->buff_sg->page_link,
		      sg_virt(areq_ctx->buff_sg),
		      areq_ctx->buff_sg->offset,
		      areq_ctx->buff_sg->length);
	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
	areq_ctx->curr_sg = areq_ctx->buff_sg;
	areq_ctx->in_nents = 0;
	/* prepare for case of MLLI */
	ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, areq_ctx->buff_sg,
					     curr_buff_cnt, 0, false, NULL);
	return 0;
}

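/**
 * ssi_buffer_mgr_unmap_blkcipher_request() - Release the DMA mappings (IV,
 * MLLI table, source and destination scatterlists) taken for a cipher request.
 */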
void ssi_buffer_mgr_unmap_blkcipher_request(
	struct device *dev,
	void *ctx,
	unsigned int ivsize,
	struct scatterlist *src,
	struct scatterlist *dst)
{
	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;

	if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
		SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n",
			      (unsigned long long)req_ctx->gen_ctx.iv_dma_addr,
			      ivsize);
		SSI_RESTORE_DMA_ADDR_TO_48BIT(req_ctx->gen_ctx.iv_dma_addr);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize,
				 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
				 DMA_TO_DEVICE);
	}
	/* Release pool */
	if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
		SSI_RESTORE_DMA_ADDR_TO_48BIT(req_ctx->mlli_params.mlli_dma_addr);
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(src));
	dma_unmap_sg(dev, src, req_ctx->in_nents,
		     DMA_BIDIRECTIONAL);
	SSI_LOG_DEBUG("Unmapped req->src=%pK\n",
		      sg_virt(src));

	if (src != dst) {
		SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(dst));
		dma_unmap_sg(dev, dst, req_ctx->out_nents,
			     DMA_BIDIRECTIONAL);
		SSI_LOG_DEBUG("Unmapped req->dst=%pK\n",
			      sg_virt(dst));
	}
}

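/**
 * ssi_buffer_mgr_map_blkcipher_request() - DMA-map the IV, source and
 * destination buffers of a cipher request and build an MLLI table when more
 * than one entry is needed.
 */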
int ssi_buffer_mgr_map_blkcipher_request(
	struct ssi_drvdata *drvdata,
	void *ctx,
	unsigned int ivsize,
	unsigned int nbytes,
	void *info,
	struct scatterlist *src,
	struct scatterlist *dst)
{
	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
	struct mlli_params *mlli_params = &req_ctx->mlli_params;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	struct device *dev = &drvdata->plat_dev->dev;
	struct buffer_array sg_data;
	uint32_t dummy = 0;
	int rc = 0;
	uint32_t mapped_nents = 0;

	req_ctx->dma_buf_type = SSI_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Map IV buffer */
	if (likely(ivsize != 0)) {
		dump_byte_array("iv", (uint8_t *)info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, (void *)info,
				       ivsize,
				       req_ctx->is_giv ? DMA_BIDIRECTIONAL :
				       DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev,
					       req_ctx->gen_ctx.iv_dma_addr))) {
			SSI_LOG_ERR("Mapping iv %u B at va=%pK "
				    "for DMA failed\n", ivsize, info);
			return -ENOMEM;
		}
		SSI_UPDATE_DMA_ADDR_TO_48BIT(req_ctx->gen_ctx.iv_dma_addr,
					     ivsize);
		SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
			      ivsize, info,
			      (unsigned long long)req_ctx->gen_ctx.iv_dma_addr);
	} else {
		req_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src SGL */
	rc = ssi_buffer_mgr_map_scatterlist(dev, src,
					    nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
					    LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
	if (unlikely(rc != 0)) {
		rc = -ENOMEM;
		goto ablkcipher_exit;
	}
	if (mapped_nents > 1)
		req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;

	if (unlikely(src == dst)) {
		/* Handle inplace operation */
		if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
			req_ctx->out_nents = 0;
			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
							     req_ctx->in_nents, src,
							     nbytes, 0, true,
							     &req_ctx->in_mlli_nents);
		}
	} else {
		/* Map the dst sg */
		if (unlikely(ssi_buffer_mgr_map_scatterlist(
				dev, dst, nbytes,
				DMA_BIDIRECTIONAL, &req_ctx->out_nents,
				LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
				&mapped_nents))) {
			rc = -ENOMEM;
			goto ablkcipher_exit;
		}
		if (mapped_nents > 1)
			req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;

		if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
							     req_ctx->in_nents, src,
							     nbytes, 0, true,
							     &req_ctx->in_mlli_nents);
			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
							     req_ctx->out_nents, dst,
							     nbytes, 0, true,
							     &req_ctx->out_mlli_nents);
		}
	}

	if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
		if (unlikely(rc != 0))
			goto ablkcipher_exit;
	}

	SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n",
		      GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));

	return 0;

ablkcipher_exit:
	ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
	return rc;
}

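/**
 * ssi_buffer_mgr_map_hash_request_final() - Map the data of a final/finup hash
 * request: any pending buffer plus the request source scatterlist.
 */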
int ssi_buffer_mgr_map_hash_request_final(
	struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = &drvdata->plat_dev->dev;
	uint8_t *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
			areq_ctx->buff0;
	uint32_t *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
			&areq_ctx->buff0_cnt;
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	uint32_t dummy = 0;
	uint32_t mapped_nents = 0;

	SSI_LOG_DEBUG(" final params : curr_buff=%pK "
		      "curr_buff_cnt=0x%X nbytes = 0x%X "
		      "src=%pK curr_index=%u\n",
		      curr_buff, *curr_buff_cnt, nbytes,
		      src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (unlikely(nbytes == 0 && *curr_buff_cnt == 0)) {
		/* nothing to do */
		return 0;
	}

	/* TODO: copy data in case that buffer is enough for operation */
	/* map the previous buffer */
	if (*curr_buff_cnt != 0) {
		if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
					      *curr_buff_cnt, &sg_data) != 0) {
			return -ENOMEM;
		}
	}

	if (src && (nbytes > 0) && do_update) {
		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
							    nbytes,
							    DMA_TO_DEVICE,
							    &areq_ctx->in_nents,
							    LLI_MAX_NUM_OF_DATA_ENTRIES,
							    &dummy, &mapped_nents))) {
			goto unmap_curr_buff;
		}
		if (src && (mapped_nents == 1)
		    && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = nbytes;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
		} else {
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
		}
	}

	/* build mlli */
	if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
						     areq_ctx->in_nents,
						     src,
						     nbytes, 0,
						     true, &areq_ctx->mlli_nents);
		if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
							  mlli_params) != 0)) {
			goto fail_unmap_din;
		}
	}
	/* change the buffer index for the unmap function */
	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
	SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
		      GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt != 0)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return -ENOMEM;
}

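/**
 * ssi_buffer_mgr_map_hash_request_update() - Map the data of a hash update
 * request. Data below one block is only buffered; otherwise the block-aligned
 * part is mapped and the residue is copied to the next buffer.
 * Returns 1 if the data was only buffered, 0 on full mapping, negative on error.
 */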
int ssi_buffer_mgr_map_hash_request_update(
	struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = &drvdata->plat_dev->dev;
	uint8_t *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
			areq_ctx->buff0;
	uint32_t *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
			&areq_ctx->buff0_cnt;
	uint8_t *next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
			areq_ctx->buff1;
	uint32_t *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
			&areq_ctx->buff1_cnt;
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	unsigned int update_data_len;
	uint32_t total_in_len = nbytes + *curr_buff_cnt;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	unsigned int swap_index = 0;
	uint32_t dummy = 0;
	uint32_t mapped_nents = 0;

	SSI_LOG_DEBUG(" update params : curr_buff=%pK "
		      "curr_buff_cnt=0x%X nbytes=0x%X "
		      "src=%pK curr_index=%u\n",
		      curr_buff, *curr_buff_cnt, nbytes,
		      src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	areq_ctx->curr_sg = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (unlikely(total_in_len < block_size)) {
		SSI_LOG_DEBUG(" less than one block: curr_buff=%pK "
			      "*curr_buff_cnt=0x%X copy_to=%pK\n",
			      curr_buff, *curr_buff_cnt,
			      &curr_buff[*curr_buff_cnt]);
		areq_ctx->in_nents =
			ssi_buffer_mgr_get_sgl_nents(src,
						     nbytes,
						     &dummy, NULL);
		sg_copy_to_buffer(src, areq_ctx->in_nents,
				  &curr_buff[*curr_buff_cnt], nbytes);
		*curr_buff_cnt += nbytes;
		return 1;
	}

	/* Calculate the residue size */
	*next_buff_cnt = total_in_len & (block_size - 1);
	/* update data len */
	update_data_len = total_in_len - *next_buff_cnt;

	SSI_LOG_DEBUG(" temp length : *next_buff_cnt=0x%X "
		      "update_data_len=0x%X\n",
		      *next_buff_cnt, update_data_len);

	/* Copy the new residue to next buffer */
	if (*next_buff_cnt != 0) {
		SSI_LOG_DEBUG(" handle residue: next buff %pK skip data %u"
			      " residue %u\n", next_buff,
			      (update_data_len - *curr_buff_cnt),
			      *next_buff_cnt);
		ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
							(update_data_len - *curr_buff_cnt),
							nbytes, SSI_SG_TO_BUF);
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (*curr_buff_cnt != 0) {
		if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
					      *curr_buff_cnt, &sg_data) != 0) {
			return -ENOMEM;
		}
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (update_data_len > *curr_buff_cnt) {
		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
							    (update_data_len - *curr_buff_cnt),
							    DMA_TO_DEVICE,
							    &areq_ctx->in_nents,
							    LLI_MAX_NUM_OF_DATA_ENTRIES,
							    &dummy, &mapped_nents))) {
			goto unmap_curr_buff;
		}
		if ((mapped_nents == 1)
		    && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
			/* only one entry in the SG and no previous data */
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = update_data_len;
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
		} else {
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
		}
	}

	if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
						     areq_ctx->in_nents,
						     src,
						     (update_data_len - *curr_buff_cnt), 0,
						     true, &areq_ctx->mlli_nents);
		if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
							  mlli_params) != 0)) {
			goto fail_unmap_din;
		}
	}
	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt != 0)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return -ENOMEM;
}

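/**
 * ssi_buffer_mgr_unmap_hash_request() - Release the MLLI table and the DMA
 * mappings taken for a hash request. When @do_revert is set, the buffer index
 * is switched back instead of clearing the previous-data length.
 */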
void ssi_buffer_mgr_unmap_hash_request(
	struct device *dev, void *ctx, struct scatterlist *src, bool do_revert)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	uint32_t *prev_len = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
						    &areq_ctx->buff1_cnt;

	/* In case a pool was set, a table was
	 * allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool != NULL) {
		SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n",
			      (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
			      areq_ctx->mlli_params.mlli_virt_addr);
		SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->mlli_params.mlli_dma_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	if ((src) && likely(areq_ctx->in_nents != 0)) {
		SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=0x%llX len=0x%X\n",
			      sg_virt(src),
			      (unsigned long long)sg_dma_address(src),
			      sg_dma_len(src));
		SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(src));
		dma_unmap_sg(dev, src,
			     areq_ctx->in_nents, DMA_TO_DEVICE);
	}

	if (*prev_len != 0) {
		SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK "
			      "dma=0x%llX len 0x%X\n",
			      sg_virt(areq_ctx->buff_sg),
			      (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
			      sg_dma_len(areq_ctx->buff_sg));
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
		if (!do_revert) {
			/* clean the previous data length for update operation */
			*prev_len = 0;
		} else {
			areq_ctx->buff_index ^= 1;
		}
	}
}

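/**
 * ssi_buffer_mgr_init() - Allocate the buffer manager handle and the DMA pool
 * used for MLLI tables.
 */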
int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle;
	struct device *dev = &drvdata->plat_dev->dev;

	buff_mgr_handle = kmalloc(sizeof(struct buff_mgr_handle), GFP_KERNEL);
	if (buff_mgr_handle == NULL)
		return -ENOMEM;

	drvdata->buff_mgr_handle = buff_mgr_handle;

	buff_mgr_handle->mlli_buffs_pool = dma_pool_create(
				"dx_single_mlli_tables", dev,
				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);

	if (unlikely(buff_mgr_handle->mlli_buffs_pool == NULL))
		goto error;

	return 0;

error:
	ssi_buffer_mgr_fini(drvdata);
	return -ENOMEM;
}

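/**
 * ssi_buffer_mgr_fini() - Destroy the MLLI DMA pool and free the buffer
 * manager handle.
 */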
int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;

	if (buff_mgr_handle != NULL) {
		if (buff_mgr_handle->mlli_buffs_pool != NULL)
			dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
		kfree(drvdata->buff_mgr_handle);
		drvdata->buff_mgr_handle = NULL;
	}
	return 0;
}