/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/crypto.h>
#include <linux/version.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>
29 #include "ssi_buffer_mgr.h"
30 #include "cc_lli_defs.h"
31 #include "ssi_cipher.h"
#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
#define MLLI_TABLE_MIN_ALIGNMENT 4 /* Force the MLLI table to be 32-bit aligned */
#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES (2 * LLI_MAX_NUM_OF_DATA_ENTRIES + \
				       LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)
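
/*
 * Sizing note (derived from the constants above): a single MLLI table can
 * hold at most 2 * 128 + 4 = 260 LLI entries, so the DMA pool created in
 * ssi_buffer_mgr_init() allocates 260 * LLI_ENTRY_BYTE_SIZE bytes per table.
 */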
#ifdef CC_DEBUG
#define DUMP_SGL(sg) \
	do { \
		SSI_LOG_DEBUG("page=%lu offset=%u length=%u (dma_len=%u) dma_addr=%08x\n", \
			      (sg)->page_link, (sg)->offset, \
			      (sg)->length, sg_dma_len(sg), (sg)->dma_address); \
	} while (0)
#define DUMP_MLLI_TABLE(mlli_p, nents) \
	do { \
		SSI_LOG_DEBUG("mlli=%pK nents=%u\n", (mlli_p), (nents)); \
		while ((nents)--) { \
			SSI_LOG_DEBUG("addr=0x%08X size=0x%08X\n", \
				      (mlli_p)[LLI_WORD0_OFFSET], \
				      (mlli_p)[LLI_WORD1_OFFSET]); \
			(mlli_p) += LLI_ENTRY_WORD_SIZE; \
		} \
	} while (0)
#define GET_DMA_BUFFER_TYPE(buff_type) ( \
	((buff_type) == SSI_DMA_BUF_NULL) ? "BUF_NULL" : \
	((buff_type) == SSI_DMA_BUF_DLLI) ? "BUF_DLLI" : \
	((buff_type) == SSI_DMA_BUF_MLLI) ? "BUF_MLLI" : "BUF_INVALID")
#else
#define DX_BUFFER_MGR_DUMP_SGL(sg)
#define DX_BUFFER_MGR_DUMP_MLLI_TABLE(mlli_p, nents)
#define GET_DMA_BUFFER_TYPE(buff_type)
#endif
enum dma_buffer_type {
	DMA_NULL_TYPE = -1,
	DMA_SGL_TYPE = 1,
	DMA_BUFF_TYPE = 2,
};
struct buff_mgr_handle {
	struct dma_pool *mlli_buffs_pool;
};
union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};
/* Describes all the buffers gathered for one MLLI table */
struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	uint32_t *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};
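
/*
 * Illustrative flow (a sketch, not a real call site): callers accumulate
 * mapped buffers into a struct buffer_array and then render them into a
 * single MLLI table in one pass:
 *
 *	struct buffer_array sg_data;
 *
 *	sg_data.num_of_buffers = 0;
 *	ssi_buffer_mgr_add_scatterlist_entry(&sg_data, nents, src, nbytes,
 *					     0, true, &req_ctx->in_mlli_nents);
 *	rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
 */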
#ifdef CC_DMA_48BIT_SIM
dma_addr_t ssi_buff_mgr_update_dma_addr(dma_addr_t orig_addr, uint32_t data_len)
{
	dma_addr_t tmp_dma_addr;

#ifdef CC_DMA_48BIT_SIM_FULL
	/* With this code all addresses will be switched to 48 bits. */
	/* The if condition protects from double expansion. */
	if ((((orig_addr >> 16) & 0xFFFF) != 0xFFFF) &&
	    (data_len <= CC_MAX_MLLI_ENTRY_SIZE)) {
#else
	if ((!(((orig_addr >> 16) & 0xFF) % 2)) &&
	    (data_len <= CC_MAX_MLLI_ENTRY_SIZE)) {
#endif
		tmp_dma_addr = ((orig_addr << 16) | 0xFFFF0000 |
				(orig_addr & UINT16_MAX));
		SSI_LOG_DEBUG("MAP DMA: orig address=0x%llX dma_address=0x%llX\n",
			      orig_addr, tmp_dma_addr);
		return tmp_dma_addr;
	}
	return orig_addr;
}
dma_addr_t ssi_buff_mgr_restore_dma_addr(dma_addr_t orig_addr)
{
	dma_addr_t tmp_dma_addr;

#ifdef CC_DMA_48BIT_SIM_FULL
	/* With this code all addresses will be restored from 48 bits. */
	/* The if condition protects from double restoring. */
	if ((orig_addr >> 32) & 0xFFFF) {
#else
	if (((orig_addr >> 32) & 0xFFFF) &&
	    !(((orig_addr >> 32) & 0xFF) % 2)) {
#endif
		/* return the high 16 bits */
		tmp_dma_addr = (orig_addr >> 16);
		/* clear the 0xFFFF set in the low bits by the address expansion */
		tmp_dma_addr &= 0xFFFF0000;
		/* Set the original low 16 bits */
		tmp_dma_addr |= (orig_addr & UINT16_MAX);
		SSI_LOG_DEBUG("Release DMA: orig address=0x%llX dma_address=0x%llX\n",
			      orig_addr, tmp_dma_addr);
		return tmp_dma_addr;
	}
	return orig_addr;
}
#endif /* CC_DMA_48BIT_SIM */
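
/*
 * Worked example of the 48-bit simulation round trip (illustrative value):
 * for orig_addr = 0xAB123456, expansion yields (0xAB123456 << 16) |
 * 0xFFFF0000 | 0x3456 = 0xAB12FFFF3456. Restoring shifts right by 16
 * (0xAB12FFFF), masks with 0xFFFF0000 (0xAB120000) and ORs the preserved
 * low 16 bits back in, recovering 0xAB123456.
 */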
/**
 * ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries.
 * @sg_list: SG list
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 * @is_chained: [OUT] Set when a chained SGL is detected (may be NULL)
 */
static unsigned int ssi_buffer_mgr_get_sgl_nents(
	struct scatterlist *sg_list, unsigned int nbytes, uint32_t *lbytes, bool *is_chained)
{
	unsigned int nents = 0;

	while (nbytes != 0) {
		if (sg_is_chain(sg_list)) {
			SSI_LOG_ERR("Unexpected chained entry in sg (entry=0x%X)\n",
				    nents);
			BUG();
		}
		if (sg_list->length != 0) {
			nents++;
			/* get the number of bytes in the last entry */
			*lbytes = nbytes;
			nbytes -= (sg_list->length > nbytes) ? nbytes : sg_list->length;
			sg_list = sg_next(sg_list);
		} else {
			sg_list = (struct scatterlist *)sg_page(sg_list);
			if (is_chained != NULL)
				*is_chained = true;
		}
	}
	SSI_LOG_DEBUG("nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}
/**
 * ssi_buffer_mgr_zero_sgl() - Zero scatterlist data.
 * @sgl: SG list to zero
 * @data_len: number of bytes to zero
 */
void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len)
{
	struct scatterlist *current_sg = sgl;
	uint32_t sg_index = 0;

	while (sg_index <= data_len) {
		if (current_sg == NULL)
			/* reached the end of the sgl --> just return back */
			return;
		memset(sg_virt(current_sg), 0, current_sg->length);
		sg_index += current_sg->length;
		current_sg = sg_next(current_sg);
	}
}
/**
 * ssi_buffer_mgr_copy_scatterlist_portion() - Copy scatterlist data,
 * from to_skip to end, to dest and vice versa.
 * @dest: linear buffer
 * @sg: SG list
 * @to_skip: number of bytes to skip at the head of the SG list
 * @end: offset of the last byte to copy
 * @direct: copy direction (SSI_SG_TO_BUF or SSI_SG_FROM_BUF)
 */
void ssi_buffer_mgr_copy_scatterlist_portion(
	u8 *dest, struct scatterlist *sg,
	uint32_t to_skip, uint32_t end,
	enum ssi_sg_cpy_direct direct)
{
	uint32_t nents, lbytes;

	nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
	/* Pass to_skip as the skip offset; passing 0 here would copy from
	 * the head of the SG list and contradict the "from to_skip" contract
	 * documented above.
	 */
	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip), to_skip,
		       (direct == SSI_SG_TO_BUF));
}
static inline int ssi_buffer_mgr_render_buff_to_mlli(
	dma_addr_t buff_dma, uint32_t buff_size, uint32_t *curr_nents,
	uint32_t **mlli_entry_pp)
{
	uint32_t *mlli_entry_p = *mlli_entry_pp;
	uint32_t new_nents;

	/* Verify there is no memory overflow */
	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
		return -ENOMEM;

	/* handle buffer longer than 64 kbytes */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		SSI_UPDATE_DMA_ADDR_TO_48BIT(buff_dma, CC_MAX_MLLI_ENTRY_SIZE);
		LLI_SET_ADDR(mlli_entry_p, buff_dma);
		LLI_SET_SIZE(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
		SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",
			      *curr_nents,
			      mlli_entry_p[LLI_WORD0_OFFSET],
			      mlli_entry_p[LLI_WORD1_OFFSET]);
		SSI_RESTORE_DMA_ADDR_TO_48BIT(buff_dma);
		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
		mlli_entry_p = mlli_entry_p + 2;
		(*curr_nents)++;
	}
	/* Last entry */
	SSI_UPDATE_DMA_ADDR_TO_48BIT(buff_dma, buff_size);
	LLI_SET_ADDR(mlli_entry_p, buff_dma);
	LLI_SET_SIZE(mlli_entry_p, buff_size);
	SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",
		      *curr_nents,
		      mlli_entry_p[LLI_WORD0_OFFSET],
		      mlli_entry_p[LLI_WORD1_OFFSET]);
	mlli_entry_p = mlli_entry_p + 2;
	*mlli_entry_pp = mlli_entry_p;
	(*curr_nents)++;
	return 0;
}
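
/*
 * Example (assuming CC_MAX_MLLI_ENTRY_SIZE is 64 KB, as the comment above
 * implies): a contiguous 150 KB buffer is rendered as three LLI entries of
 * 64 KB, 64 KB and 22 KB, advancing *curr_nents by 3.
 */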
static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
	struct scatterlist *sgl, uint32_t sgl_data_len, uint32_t sgl_offset,
	uint32_t *curr_nents, uint32_t **mlli_entry_pp)
{
	struct scatterlist *curr_sgl = sgl;
	uint32_t *mlli_entry_p = *mlli_entry_pp;
	int rc = 0;

	for ( ; (curr_sgl != NULL) && (sgl_data_len != 0);
	      curr_sgl = sg_next(curr_sgl)) {
		uint32_t entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
				sg_dma_len(curr_sgl) - sgl_offset :
				sgl_data_len;

		sgl_data_len -= entry_data_len;
		rc = ssi_buffer_mgr_render_buff_to_mlli(
			sg_dma_address(curr_sgl) + sgl_offset, entry_data_len,
			curr_nents, &mlli_entry_p);
		if (rc != 0)
			return rc;
		/* The offset only applies to the first entry */
		sgl_offset = 0;
	}
	*mlli_entry_pp = mlli_entry_p;
	return 0;
}
static int ssi_buffer_mgr_generate_mlli(
	struct device *dev,
	struct buffer_array *sg_data,
	struct mlli_params *mlli_params)
{
	uint32_t *mlli_p;
	uint32_t total_nents = 0, prev_total_nents = 0;
	int rc = 0, i;

	SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers);

	/* Allocate memory from the pointed pool */
	mlli_params->mlli_virt_addr = dma_pool_alloc(
			mlli_params->curr_pool, GFP_KERNEL,
			&(mlli_params->mlli_dma_addr));
	if (unlikely(mlli_params->mlli_virt_addr == NULL)) {
		SSI_LOG_ERR("dma_pool_alloc() failed\n");
		rc = -ENOMEM;
		goto build_mlli_exit;
	}
	SSI_UPDATE_DMA_ADDR_TO_48BIT(mlli_params->mlli_dma_addr,
				     (MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				      LLI_ENTRY_BYTE_SIZE));
	/* Point to start of MLLI */
	mlli_p = (uint32_t *)mlli_params->mlli_virt_addr;
	/* go over all SG's and link it to one MLLI table */
	for (i = 0; i < sg_data->num_of_buffers; i++) {
		if (sg_data->type[i] == DMA_SGL_TYPE)
			rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
				sg_data->entry[i].sgl,
				sg_data->total_data_len[i],
				sg_data->offset[i], &total_nents, &mlli_p);
		else /* DMA_BUFF_TYPE */
			rc = ssi_buffer_mgr_render_buff_to_mlli(
				sg_data->entry[i].buffer_dma,
				sg_data->total_data_len[i], &total_nents,
				&mlli_p);
		if (rc != 0)
			return rc;

		/* set last bit in the current table */
		if (sg_data->mlli_nents[i] != NULL) {
			/* Calculate the current MLLI table length for the
			 * length field in the descriptor
			 */
			*(sg_data->mlli_nents[i]) +=
				(total_nents - prev_total_nents);
			prev_total_nents = total_nents;
		}
	}

	/* Set MLLI size for the bypass operation */
	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);

	SSI_LOG_DEBUG("MLLI params: virt_addr=%pK dma_addr=0x%llX mlli_len=0x%X\n",
		      mlli_params->mlli_virt_addr,
		      (unsigned long long)mlli_params->mlli_dma_addr,
		      mlli_params->mlli_len);

build_mlli_exit:
	return rc;
}
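
/*
 * Layout note: each MLLI entry is two 32-bit words (LLI_WORD0_OFFSET holds
 * the address word, LLI_WORD1_OFFSET the size word), which is why the
 * renderers above advance mlli_entry_p by 2 and why mlli_len is
 * total_nents * LLI_ENTRY_BYTE_SIZE.
 */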
static inline void ssi_buffer_mgr_add_buffer_entry(
	struct buffer_array *sgl_data,
	dma_addr_t buffer_dma, unsigned int buffer_len,
	bool is_last_entry, uint32_t *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	SSI_LOG_DEBUG("index=%u single_buff=0x%llX buffer_len=0x%08X is_last=%d\n",
		      index, (unsigned long long)buffer_dma, buffer_len,
		      is_last_entry);
	sgl_data->nents[index] = 1;
	sgl_data->entry[index].buffer_dma = buffer_dma;
	sgl_data->offset[index] = 0;
	sgl_data->total_data_len[index] = buffer_len;
	sgl_data->type[index] = DMA_BUFF_TYPE;
	sgl_data->is_last[index] = is_last_entry;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index] != NULL)
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}
static inline void ssi_buffer_mgr_add_scatterlist_entry(
	struct buffer_array *sgl_data,
	unsigned int nents,
	struct scatterlist *sgl,
	unsigned int data_len,
	unsigned int data_offset,
	bool is_last_table,
	uint32_t *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	SSI_LOG_DEBUG("index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
		      index, nents, sgl, data_len, is_last_table);
	sgl_data->nents[index] = nents;
	sgl_data->entry[index].sgl = sgl;
	sgl_data->offset[index] = data_offset;
	sgl_data->total_data_len[index] = data_len;
	sgl_data->type[index] = DMA_SGL_TYPE;
	sgl_data->is_last[index] = is_last_table;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index] != NULL)
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}
static int ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg,
				     uint32_t nents,
				     enum dma_data_direction direction)
{
	uint32_t i, j;
	struct scatterlist *l_sg = sg;

	for (i = 0; i < nents; i++) {
		if (l_sg == NULL)
			break;
		if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
			SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
			goto err;
		}
		l_sg = sg_next(l_sg);
	}
	return nents;

err:
	/* Restore mapped parts */
	for (j = 0; j < i; j++) {
		if (sg == NULL)
			break;
		dma_unmap_sg(dev, sg, 1, direction);
		sg = sg_next(sg);
	}
	return 0;
}
static int ssi_buffer_mgr_map_scatterlist(
	struct device *dev, struct scatterlist *sg,
	unsigned int nbytes, int direction,
	uint32_t *nents, uint32_t max_sg_nents,
	uint32_t *lbytes, uint32_t *mapped_nents)
{
	bool is_chained = false;

	if (sg_is_last(sg)) {
		/* One entry only case - set to DLLI */
		if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
			SSI_LOG_ERR("dma_map_sg() single buffer failed\n");
			return -ENOMEM;
		}
		SSI_LOG_DEBUG("Mapped sg: dma_address=0x%llX page_link=0x%08lX addr=%pK offset=%u length=%u\n",
			      (unsigned long long)sg_dma_address(sg),
			      sg->page_link, sg_virt(sg),
			      sg->offset, sg->length);
		*lbytes = nbytes;
		*nents = 1;
		*mapped_nents = 1;
		SSI_UPDATE_DMA_ADDR_TO_48BIT(sg_dma_address(sg), sg_dma_len(sg));
	} else { /* !sg_is_last */
		*nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
						      &is_chained);
		if (*nents > max_sg_nents) {
			SSI_LOG_ERR("Too many fragments. current %d max %d\n",
				    *nents, max_sg_nents);
			*nents = 0;
			return -ENOMEM;
		}
		if (!is_chained) {
			/* In case of mmu the number of mapped nents might
			 * be changed from the original sgl nents
			 */
			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
			if (unlikely(*mapped_nents == 0)) {
				*nents = 0;
				SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
				return -ENOMEM;
			}
		} else {
			/* In this case the driver maps entry by entry so it
			 * must have the same nents before and after map
			 */
			*mapped_nents = ssi_buffer_mgr_dma_map_sg(dev, sg,
								  *nents,
								  direction);
			if (unlikely(*mapped_nents != *nents)) {
				*nents = *mapped_nents;
				SSI_LOG_ERR("ssi_buffer_mgr_dma_map_sg() failed\n");
				return -ENOMEM;
			}
		}
	}

	return 0;
}
static inline int ssi_ahash_handle_curr_buf(struct device *dev,
					    struct ahash_req_ctx *areq_ctx,
					    uint8_t *curr_buff,
					    uint32_t curr_buff_cnt,
					    struct buffer_array *sg_data)
{
	SSI_LOG_DEBUG("handle curr buff %x set to DLLI\n", curr_buff_cnt);
	/* create sg for the current buffer */
	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
	if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
				DMA_TO_DEVICE) != 1)) {
		SSI_LOG_ERR("dma_map_sg() src buffer failed\n");
		return -ENOMEM;
	}
	SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX page_link=0x%08lX addr=%pK offset=%u length=%u\n",
		      (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
		      areq_ctx->buff_sg->page_link,
		      sg_virt(areq_ctx->buff_sg),
		      areq_ctx->buff_sg->offset,
		      areq_ctx->buff_sg->length);
	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
	areq_ctx->curr_sg = areq_ctx->buff_sg;
	areq_ctx->in_nents = 0;
	/* prepare for case of MLLI */
	ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, areq_ctx->buff_sg,
					     curr_buff_cnt, 0, false, NULL);
	return 0;
}
void ssi_buffer_mgr_unmap_blkcipher_request(
	struct device *dev,
	void *ctx,
	unsigned int ivsize,
	struct scatterlist *src,
	struct scatterlist *dst)
{
	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;

	if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
		SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n",
			      (unsigned long long)req_ctx->gen_ctx.iv_dma_addr,
			      ivsize);
		SSI_RESTORE_DMA_ADDR_TO_48BIT(req_ctx->gen_ctx.iv_dma_addr);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize,
				 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
				 DMA_TO_DEVICE);
	}
	/* Release pool */
	if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
		SSI_RESTORE_DMA_ADDR_TO_48BIT(req_ctx->mlli_params.mlli_dma_addr);
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(src));
	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
	SSI_LOG_DEBUG("Unmapped req->src=%pK\n", sg_virt(src));

	if (src != dst) {
		SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(dst));
		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
		SSI_LOG_DEBUG("Unmapped req->dst=%pK\n", sg_virt(dst));
	}
}
int ssi_buffer_mgr_map_blkcipher_request(
	struct ssi_drvdata *drvdata,
	void *ctx,
	unsigned int ivsize,
	unsigned int nbytes,
	void *info,
	struct scatterlist *src,
	struct scatterlist *dst)
{
	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
	struct mlli_params *mlli_params = &req_ctx->mlli_params;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	struct device *dev = &drvdata->plat_dev->dev;
	struct buffer_array sg_data;
	uint32_t dummy = 0;
	int rc = 0;
	uint32_t mapped_nents = 0;

	req_ctx->dma_buf_type = SSI_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Map IV buffer */
	if (likely(ivsize != 0)) {
		dump_byte_array("iv", (uint8_t *)info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, (void *)info, ivsize,
				       req_ctx->is_giv ? DMA_BIDIRECTIONAL :
				       DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev,
					       req_ctx->gen_ctx.iv_dma_addr))) {
			SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
				    ivsize, info);
			return -ENOMEM;
		}
		SSI_UPDATE_DMA_ADDR_TO_48BIT(req_ctx->gen_ctx.iv_dma_addr,
					     ivsize);
		SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
			      ivsize, info,
			      (unsigned long long)req_ctx->gen_ctx.iv_dma_addr);
	} else {
		req_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src SGL */
	rc = ssi_buffer_mgr_map_scatterlist(dev, src,
					    nbytes, DMA_BIDIRECTIONAL,
					    &req_ctx->in_nents,
					    LLI_MAX_NUM_OF_DATA_ENTRIES,
					    &dummy, &mapped_nents);
	if (unlikely(rc != 0)) {
		rc = -ENOMEM;
		goto ablkcipher_exit;
	}
	if (mapped_nents > 1)
		req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;

	if (unlikely(src == dst)) {
		/* Handle inplace operation */
		if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
			req_ctx->out_nents = 0;
			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
					req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
		}
	} else {
		/* Map the dst sg */
		if (unlikely(ssi_buffer_mgr_map_scatterlist(
				dev, dst, nbytes,
				DMA_BIDIRECTIONAL, &req_ctx->out_nents,
				LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
				&mapped_nents))) {
			rc = -ENOMEM;
			goto ablkcipher_exit;
		}
		if (mapped_nents > 1)
			req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;

		if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
					req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
					req_ctx->out_nents, dst,
					nbytes, 0, true,
					&req_ctx->out_mlli_nents);
		}
	}

	if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
		if (unlikely(rc != 0))
			goto ablkcipher_exit;
	}

	SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n",
		      GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));

	return 0;

ablkcipher_exit:
	ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
	return rc;
}
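
/*
 * Pairing sketch (illustrative, not a real call site): a cipher request is
 * mapped before submission and unmapped on completion with the same
 * ivsize/src/dst triple:
 *
 *	rc = ssi_buffer_mgr_map_blkcipher_request(drvdata, req_ctx, ivsize,
 *						  nbytes, info, src, dst);
 *	if (rc == 0) {
 *		... queue HW descriptors ...
 *		ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize,
 *						       src, dst);
 *	}
 */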
int ssi_buffer_mgr_map_hash_request_final(
	struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src,
	unsigned int nbytes, bool do_update)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = &drvdata->plat_dev->dev;
	uint8_t *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
			areq_ctx->buff0;
	uint32_t *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
			&areq_ctx->buff0_cnt;
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	uint32_t dummy = 0;
	uint32_t mapped_nents = 0;

	SSI_LOG_DEBUG("final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
		      curr_buff, *curr_buff_cnt, nbytes,
		      src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (unlikely(nbytes == 0 && *curr_buff_cnt == 0)) {
		/* nothing to do */
		return 0;
	}

	/* TODO: copy data in case that buffer is enough for operation */
	/* map the previous buffer */
	if (*curr_buff_cnt != 0) {
		if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
					      *curr_buff_cnt, &sg_data) != 0) {
			return -ENOMEM;
		}
	}

	if (src && (nbytes > 0) && do_update) {
		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
					nbytes, DMA_TO_DEVICE,
					&areq_ctx->in_nents,
					LLI_MAX_NUM_OF_DATA_ENTRIES,
					&dummy, &mapped_nents))) {
			goto unmap_curr_buff;
		}
		if (src && (mapped_nents == 1) &&
		    (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = nbytes;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
		} else {
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
		}
	}

	/* build MLLI */
	if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
					areq_ctx->in_nents, src,
					nbytes, 0,
					true, &areq_ctx->mlli_nents);
		if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
							  mlli_params) != 0)) {
			goto fail_unmap_din;
		}
	}
	/* change the buffer index for the unmap function */
	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
	SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
		      GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt != 0)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return -ENOMEM;
}
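
/*
 * Note: unlike the update path below, the final path consumes everything
 * (the carried block buffer plus all of src) and leaves no residue;
 * buff_index is still flipped above so ssi_buffer_mgr_unmap_hash_request()
 * later finds the buffer that was actually mapped.
 */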
int ssi_buffer_mgr_map_hash_request_update(
	struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src,
	unsigned int nbytes, unsigned int block_size)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = &drvdata->plat_dev->dev;
	uint8_t *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
			areq_ctx->buff0;
	uint32_t *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
			&areq_ctx->buff0_cnt;
	uint8_t *next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
			areq_ctx->buff1;
	uint32_t *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
			&areq_ctx->buff1_cnt;
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	unsigned int update_data_len;
	uint32_t total_in_len = nbytes + *curr_buff_cnt;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	unsigned int swap_index = 0;
	uint32_t dummy = 0;
	uint32_t mapped_nents = 0;

	SSI_LOG_DEBUG("update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
		      curr_buff, *curr_buff_cnt, nbytes,
		      src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	areq_ctx->curr_sg = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (unlikely(total_in_len < block_size)) {
		/* Less than one block: just buffer the data for a later call */
		SSI_LOG_DEBUG("less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
			      curr_buff, *curr_buff_cnt,
			      &curr_buff[*curr_buff_cnt]);
		areq_ctx->in_nents =
			ssi_buffer_mgr_get_sgl_nents(src, nbytes, &dummy,
						     NULL);
		sg_copy_to_buffer(src, areq_ctx->in_nents,
				  &curr_buff[*curr_buff_cnt], nbytes);
		*curr_buff_cnt += nbytes;
		return 1;
	}

	/* Calculate the residue size */
	*next_buff_cnt = total_in_len & (block_size - 1);
	/* update data len */
	update_data_len = total_in_len - *next_buff_cnt;

	SSI_LOG_DEBUG("temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
		      *next_buff_cnt, update_data_len);

	/* Copy the new residue to the next buffer */
	if (*next_buff_cnt != 0) {
		SSI_LOG_DEBUG("handle residue: next buff %pK skip data %u residue %u\n",
			      next_buff,
			      (update_data_len - *curr_buff_cnt),
			      *next_buff_cnt);
		ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
					(update_data_len - *curr_buff_cnt),
					nbytes, SSI_SG_TO_BUF);
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (*curr_buff_cnt != 0) {
		if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
					      *curr_buff_cnt, &sg_data) != 0) {
			return -ENOMEM;
		}
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (update_data_len > *curr_buff_cnt) {
		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
					(update_data_len - *curr_buff_cnt),
					DMA_TO_DEVICE,
					&areq_ctx->in_nents,
					LLI_MAX_NUM_OF_DATA_ENTRIES,
					&dummy, &mapped_nents))) {
			goto unmap_curr_buff;
		}
		if ((mapped_nents == 1) &&
		    (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
			/* only one entry in the SG and no previous data */
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = update_data_len;
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
		} else {
			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
		}
	}

	if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
					areq_ctx->in_nents, src,
					(update_data_len - *curr_buff_cnt), 0,
					true, &areq_ctx->mlli_nents);
		if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
							  mlli_params) != 0)) {
			goto fail_unmap_din;
		}
	}
	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt != 0)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return -ENOMEM;
}
void ssi_buffer_mgr_unmap_hash_request(
	struct device *dev, void *ctx, struct scatterlist *src, bool do_revert)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	uint32_t *prev_len = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
			&areq_ctx->buff1_cnt;

	/* In case a pool was set, a table was
	 * allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool != NULL) {
		SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n",
			      (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
			      areq_ctx->mlli_params.mlli_virt_addr);
		SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->mlli_params.mlli_dma_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	if ((src) && likely(areq_ctx->in_nents != 0)) {
		SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=0x%llX len=0x%X\n",
			      sg_virt(src),
			      (unsigned long long)sg_dma_address(src),
			      sg_dma_len(src));
		SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(src));
		dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
	}

	if (*prev_len != 0) {
		SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK dma=0x%llX len 0x%X\n",
			      sg_virt(areq_ctx->buff_sg),
			      (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
			      sg_dma_len(areq_ctx->buff_sg));
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
		if (!do_revert) {
			/* clean the previous data length for update operation */
			*prev_len = 0;
		} else {
			areq_ctx->buff_index ^= 1;
		}
	}
}
int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle;
	struct device *dev = &drvdata->plat_dev->dev;

	buff_mgr_handle = kmalloc(sizeof(struct buff_mgr_handle), GFP_KERNEL);
	if (buff_mgr_handle == NULL)
		return -ENOMEM;

	drvdata->buff_mgr_handle = buff_mgr_handle;

	buff_mgr_handle->mlli_buffs_pool = dma_pool_create(
				"dx_single_mlli_tables", dev,
				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);

	if (unlikely(buff_mgr_handle->mlli_buffs_pool == NULL))
		goto error;

	return 0;

error:
	ssi_buffer_mgr_fini(drvdata);
	return -ENOMEM;
}
int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;

	if (buff_mgr_handle != NULL) {
		/* dma_pool_destroy() is a no-op on a NULL pool */
		dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
		kfree(drvdata->buff_mgr_handle);
		drvdata->buff_mgr_handle = NULL;
	}
	return 0;
}
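
/*
 * Lifecycle sketch (illustrative): probe is expected to call
 * ssi_buffer_mgr_init(drvdata) once to create the MLLI DMA pool, and
 * remove (or a later init failure) calls ssi_buffer_mgr_fini(drvdata);
 * fini tolerates a partially initialized handle.
 */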