staging: ccree: Cleanup: remove references to page_link
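
The scatterlist page_link field is not a plain pointer: its low bits
carry the SG_CHAIN and SG_END markers, so printing or dereferencing it
directly leaks an implementation detail drivers should not depend on.
Switch the debug prints to the sg_page() accessor (with the matching %p
format) instead. The blobdiff below also includes the kernel integer
type cleanups from the same series (uint32_t/uint8_t/int32_t to
u32/u8/s32, UINT16_MAX to U16_MAX) and trailing-whitespace removal.

As a minimal sketch of the pattern (a hypothetical helper, not part of
this patch), a page_link-free SG list walk looks like:

	#include <linux/scatterlist.h>
	#include <linux/printk.h>

	/* Hypothetical example: dump an SG list without touching page_link. */
	static void sgl_debug_dump(struct scatterlist *sg)
	{
		while (sg) {
			/*
			 * sg_page() masks the SG_CHAIN/SG_END flag bits out of
			 * page_link before returning the struct page pointer.
			 */
			pr_debug("page=%p offset=%u length=%u\n",
				 sg_page(sg), sg->offset, sg->length);
			/* sg_next() transparently follows chain entries. */
			sg = sg_next(sg);
		}
	}

This mirrors the DUMP_SGL macro updated in the first hunk below.

---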
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c
index 038e2ff5e545f4b10e7c6ef34a30926aaffc5496..04515e70d2d3c576baade02a44746de69a43c006 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.c
+++ b/drivers/staging/ccree/ssi_buffer_mgr.c
@@ -1,15 +1,15 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
@@ -43,8 +43,8 @@
 #ifdef CC_DEBUG
 #define DUMP_SGL(sg) \
        while (sg) { \
-               SSI_LOG_DEBUG("page=%lu offset=%u length=%u (dma_len=%u) " \
-                            "dma_addr=%08x\n", (sg)->page_link, (sg)->offset, \
+               SSI_LOG_DEBUG("page=%p offset=%u length=%u (dma_len=%u) " \
+                            "dma_addr=%08x\n", sg_page(sg), (sg)->offset, \
                        (sg)->length, sg_dma_len(sg), (sg)->dma_address); \
                (sg) = sg_next(sg); \
        }
@@ -92,28 +92,28 @@ struct buffer_array {
        int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
        enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
        bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
-       uint32_t * mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
+       u32 * mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
 };
 
 #ifdef CC_DMA_48BIT_SIM
-dma_addr_t ssi_buff_mgr_update_dma_addr(dma_addr_t orig_addr, uint32_t data_len)
+dma_addr_t ssi_buff_mgr_update_dma_addr(dma_addr_t orig_addr, u32 data_len)
 {
        dma_addr_t tmp_dma_addr;
 #ifdef CC_DMA_48BIT_SIM_FULL
        /* With this code all addresses will be switched to 48 bits. */
        /* The if condition protects from double expention */
-       if((((orig_addr >> 16) & 0xFFFF) != 0xFFFF) && 
+       if((((orig_addr >> 16) & 0xFFFF) != 0xFFFF) &&
                (data_len <= CC_MAX_MLLI_ENTRY_SIZE)) {
 #else
-       if((!(((orig_addr >> 16) & 0xFF) % 2)) && 
+       if((!(((orig_addr >> 16) & 0xFF) % 2)) &&
                (data_len <= CC_MAX_MLLI_ENTRY_SIZE)) {
 #endif
-               tmp_dma_addr = ((orig_addr<<16) | 0xFFFF0000 | 
-                               (orig_addr & UINT16_MAX));
+               tmp_dma_addr = ((orig_addr<<16) | 0xFFFF0000 |
+                               (orig_addr & U16_MAX));
                        SSI_LOG_DEBUG("MAP DMA: orig address=0x%llX "
                                    "dma_address=0x%llX\n",
                                     orig_addr, tmp_dma_addr);
-                       return tmp_dma_addr;    
+                       return tmp_dma_addr;
        }
        return orig_addr;
 }
@@ -126,32 +126,32 @@ dma_addr_t ssi_buff_mgr_restore_dma_addr(dma_addr_t orig_addr)
        /* The if condition protects from double restoring */
        if((orig_addr >> 32) & 0xFFFF ) {
 #else
-       if(((orig_addr >> 32) & 0xFFFF) && 
+       if(((orig_addr >> 32) & 0xFFFF) &&
                !(((orig_addr >> 32) & 0xFF) % 2) ) {
 #endif
                /*return high 16 bits*/
                tmp_dma_addr = ((orig_addr >> 16));
                /*clean the 0xFFFF in the lower bits (set in the add expansion)*/
-               tmp_dma_addr &= 0xFFFF0000; 
+               tmp_dma_addr &= 0xFFFF0000;
                /* Set the original 16 bits */
-               tmp_dma_addr |= (orig_addr & UINT16_MAX); 
+               tmp_dma_addr |= (orig_addr & U16_MAX);
                SSI_LOG_DEBUG("Release DMA: orig address=0x%llX "
                             "dma_address=0x%llX\n",
                             orig_addr, tmp_dma_addr);
-                       return tmp_dma_addr;    
+                       return tmp_dma_addr;
        }
        return orig_addr;
 }
 #endif
 /**
  * ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries.
- * 
+ *
  * @sg_list: SG list
  * @nbytes: [IN] Total SGL data bytes.
- * @lbytes: [OUT] Returns the amount of bytes at the last entry 
+ * @lbytes: [OUT] Returns the amount of bytes at the last entry
  */
 static unsigned int ssi_buffer_mgr_get_sgl_nents(
-       struct scatterlist *sg_list, unsigned int nbytes, uint32_t *lbytes, bool *is_chained)
+       struct scatterlist *sg_list, unsigned int nbytes, u32 *lbytes, bool *is_chained)
 {
        unsigned int nents = 0;
        while (nbytes != 0) {
@@ -179,10 +179,10 @@ static unsigned int ssi_buffer_mgr_get_sgl_nents(
 
 /**
  * ssi_buffer_mgr_zero_sgl() - Zero scatter scatter list data.
- * 
+ *
  * @sgl:
  */
-void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len)
+void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
 {
        struct scatterlist *current_sg = sgl;
        int sg_index = 0;
@@ -201,7 +201,7 @@ void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len)
 /**
  * ssi_buffer_mgr_copy_scatterlist_portion() - Copy scatter list data,
  * from to_skip to end, to dest and vice versa
- * 
+ *
  * @dest:
  * @sg:
  * @to_skip:
@@ -210,21 +210,21 @@ void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len)
  */
 void ssi_buffer_mgr_copy_scatterlist_portion(
        u8 *dest, struct scatterlist *sg,
-       uint32_t to_skip,  uint32_t end,
+       u32 to_skip,  u32 end,
        enum ssi_sg_cpy_direct direct)
 {
-       uint32_t nents, lbytes;
+       u32 nents, lbytes;
 
        nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
        sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip), 0, (direct == SSI_SG_TO_BUF));
 }
 
 static inline int ssi_buffer_mgr_render_buff_to_mlli(
-       dma_addr_t buff_dma, uint32_t buff_size, uint32_t *curr_nents,
-       uint32_t **mlli_entry_pp)
+       dma_addr_t buff_dma, u32 buff_size, u32 *curr_nents,
+       u32 **mlli_entry_pp)
 {
-       uint32_t *mlli_entry_p = *mlli_entry_pp;
-       uint32_t new_nents;;
+       u32 *mlli_entry_p = *mlli_entry_pp;
+       u32 new_nents;;
 
        /* Verify there is no memory overflow*/
        new_nents = (*curr_nents + buff_size/CC_MAX_MLLI_ENTRY_SIZE + 1);
@@ -261,16 +261,16 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
 
 
 static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
-       struct scatterlist *sgl, uint32_t sgl_data_len, uint32_t sglOffset, uint32_t *curr_nents,
-       uint32_t **mlli_entry_pp)
+       struct scatterlist *sgl, u32 sgl_data_len, u32 sglOffset, u32 *curr_nents,
+       u32 **mlli_entry_pp)
 {
        struct scatterlist *curr_sgl = sgl;
-       uint32_t *mlli_entry_p = *mlli_entry_pp;
-       int32_t rc = 0;
+       u32 *mlli_entry_p = *mlli_entry_pp;
+       s32 rc = 0;
 
        for ( ; (curr_sgl != NULL) && (sgl_data_len != 0);
              curr_sgl = sg_next(curr_sgl)) {
-               uint32_t entry_data_len =
+               u32 entry_data_len =
                        (sgl_data_len > sg_dma_len(curr_sgl) - sglOffset) ?
                                sg_dma_len(curr_sgl) - sglOffset : sgl_data_len ;
                sgl_data_len -= entry_data_len;
@@ -291,8 +291,8 @@ static int ssi_buffer_mgr_generate_mlli(
        struct buffer_array *sg_data,
        struct mlli_params *mlli_params)
 {
-       uint32_t *mlli_p;
-       uint32_t total_nents = 0,prev_total_nents = 0;
+       u32 *mlli_p;
+       u32 total_nents = 0,prev_total_nents = 0;
        int rc = 0, i;
 
        SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers);
@@ -306,16 +306,16 @@ static int ssi_buffer_mgr_generate_mlli(
                rc =-ENOMEM;
                goto build_mlli_exit;
        }
-       SSI_UPDATE_DMA_ADDR_TO_48BIT(mlli_params->mlli_dma_addr, 
+       SSI_UPDATE_DMA_ADDR_TO_48BIT(mlli_params->mlli_dma_addr,
                                                (MAX_NUM_OF_TOTAL_MLLI_ENTRIES*
                                                LLI_ENTRY_BYTE_SIZE));
        /* Point to start of MLLI */
-       mlli_p = (uint32_t *)mlli_params->mlli_virt_addr;
+       mlli_p = (u32 *)mlli_params->mlli_virt_addr;
        /* go over all SG's and link it to one MLLI table */
        for (i = 0; i < sg_data->num_of_buffers; i++) {
                if (sg_data->type[i] == DMA_SGL_TYPE)
                        rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
-                               sg_data->entry[i].sgl, 
+                               sg_data->entry[i].sgl,
                                sg_data->total_data_len[i], sg_data->offset[i], &total_nents,
                                &mlli_p);
                else /*DMA_BUFF_TYPE*/
@@ -329,9 +329,9 @@ static int ssi_buffer_mgr_generate_mlli(
 
                /* set last bit in the current table */
                if (sg_data->mlli_nents[i] != NULL) {
-                       /*Calculate the current MLLI table length for the 
+                       /*Calculate the current MLLI table length for the
                        length field in the descriptor*/
-                       *(sg_data->mlli_nents[i]) += 
+                       *(sg_data->mlli_nents[i]) +=
                                (total_nents - prev_total_nents);
                        prev_total_nents = total_nents;
                }
@@ -353,7 +353,7 @@ static int ssi_buffer_mgr_generate_mlli(
 static inline void ssi_buffer_mgr_add_buffer_entry(
        struct buffer_array *sgl_data,
        dma_addr_t buffer_dma, unsigned int buffer_len,
-       bool is_last_entry, uint32_t *mlli_nents)
+       bool is_last_entry, u32 *mlli_nents)
 {
        unsigned int index = sgl_data->num_of_buffers;
 
@@ -379,7 +379,7 @@ static inline void ssi_buffer_mgr_add_scatterlist_entry(
        unsigned int data_len,
        unsigned int data_offset,
        bool is_last_table,
-       uint32_t *mlli_nents)
+       u32 *mlli_nents)
 {
        unsigned int index = sgl_data->num_of_buffers;
 
@@ -398,10 +398,10 @@ static inline void ssi_buffer_mgr_add_scatterlist_entry(
 }
 
 static int
-ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, uint32_t nents,
+ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
                         enum dma_data_direction direction)
 {
-       uint32_t i , j;
+       u32 i , j;
        struct scatterlist *l_sg = sg;
        for (i = 0; i < nents; i++) {
                if (l_sg == NULL) {
@@ -430,8 +430,8 @@ ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, uint32_t n
 static int ssi_buffer_mgr_map_scatterlist(
        struct device *dev, struct scatterlist *sg,
        unsigned int nbytes, int direction,
-       uint32_t *nents, uint32_t max_sg_nents,
-       uint32_t *lbytes, uint32_t *mapped_nents)
+       u32 *nents, u32 max_sg_nents,
+       u32 *lbytes, u32 *mapped_nents)
 {
        bool is_chained = false;
 
@@ -440,20 +440,20 @@ static int ssi_buffer_mgr_map_scatterlist(
                if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
                        SSI_LOG_ERR("dma_map_sg() single buffer failed\n");
                        return -ENOMEM;
-               } 
+               }
                SSI_LOG_DEBUG("Mapped sg: dma_address=0x%llX "
-                            "page_link=0x%08lX addr=%pK offset=%u "
+                            "page=%p addr=%pK offset=%u "
                             "length=%u\n",
-                            (unsigned long long)sg_dma_address(sg), 
-                            sg->page_link, 
-                            sg_virt(sg), 
+                            (unsigned long long)sg_dma_address(sg),
+                            sg_page(sg),
+                            sg_virt(sg),
                             sg->offset, sg->length);
                *lbytes = nbytes;
                *nents = 1;
                *mapped_nents = 1;
                SSI_UPDATE_DMA_ADDR_TO_48BIT(sg_dma_address(sg), sg_dma_len(sg));
        } else {  /*sg_is_last*/
-               *nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes, 
+               *nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
                                                     &is_chained);
                if (*nents > max_sg_nents) {
                        *nents = 0;
@@ -491,32 +491,32 @@ static int ssi_buffer_mgr_map_scatterlist(
 static inline int
 ssi_aead_handle_config_buf(struct device *dev,
        struct aead_req_ctx *areq_ctx,
-       uint8_t* config_data,
+       u8* config_data,
        struct buffer_array *sg_data,
        unsigned int assoclen)
 {
        SSI_LOG_DEBUG(" handle additional data config set to   DLLI \n");
        /* create sg for the current buffer */
        sg_init_one(&areq_ctx->ccm_adata_sg, config_data, AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
-       if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, 
+       if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
                                DMA_TO_DEVICE) != 1)) {
                        SSI_LOG_ERR("dma_map_sg() "
                           "config buffer failed\n");
                        return -ENOMEM;
        }
        SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
-                    "page_link=0x%08lX addr=%pK "
+                    "page=%p addr=%pK "
                     "offset=%u length=%u\n",
-                    (unsigned long long)sg_dma_address(&areq_ctx->ccm_adata_sg), 
-                    areq_ctx->ccm_adata_sg.page_link, 
+                    (unsigned long long)sg_dma_address(&areq_ctx->ccm_adata_sg),
+                    sg_page(&areq_ctx->ccm_adata_sg),
                     sg_virt(&areq_ctx->ccm_adata_sg),
-                    areq_ctx->ccm_adata_sg.offset, 
+                    areq_ctx->ccm_adata_sg.offset,
                     areq_ctx->ccm_adata_sg.length);
        /* prepare for case of MLLI */
        if (assoclen > 0) {
-               ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, 
+               ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1,
                                                    &areq_ctx->ccm_adata_sg,
-                                                   (AES_BLOCK_SIZE + 
+                                                   (AES_BLOCK_SIZE +
                                                    areq_ctx->ccm_hdr_size), 0,
                                                    false, NULL);
        }
@@ -526,8 +526,8 @@ ssi_aead_handle_config_buf(struct device *dev,
 
 static inline int ssi_ahash_handle_curr_buf(struct device *dev,
                                           struct ahash_req_ctx *areq_ctx,
-                                          uint8_t* curr_buff,
-                                          uint32_t curr_buff_cnt,
+                                          u8* curr_buff,
+                                          u32 curr_buff_cnt,
                                           struct buffer_array *sg_data)
 {
        SSI_LOG_DEBUG(" handle curr buff %x set to   DLLI \n", curr_buff_cnt);
@@ -540,12 +540,12 @@ static inline int ssi_ahash_handle_curr_buf(struct device *dev,
                        return -ENOMEM;
        }
        SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
-                    "page_link=0x%08lX addr=%pK "
+                    "page=%p addr=%pK "
                     "offset=%u length=%u\n",
-                    (unsigned long long)sg_dma_address(areq_ctx->buff_sg), 
-                    areq_ctx->buff_sg->page_link, 
+                    (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
+                    sg_page(areq_ctx->buff_sg),
                     sg_virt(areq_ctx->buff_sg),
-                    areq_ctx->buff_sg->offset, 
+                    areq_ctx->buff_sg->offset,
                     areq_ctx->buff_sg->length);
        areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
        areq_ctx->curr_sg = areq_ctx->buff_sg;
@@ -566,12 +566,12 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
        struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
 
        if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
-               SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n", 
+               SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n",
                        (unsigned long long)req_ctx->gen_ctx.iv_dma_addr,
                        ivsize);
                SSI_RESTORE_DMA_ADDR_TO_48BIT(req_ctx->gen_ctx.iv_dma_addr);
-               dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr, 
-                                ivsize, 
+               dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
+                                ivsize,
                                 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
                                 DMA_TO_DEVICE);
        }
@@ -586,12 +586,12 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
        SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(src));
        dma_unmap_sg(dev, src, req_ctx->in_nents,
                DMA_BIDIRECTIONAL);
-       SSI_LOG_DEBUG("Unmapped req->src=%pK\n", 
+       SSI_LOG_DEBUG("Unmapped req->src=%pK\n",
                     sg_virt(src));
 
        if (src != dst) {
                SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(dst));
-               dma_unmap_sg(dev, dst, req_ctx->out_nents, 
+               dma_unmap_sg(dev, dst, req_ctx->out_nents,
                        DMA_BIDIRECTIONAL);
                SSI_LOG_DEBUG("Unmapped req->dst=%pK\n",
                        sg_virt(dst));
@@ -608,13 +608,13 @@ int ssi_buffer_mgr_map_blkcipher_request(
        struct scatterlist *dst)
 {
        struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
-       struct mlli_params *mlli_params = &req_ctx->mlli_params;        
+       struct mlli_params *mlli_params = &req_ctx->mlli_params;
        struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
        struct device *dev = &drvdata->plat_dev->dev;
        struct buffer_array sg_data;
-       uint32_t dummy = 0;
+       u32 dummy = 0;
        int rc = 0;
-       uint32_t mapped_nents = 0;
+       u32 mapped_nents = 0;
 
        req_ctx->dma_buf_type = SSI_DMA_BUF_DLLI;
        mlli_params->curr_pool = NULL;
@@ -622,13 +622,13 @@ int ssi_buffer_mgr_map_blkcipher_request(
 
        /* Map IV buffer */
        if (likely(ivsize != 0) ) {
-               dump_byte_array("iv", (uint8_t *)info, ivsize);
-               req_ctx->gen_ctx.iv_dma_addr = 
-                       dma_map_single(dev, (void *)info, 
-                                      ivsize, 
+               dump_byte_array("iv", (u8 *)info, ivsize);
+               req_ctx->gen_ctx.iv_dma_addr =
+                       dma_map_single(dev, (void *)info,
+                                      ivsize,
                                       req_ctx->is_giv ? DMA_BIDIRECTIONAL:
                                       DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(dev, 
+               if (unlikely(dma_mapping_error(dev,
                                        req_ctx->gen_ctx.iv_dma_addr))) {
                        SSI_LOG_ERR("Mapping iv %u B at va=%pK "
                                   "for DMA failed\n", ivsize, info);
@@ -641,7 +641,7 @@ int ssi_buffer_mgr_map_blkcipher_request(
                        (unsigned long long)req_ctx->gen_ctx.iv_dma_addr);
        } else
                req_ctx->gen_ctx.iv_dma_addr = 0;
-       
+
        /* Map the src SGL */
        rc = ssi_buffer_mgr_map_scatterlist(dev, src,
                nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
@@ -681,11 +681,11 @@ int ssi_buffer_mgr_map_blkcipher_request(
                                &req_ctx->in_mlli_nents);
                        ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
                                req_ctx->out_nents, dst,
-                               nbytes, 0, true, 
+                               nbytes, 0, true,
                                &req_ctx->out_mlli_nents);
                }
        }
-       
+
        if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
                mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
                rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
@@ -710,13 +710,13 @@ void ssi_buffer_mgr_unmap_aead_request(
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        unsigned int hw_iv_size = areq_ctx->hw_iv_size;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-       uint32_t dummy;
+       u32 dummy;
        bool chained;
-       uint32_t size_to_unmap = 0;
+       u32 size_to_unmap = 0;
 
        if (areq_ctx->mac_buf_dma_addr != 0) {
                SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->mac_buf_dma_addr);
-               dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr, 
+               dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
                        MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
        }
 
@@ -727,22 +727,22 @@ void ssi_buffer_mgr_unmap_aead_request(
                        dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
                                         AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
                }
-       
+
                if (areq_ctx->gcm_block_len_dma_addr != 0) {
                        SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_block_len_dma_addr);
                        dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
                                         AES_BLOCK_SIZE, DMA_TO_DEVICE);
                }
-       
+
                if (areq_ctx->gcm_iv_inc1_dma_addr != 0) {
                        SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_iv_inc1_dma_addr);
-                       dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr, 
+                       dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
                                AES_BLOCK_SIZE, DMA_TO_DEVICE);
                }
-       
+
                if (areq_ctx->gcm_iv_inc2_dma_addr != 0) {
                        SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_iv_inc2_dma_addr);
-                       dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr, 
+                       dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
                                AES_BLOCK_SIZE, DMA_TO_DEVICE);
                }
        }
@@ -751,7 +751,7 @@ void ssi_buffer_mgr_unmap_aead_request(
        if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
                if (areq_ctx->ccm_iv0_dma_addr != 0) {
                        SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->ccm_iv0_dma_addr);
-                       dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr, 
+                       dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
                                AES_BLOCK_SIZE, DMA_TO_DEVICE);
                }
 
@@ -763,10 +763,10 @@ void ssi_buffer_mgr_unmap_aead_request(
                                 hw_iv_size, DMA_BIDIRECTIONAL);
        }
 
-       /*In case a pool was set, a table was 
+       /*In case a pool was set, a table was
          allocated and should be released */
        if (areq_ctx->mlli_params.curr_pool != NULL) {
-               SSI_LOG_DEBUG("free MLLI buffer: dma=0x%08llX virt=%pK\n", 
+               SSI_LOG_DEBUG("free MLLI buffer: dma=0x%08llX virt=%pK\n",
                        (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
                        areq_ctx->mlli_params.mlli_virt_addr);
                SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->mlli_params.mlli_dma_addr);
@@ -786,7 +786,7 @@ void ssi_buffer_mgr_unmap_aead_request(
 
        dma_unmap_sg(dev, req->src, ssi_buffer_mgr_get_sgl_nents(req->src,size_to_unmap,&dummy,&chained) , DMA_BIDIRECTIONAL);
        if (unlikely(req->src != req->dst)) {
-               SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n", 
+               SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
                        sg_virt(req->dst));
                SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(req->dst));
                dma_unmap_sg(dev, req->dst, ssi_buffer_mgr_get_sgl_nents(req->dst,size_to_unmap,&dummy,&chained),
@@ -796,7 +796,7 @@ void ssi_buffer_mgr_unmap_aead_request(
        if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
            likely(req->src == req->dst))
        {
-               uint32_t size_to_skip = req->assoclen;
+               u32 size_to_skip = req->assoclen;
                if (areq_ctx->is_gcm4543) {
                        size_to_skip += crypto_aead_ivsize(tfm);
                }
@@ -814,19 +814,19 @@ static inline int ssi_buffer_mgr_get_aead_icv_nents(
        struct scatterlist *sgl,
        unsigned int sgl_nents,
        unsigned int authsize,
-       uint32_t last_entry_data_size,
+       u32 last_entry_data_size,
        bool *is_icv_fragmented)
 {
        unsigned int icv_max_size = 0;
        unsigned int icv_required_size = authsize > last_entry_data_size ? (authsize - last_entry_data_size) : authsize;
        unsigned int nents;
        unsigned int i;
-       
+
        if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
                *is_icv_fragmented = false;
                return 0;
        }
-       
+
        for( i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
                if (sgl == NULL) {
                        break;
@@ -883,12 +883,12 @@ static inline int ssi_buffer_mgr_aead_chain_iv(
                SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
                        hw_iv_size, req->iv);
                rc = -ENOMEM;
-               goto chain_iv_exit; 
+               goto chain_iv_exit;
        }
        SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->gen_ctx.iv_dma_addr, hw_iv_size);
 
        SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
-               hw_iv_size, req->iv, 
+               hw_iv_size, req->iv,
                (unsigned long long)areq_ctx->gen_ctx.iv_dma_addr);
        if (do_chain == true && areq_ctx->plaintext_authenticate_only == true){  // TODO: what about CTR?? ask Ron
                struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -914,11 +914,11 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
 {
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        int rc = 0;
-       uint32_t mapped_nents = 0;
+       u32 mapped_nents = 0;
        struct scatterlist *current_sg = req->src;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        unsigned int sg_index = 0;
-       uint32_t size_of_assoc = req->assoclen;
+       u32 size_of_assoc = req->assoclen;
 
        if (areq_ctx->is_gcm4543) {
                size_of_assoc += crypto_aead_ivsize(tfm);
@@ -943,7 +943,7 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
        //it is assumed that if we reach here , the sgl is already mapped
        sg_index = current_sg->length;
        if (sg_index > size_of_assoc) { //the first entry in the scatter list contains all the associated data
-               mapped_nents++;        
+               mapped_nents++;
        }
        else{
                while (sg_index <= size_of_assoc) {
@@ -1004,7 +1004,7 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
 
 static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
        struct aead_request *req,
-       uint32_t *src_last_bytes, uint32_t *dst_last_bytes)
+       u32 *src_last_bytes, u32 *dst_last_bytes)
 {
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
@@ -1042,7 +1042,7 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
        struct ssi_drvdata *drvdata,
        struct aead_request *req,
        struct buffer_array *sg_data,
-       uint32_t *src_last_bytes, uint32_t *dst_last_bytes,
+       u32 *src_last_bytes, u32 *dst_last_bytes,
        bool is_last_table)
 {
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
@@ -1075,7 +1075,7 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
                                /* In ACP platform we already copying ICV
                                   for any INPLACE-DECRYPT operation, hence
                                   we must neglect this code. */
-                               uint32_t size_to_skip = req->assoclen;
+                               u32 size_to_skip = req->assoclen;
                                if (areq_ctx->is_gcm4543) {
                                        size_to_skip += crypto_aead_ivsize(tfm);
                                }
@@ -1095,7 +1095,7 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
                                &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
                                (*src_last_bytes - authsize);
                        areq_ctx->icv_virt_addr = sg_virt(
-                               &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) + 
+                               &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
                                (*src_last_bytes - authsize);
                }
 
@@ -1122,7 +1122,7 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
                        /* Backup happens only when ICV is fragmented, ICV
                           verification is made by CPU compare in order to simplify
                           MAC verification upon request completion */
-                         uint32_t size_to_skip = req->assoclen;
+                         u32 size_to_skip = req->assoclen;
                          if (areq_ctx->is_gcm4543) {
                                  size_to_skip += crypto_aead_ivsize(tfm);
                          }
@@ -1190,14 +1190,14 @@ static inline int ssi_buffer_mgr_aead_chain_data(
        unsigned int authsize = areq_ctx->req_authsize;
        int src_last_bytes = 0, dst_last_bytes = 0;
        int rc = 0;
-       uint32_t src_mapped_nents = 0, dst_mapped_nents = 0;
-       uint32_t offset = 0;
+       u32 src_mapped_nents = 0, dst_mapped_nents = 0;
+       u32 offset = 0;
        unsigned int size_for_map = req->assoclen +req->cryptlen; /*non-inplace mode*/
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-       uint32_t sg_index = 0;
+       u32 sg_index = 0;
        bool chained = false;
        bool is_gcm4543 = areq_ctx->is_gcm4543;
-       uint32_t size_to_skip = req->assoclen;
+       u32 size_to_skip = req->assoclen;
        if (is_gcm4543) {
                size_to_skip += crypto_aead_ivsize(tfm);
        }
@@ -1214,8 +1214,8 @@ static inline int ssi_buffer_mgr_aead_chain_data(
                size_for_map += crypto_aead_ivsize(tfm);
        }
 
-       size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize:0;  
-       src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src,size_for_map,&src_last_bytes, &chained);  
+       size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize:0;
+       src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src,size_for_map,&src_last_bytes, &chained);
        sg_index = areq_ctx->srcSgl->length;
        //check where the data starts
        while (sg_index <= size_to_skip) {
@@ -1238,7 +1238,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
 
        areq_ctx->src.nents = src_mapped_nents;
 
-       areq_ctx->srcOffset = offset;  
+       areq_ctx->srcOffset = offset;
 
        if (req->src != req->dst) {
                size_for_map = req->assoclen +req->cryptlen;
@@ -1253,7 +1253,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
                                                   &dst_mapped_nents);
                if (unlikely(rc != 0)) {
                        rc = -ENOMEM;
-                       goto chain_data_exit; 
+                       goto chain_data_exit;
                }
        }
 
@@ -1302,11 +1302,11 @@ static void ssi_buffer_mgr_update_aead_mlli_nents( struct ssi_drvdata *drvdata,
                                           struct aead_request *req)
 {
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-       uint32_t curr_mlli_size = 0;
-       
+       u32 curr_mlli_size = 0;
+
        if (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
                areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
-               curr_mlli_size = areq_ctx->assoc.mlli_nents * 
+               curr_mlli_size = areq_ctx->assoc.mlli_nents *
                                                LLI_ENTRY_BYTE_SIZE;
        }
 
@@ -1318,31 +1318,31 @@ static void ssi_buffer_mgr_update_aead_mlli_nents( struct ssi_drvdata *drvdata,
                                                                curr_mlli_size;
                        areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
                        if (areq_ctx->is_single_pass == false)
-                               areq_ctx->assoc.mlli_nents += 
+                               areq_ctx->assoc.mlli_nents +=
                                        areq_ctx->src.mlli_nents;
                } else {
-                       if (areq_ctx->gen_ctx.op_type == 
+                       if (areq_ctx->gen_ctx.op_type ==
                                        DRV_CRYPTO_DIRECTION_DECRYPT) {
-                               areq_ctx->src.sram_addr = 
+                               areq_ctx->src.sram_addr =
                                                drvdata->mlli_sram_addr +
                                                                curr_mlli_size;
-                               areq_ctx->dst.sram_addr = 
-                                               areq_ctx->src.sram_addr + 
-                                               areq_ctx->src.mlli_nents * 
+                               areq_ctx->dst.sram_addr =
+                                               areq_ctx->src.sram_addr +
+                                               areq_ctx->src.mlli_nents *
                                                LLI_ENTRY_BYTE_SIZE;
                                if (areq_ctx->is_single_pass == false)
-                                       areq_ctx->assoc.mlli_nents += 
+                                       areq_ctx->assoc.mlli_nents +=
                                                areq_ctx->src.mlli_nents;
                        } else {
-                               areq_ctx->dst.sram_addr = 
+                               areq_ctx->dst.sram_addr =
                                                drvdata->mlli_sram_addr +
                                                                curr_mlli_size;
-                               areq_ctx->src.sram_addr = 
+                               areq_ctx->src.sram_addr =
                                                areq_ctx->dst.sram_addr +
-                                               areq_ctx->dst.mlli_nents * 
+                                               areq_ctx->dst.mlli_nents *
                                                LLI_ENTRY_BYTE_SIZE;
                                if (areq_ctx->is_single_pass == false)
-                                       areq_ctx->assoc.mlli_nents += 
+                                       areq_ctx->assoc.mlli_nents +=
                                                areq_ctx->dst.mlli_nents;
                        }
                }
@@ -1362,9 +1362,9 @@ int ssi_buffer_mgr_map_aead_request(
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        bool is_gcm4543 = areq_ctx->is_gcm4543;
 
-       uint32_t mapped_nents = 0;
-       uint32_t dummy = 0; /*used for the assoc data fragments */
-       uint32_t size_to_map = 0;
+       u32 mapped_nents = 0;
+       u32 dummy = 0; /*used for the assoc data fragments */
+       u32 size_to_map = 0;
 
        mlli_params->curr_pool = NULL;
        sg_data.num_of_buffers = 0;
@@ -1373,7 +1373,7 @@ int ssi_buffer_mgr_map_aead_request(
        if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
            likely(req->src == req->dst))
        {
-               uint32_t size_to_skip = req->assoclen;
+               u32 size_to_skip = req->assoclen;
                if (is_gcm4543) {
                        size_to_skip += crypto_aead_ivsize(tfm);
                }
@@ -1387,8 +1387,8 @@ int ssi_buffer_mgr_map_aead_request(
 #endif
 
        /* cacluate the size for cipher remove ICV in decrypt*/
-       areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type == 
-                                DRV_CRYPTO_DIRECTION_ENCRYPT) ? 
+       areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
+                                DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                                req->cryptlen :
                                (req->cryptlen - authsize);
 
@@ -1489,15 +1489,15 @@ int ssi_buffer_mgr_map_aead_request(
                                            LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES+LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
        if (unlikely(rc != 0)) {
                rc = -ENOMEM;
-               goto aead_map_failure; 
+               goto aead_map_failure;
        }
 
        if (likely(areq_ctx->is_single_pass == true)) {
                /*
-               * Create MLLI table for: 
+               * Create MLLI table for:
                *   (1) Assoc. data
                *   (2) Src/Dst SGLs
-               *   Note: IV is contg. buffer (not an SGL) 
+               *   Note: IV is contg. buffer (not an SGL)
                */
                rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, true, false);
                if (unlikely(rc != 0))
@@ -1511,19 +1511,19 @@ int ssi_buffer_mgr_map_aead_request(
        } else { /* DOUBLE-PASS flow */
                /*
                * Prepare MLLI table(s) in this order:
-               *  
+               *
                * If ENCRYPT/DECRYPT (inplace):
                *   (1) MLLI table for assoc
                *   (2) IV entry (chained right after end of assoc)
                *   (3) MLLI for src/dst (inplace operation)
-               *  
-               * If ENCRYPT (non-inplace) 
+               *
+               * If ENCRYPT (non-inplace)
                *   (1) MLLI table for assoc
                *   (2) IV entry (chained right after end of assoc)
                *   (3) MLLI for dst
                *   (4) MLLI for src
-               *  
-               * If DECRYPT (non-inplace) 
+               *
+               * If DECRYPT (non-inplace)
                *   (1) MLLI table for assoc
                *   (2) IV entry (chained right after end of assoc)
                *   (3) MLLI for src
@@ -1568,15 +1568,15 @@ int ssi_buffer_mgr_map_hash_request_final(
 {
        struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
        struct device *dev = &drvdata->plat_dev->dev;
-       uint8_t* curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
+       u8* curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
                        areq_ctx->buff0;
-       uint32_t *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
+       u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
                        &areq_ctx->buff0_cnt;
-       struct mlli_params *mlli_params = &areq_ctx->mlli_params;       
+       struct mlli_params *mlli_params = &areq_ctx->mlli_params;
        struct buffer_array sg_data;
        struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
-       uint32_t dummy = 0;
-       uint32_t mapped_nents = 0;
+       u32 dummy = 0;
+       u32 mapped_nents = 0;
 
        SSI_LOG_DEBUG(" final params : curr_buff=%pK "
                     "curr_buff_cnt=0x%X nbytes = 0x%X "
@@ -1593,7 +1593,7 @@ int ssi_buffer_mgr_map_hash_request_final(
                /* nothing to do */
                return 0;
        }
-       
+
        /*TODO: copy data in case that buffer is enough for operation */
        /* map the previous buffer */
        if (*curr_buff_cnt != 0 ) {
@@ -1612,7 +1612,7 @@ int ssi_buffer_mgr_map_hash_request_final(
                                          &dummy, &mapped_nents))){
                        goto unmap_curr_buff;
                }
-               if ( src && (mapped_nents == 1) 
+               if ( src && (mapped_nents == 1)
                     && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) ) {
                        memcpy(areq_ctx->buff_sg,src,
                               sizeof(struct scatterlist));
@@ -1660,23 +1660,23 @@ int ssi_buffer_mgr_map_hash_request_update(
 {
        struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
        struct device *dev = &drvdata->plat_dev->dev;
-       uint8_t* curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
+       u8* curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
                        areq_ctx->buff0;
-       uint32_t *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
+       u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
                        &areq_ctx->buff0_cnt;
-       uint8_t* next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
+       u8* next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
                        areq_ctx->buff1;
-       uint32_t *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
+       u32 *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
                        &areq_ctx->buff1_cnt;
-       struct mlli_params *mlli_params = &areq_ctx->mlli_params;       
+       struct mlli_params *mlli_params = &areq_ctx->mlli_params;
        unsigned int update_data_len;
-       uint32_t total_in_len = nbytes + *curr_buff_cnt;
+       u32 total_in_len = nbytes + *curr_buff_cnt;
        struct buffer_array sg_data;
        struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
        unsigned int swap_index = 0;
-       uint32_t dummy = 0;
-       uint32_t mapped_nents = 0;
-               
+       u32 dummy = 0;
+       u32 mapped_nents = 0;
+
        SSI_LOG_DEBUG(" update params : curr_buff=%pK "
                     "curr_buff_cnt=0x%X nbytes=0x%X "
                     "src=%pK curr_index=%u \n",
@@ -1694,12 +1694,12 @@ int ssi_buffer_mgr_map_hash_request_update(
                             "*curr_buff_cnt=0x%X copy_to=%pK\n",
                        curr_buff, *curr_buff_cnt,
                        &curr_buff[*curr_buff_cnt]);
-               areq_ctx->in_nents = 
+               areq_ctx->in_nents =
                        ssi_buffer_mgr_get_sgl_nents(src,
                                                    nbytes,
                                                    &dummy, NULL);
                sg_copy_to_buffer(src, areq_ctx->in_nents,
-                                 &curr_buff[*curr_buff_cnt], nbytes); 
+                                 &curr_buff[*curr_buff_cnt], nbytes);
                *curr_buff_cnt += nbytes;
                return 1;
        }
@@ -1734,7 +1734,7 @@ int ssi_buffer_mgr_map_hash_request_update(
                /* change the buffer index for next operation */
                swap_index = 1;
        }
-       
+
        if ( update_data_len > *curr_buff_cnt ) {
                if ( unlikely( ssi_buffer_mgr_map_scatterlist( dev,src,
                                          (update_data_len -*curr_buff_cnt),
@@ -1744,7 +1744,7 @@ int ssi_buffer_mgr_map_hash_request_update(
                                          &dummy, &mapped_nents))){
                        goto unmap_curr_buff;
                }
-               if ( (mapped_nents == 1) 
+               if ( (mapped_nents == 1)
                     && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) ) {
                        /* only one entry in the SG and no previous data */
                        memcpy(areq_ctx->buff_sg,src,
@@ -1789,13 +1789,13 @@ void ssi_buffer_mgr_unmap_hash_request(
        struct device *dev, void *ctx, struct scatterlist *src, bool do_revert)
 {
        struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
-       uint32_t *prev_len = areq_ctx->buff_index ?  &areq_ctx->buff0_cnt :
+       u32 *prev_len = areq_ctx->buff_index ?  &areq_ctx->buff0_cnt :
                                                &areq_ctx->buff1_cnt;
 
-       /*In case a pool was set, a table was 
+       /*In case a pool was set, a table was
          allocated and should be released */
        if (areq_ctx->mlli_params.curr_pool != NULL) {
-               SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n", 
+               SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n",
                             (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
                             areq_ctx->mlli_params.mlli_virt_addr);
                SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->mlli_params.mlli_dma_addr);
@@ -1803,22 +1803,22 @@ void ssi_buffer_mgr_unmap_hash_request(
                              areq_ctx->mlli_params.mlli_virt_addr,
                              areq_ctx->mlli_params.mlli_dma_addr);
        }
-       
+
        if ((src) && likely(areq_ctx->in_nents != 0)) {
                SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=0x%llX len=0x%X\n",
                             sg_virt(src),
-                            (unsigned long long)sg_dma_address(src), 
+                            (unsigned long long)sg_dma_address(src),
                             sg_dma_len(src));
                SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(src));
-               dma_unmap_sg(dev, src, 
+               dma_unmap_sg(dev, src,
                             areq_ctx->in_nents, DMA_TO_DEVICE);
        }
 
        if (*prev_len != 0) {
                SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK"
-                            "dma=0x%llX len 0x%X\n", 
+                            "dma=0x%llX len 0x%X\n",
                                sg_virt(areq_ctx->buff_sg),
-                               (unsigned long long)sg_dma_address(areq_ctx->buff_sg), 
+                               (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
                                sg_dma_len(areq_ctx->buff_sg));
                dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
                if (!do_revert) {
@@ -1844,7 +1844,7 @@ int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
 
        buff_mgr_handle->mlli_buffs_pool = dma_pool_create(
                                "dx_single_mlli_tables", dev,
-                               MAX_NUM_OF_TOTAL_MLLI_ENTRIES * 
+                               MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
                                LLI_ENTRY_BYTE_SIZE,
                                MLLI_TABLE_MIN_ALIGNMENT, 0);
 
@@ -1870,4 +1870,3 @@ int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
        }
        return 0;
 }
-