staging: ccree: stdint to kernel types conversion
author Gilad Ben-Yossef <gilad@benyossef.com>
Sun, 7 May 2017 13:35:55 +0000 (16:35 +0300)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 15 May 2017 05:41:59 +0000 (07:41 +0200)
Move from stdint style intN_t/uintN_t to kernel style u/s types.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
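
For reference, a minimal sketch (not part of the patch) of the mapping this conversion applies; it assumes the u8/u16/u32/s32 typedefs come from <linux/types.h> and the S32_MAX/U16_MAX/U32_MAX limits from <linux/kernel.h>, as in mainline kernels of that era, and uses a hypothetical helper name purely for illustration:

#include <linux/types.h>	/* u8, u16, u32, u64, s8, s16, s32, s64 */
#include <linux/kernel.h>	/* S32_MAX, U16_MAX, U32_MAX */

/* Before (stdint.h style)            After (kernel style)          */
/* uint8_t  digest[SZ];         ->    u8  digest[SZ];               */
/* uint32_t key_size;           ->    u32 key_size;                 */
/* int32_t  rc;                 ->    s32 rc;                       */
/* FOO_RESERVE32B = INT32_MAX   ->    FOO_RESERVE32B = S32_MAX      */

/* Hypothetical helper showing the limit-macro replacement as well. */
static inline u32 low_32bits(u64 addr)
{
	return addr & U32_MAX;	/* was: (addr) & UINT32_MAX */
}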
24 files changed:
drivers/staging/ccree/cc_crypto_ctx.h
drivers/staging/ccree/cc_hw_queue_defs.h
drivers/staging/ccree/cc_lli_defs.h
drivers/staging/ccree/cc_regs.h
drivers/staging/ccree/hash_defs.h
drivers/staging/ccree/ssi_aead.c
drivers/staging/ccree/ssi_aead.h
drivers/staging/ccree/ssi_buffer_mgr.c
drivers/staging/ccree/ssi_buffer_mgr.h
drivers/staging/ccree/ssi_cipher.c
drivers/staging/ccree/ssi_cipher.h
drivers/staging/ccree/ssi_driver.c
drivers/staging/ccree/ssi_driver.h
drivers/staging/ccree/ssi_fips.h
drivers/staging/ccree/ssi_fips_ll.c
drivers/staging/ccree/ssi_fips_local.c
drivers/staging/ccree/ssi_fips_local.h
drivers/staging/ccree/ssi_hash.c
drivers/staging/ccree/ssi_hash.h
drivers/staging/ccree/ssi_ivgen.c
drivers/staging/ccree/ssi_request_mgr.c
drivers/staging/ccree/ssi_sram_mgr.c
drivers/staging/ccree/ssi_sram_mgr.h
drivers/staging/ccree/ssi_sysfs.c

diff --git a/drivers/staging/ccree/cc_crypto_ctx.h b/drivers/staging/ccree/cc_crypto_ctx.h
index f8ebd76051794eca7f4389de86f01dfe494b81b1..31ccf518fda44d37e225030330943a6aa6e91a71 100644
 #ifndef _CC_CRYPTO_CTX_H_
 #define _CC_CRYPTO_CTX_H_
 
-#ifdef __KERNEL__
 #include <linux/types.h>
-#define INT32_MAX 0x7FFFFFFFL
-#else
-#include <stdint.h>
-#endif
 
 
 #ifndef max
@@ -113,7 +108,7 @@ enum drv_engine_type {
        DRV_ENGINE_HASH = 3,
        DRV_ENGINE_RC4 = 4,
        DRV_ENGINE_DOUT = 5,
-       DRV_ENGINE_RESERVE32B = INT32_MAX,
+       DRV_ENGINE_RESERVE32B = S32_MAX,
 };
 
 enum drv_crypto_alg {
@@ -126,7 +121,7 @@ enum drv_crypto_alg {
        DRV_CRYPTO_ALG_AEAD = 5,
        DRV_CRYPTO_ALG_BYPASS = 6,
        DRV_CRYPTO_ALG_NUM = 7,
-       DRV_CRYPTO_ALG_RESERVE32B = INT32_MAX
+       DRV_CRYPTO_ALG_RESERVE32B = S32_MAX
 };
 
 enum drv_crypto_direction {
@@ -134,7 +129,7 @@ enum drv_crypto_direction {
        DRV_CRYPTO_DIRECTION_ENCRYPT = 0,
        DRV_CRYPTO_DIRECTION_DECRYPT = 1,
        DRV_CRYPTO_DIRECTION_DECRYPT_ENCRYPT = 3,
-       DRV_CRYPTO_DIRECTION_RESERVE32B = INT32_MAX
+       DRV_CRYPTO_DIRECTION_RESERVE32B = S32_MAX
 };
 
 enum drv_cipher_mode {
@@ -152,7 +147,7 @@ enum drv_cipher_mode {
        DRV_CIPHER_GCTR = 12,
        DRV_CIPHER_ESSIV = 13,
        DRV_CIPHER_BITLOCKER = 14,
-       DRV_CIPHER_RESERVE32B = INT32_MAX
+       DRV_CIPHER_RESERVE32B = S32_MAX
 };
 
 enum drv_hash_mode {
@@ -167,7 +162,7 @@ enum drv_hash_mode {
        DRV_HASH_XCBC_MAC = 7,
        DRV_HASH_CMAC = 8,
        DRV_HASH_MODE_NUM = 9,
-       DRV_HASH_RESERVE32B = INT32_MAX
+       DRV_HASH_RESERVE32B = S32_MAX
 };
 
 enum drv_hash_hw_mode {
@@ -178,7 +173,7 @@ enum drv_hash_hw_mode {
        DRV_HASH_HW_SHA512 = 4,
        DRV_HASH_HW_SHA384 = 12,
        DRV_HASH_HW_GHASH = 6,
-       DRV_HASH_HW_RESERVE32B = INT32_MAX
+       DRV_HASH_HW_RESERVE32B = S32_MAX
 };
 
 enum drv_multi2_mode {
@@ -186,7 +181,7 @@ enum drv_multi2_mode {
        DRV_MULTI2_ECB = 0,
        DRV_MULTI2_CBC = 1,
        DRV_MULTI2_OFB = 2,
-       DRV_MULTI2_RESERVE32B = INT32_MAX
+       DRV_MULTI2_RESERVE32B = S32_MAX
 };
 
 
@@ -201,13 +196,13 @@ enum drv_crypto_key_type {
        DRV_APPLET_KEY = 4,             /* NA */
        DRV_PLATFORM_KEY = 5,           /* 0x101 */
        DRV_CUSTOMER_KEY = 6,           /* 0x110 */
-       DRV_END_OF_KEYS = INT32_MAX,
+       DRV_END_OF_KEYS = S32_MAX,
 };
 
 enum drv_crypto_padding_type {
        DRV_PADDING_NONE = 0,
        DRV_PADDING_PKCS7 = 1,
-       DRV_PADDING_RESERVE32B = INT32_MAX
+       DRV_PADDING_RESERVE32B = S32_MAX
 };
 
 /*******************************************************************/
@@ -223,9 +218,9 @@ struct drv_ctx_generic {
 struct drv_ctx_hash {
        enum drv_crypto_alg alg; /* DRV_CRYPTO_ALG_HASH */
        enum drv_hash_mode mode;
-       uint8_t digest[CC_DIGEST_SIZE_MAX];
+       u8 digest[CC_DIGEST_SIZE_MAX];
        /* reserve to end of allocated context size */
-       uint8_t reserved[CC_CTX_SIZE - 2 * sizeof(uint32_t) -
+       u8 reserved[CC_CTX_SIZE - 2 * sizeof(u32) -
                        CC_DIGEST_SIZE_MAX];
 };
 
@@ -234,11 +229,11 @@ struct drv_ctx_hash {
 struct drv_ctx_hmac {
        enum drv_crypto_alg alg; /* DRV_CRYPTO_ALG_HMAC */
        enum drv_hash_mode mode;
-       uint8_t digest[CC_DIGEST_SIZE_MAX];
-       uint32_t k0[CC_HMAC_BLOCK_SIZE_MAX/sizeof(uint32_t)];
-       uint32_t k0_size;
+       u8 digest[CC_DIGEST_SIZE_MAX];
+       u32 k0[CC_HMAC_BLOCK_SIZE_MAX/sizeof(u32)];
+       u32 k0_size;
        /* reserve to end of allocated context size */
-       uint8_t reserved[CC_CTX_SIZE - 3 * sizeof(uint32_t) -
+       u8 reserved[CC_CTX_SIZE - 3 * sizeof(u32) -
                        CC_DIGEST_SIZE_MAX - CC_HMAC_BLOCK_SIZE_MAX];
 };
 
@@ -248,19 +243,19 @@ struct drv_ctx_cipher {
        enum drv_crypto_direction direction;
        enum drv_crypto_key_type crypto_key_type;
        enum drv_crypto_padding_type padding_type;
-       uint32_t key_size; /* numeric value in bytes   */
-       uint32_t data_unit_size; /* required for XTS */
+       u32 key_size; /* numeric value in bytes   */
+       u32 data_unit_size; /* required for XTS */
        /* block_state is the AES engine block state.
        *  It is used by the host to pass IV or counter at initialization.
        *  It is used by SeP for intermediate block chaining state and for
        *  returning MAC algorithms results.           */
-       uint8_t block_state[CC_AES_BLOCK_SIZE];
-       uint8_t key[CC_AES_KEY_SIZE_MAX];
-       uint8_t xex_key[CC_AES_KEY_SIZE_MAX];
+       u8 block_state[CC_AES_BLOCK_SIZE];
+       u8 key[CC_AES_KEY_SIZE_MAX];
+       u8 xex_key[CC_AES_KEY_SIZE_MAX];
        /* reserve to end of allocated context size */
-       uint32_t reserved[CC_DRV_CTX_SIZE_WORDS - 7 -
-               CC_AES_BLOCK_SIZE/sizeof(uint32_t) - 2 *
-               (CC_AES_KEY_SIZE_MAX/sizeof(uint32_t))];
+       u32 reserved[CC_DRV_CTX_SIZE_WORDS - 7 -
+               CC_AES_BLOCK_SIZE/sizeof(u32) - 2 *
+               (CC_AES_KEY_SIZE_MAX/sizeof(u32))];
 };
 
 /* authentication and encryption with associated data class */
@@ -268,20 +263,20 @@ struct drv_ctx_aead {
        enum drv_crypto_alg alg; /* DRV_CRYPTO_ALG_AES */
        enum drv_cipher_mode mode;
        enum drv_crypto_direction direction;
-       uint32_t key_size; /* numeric value in bytes   */
-       uint32_t nonce_size; /* nonce size (octets) */
-       uint32_t header_size; /* finit additional data size (octets) */
-       uint32_t text_size; /* finit text data size (octets) */
-       uint32_t tag_size; /* mac size, element of {4, 6, 8, 10, 12, 14, 16} */
+       u32 key_size; /* numeric value in bytes   */
+       u32 nonce_size; /* nonce size (octets) */
+       u32 header_size; /* finit additional data size (octets) */
+       u32 text_size; /* finit text data size (octets) */
+       u32 tag_size; /* mac size, element of {4, 6, 8, 10, 12, 14, 16} */
        /* block_state1/2 is the AES engine block state */
-       uint8_t block_state[CC_AES_BLOCK_SIZE];
-       uint8_t mac_state[CC_AES_BLOCK_SIZE]; /* MAC result */
-       uint8_t nonce[CC_AES_BLOCK_SIZE]; /* nonce buffer */
-       uint8_t key[CC_AES_KEY_SIZE_MAX];
+       u8 block_state[CC_AES_BLOCK_SIZE];
+       u8 mac_state[CC_AES_BLOCK_SIZE]; /* MAC result */
+       u8 nonce[CC_AES_BLOCK_SIZE]; /* nonce buffer */
+       u8 key[CC_AES_KEY_SIZE_MAX];
        /* reserve to end of allocated context size */
-       uint32_t reserved[CC_DRV_CTX_SIZE_WORDS - 8 -
-               3 * (CC_AES_BLOCK_SIZE/sizeof(uint32_t)) -
-               CC_AES_KEY_SIZE_MAX/sizeof(uint32_t)];
+       u32 reserved[CC_DRV_CTX_SIZE_WORDS - 8 -
+               3 * (CC_AES_BLOCK_SIZE/sizeof(u32)) -
+               CC_AES_KEY_SIZE_MAX/sizeof(u32)];
 };
 
 /*******************************************************************/
diff --git a/drivers/staging/ccree/cc_hw_queue_defs.h b/drivers/staging/ccree/cc_hw_queue_defs.h
index 537ea0dcab7c4936d944f1a2100de2ce2b59c48b..8c2b7f48937345f2e193b77041aefc9b73f51590 100644
 #ifndef __CC_HW_QUEUE_DEFS_H__
 #define __CC_HW_QUEUE_DEFS_H__
 
+#include <linux/types.h>
+
 #include "cc_regs.h"
 #include "dx_crys_kernel.h"
 
-#ifdef __KERNEL__
-#include <linux/types.h>
-#define UINT32_MAX 0xFFFFFFFFL
-#define INT32_MAX  0x7FFFFFFFL
-#define UINT16_MAX 0xFFFFL
-#else
-#include <stdint.h>
-#endif
-
 /******************************************************************************
 *                              DEFINITIONS
 ******************************************************************************/
@@ -48,7 +41,7 @@
 ******************************************************************************/
 
 typedef struct HwDesc {
-       uint32_t word[HW_DESC_SIZE_WORDS];
+       u32 word[HW_DESC_SIZE_WORDS];
 } HwDesc_s;
 
 typedef enum DescDirection {
@@ -56,7 +49,7 @@ typedef enum DescDirection {
        DESC_DIRECTION_ENCRYPT_ENCRYPT = 0,
        DESC_DIRECTION_DECRYPT_DECRYPT = 1,
        DESC_DIRECTION_DECRYPT_ENCRYPT = 3,
-       DESC_DIRECTION_END = INT32_MAX,
+       DESC_DIRECTION_END = S32_MAX,
 }DescDirection_t;
 
 typedef enum DmaMode {
@@ -66,7 +59,7 @@ typedef enum DmaMode {
        DMA_DLLI                = 2,
        DMA_MLLI                = 3,
        DmaMode_OPTIONTS,
-       DmaMode_END             = INT32_MAX,
+       DmaMode_END             = S32_MAX,
 }DmaMode_t;
 
 typedef enum FlowMode {
@@ -105,7 +98,7 @@ typedef enum FlowMode {
        S_HASH_to_DOUT          = 43,
        SET_FLOW_ID             = 44,
        FlowMode_OPTIONTS,
-       FlowMode_END = INT32_MAX,
+       FlowMode_END = S32_MAX,
 }FlowMode_t;
 
 typedef enum TunnelOp {
@@ -113,7 +106,7 @@ typedef enum TunnelOp {
        TUNNEL_OFF = 0,
        TUNNEL_ON = 1,
        TunnelOp_OPTIONS,
-       TunnelOp_END = INT32_MAX,
+       TunnelOp_END = S32_MAX,
 } TunnelOp_t;
 
 typedef enum SetupOp {
@@ -128,14 +121,14 @@ typedef enum SetupOp {
        SETUP_WRITE_STATE2      = 10,
        SETUP_WRITE_STATE3      = 11,
        setupOp_OPTIONTS,
-       setupOp_END = INT32_MAX,
+       setupOp_END = S32_MAX,
 }SetupOp_t;
 
 enum AesMacSelector {
        AES_SK = 1,
        AES_CMAC_INIT = 2,
        AES_CMAC_SIZE0 = 3,
-       AesMacEnd = INT32_MAX,
+       AesMacEnd = S32_MAX,
 };
 
 #define HW_KEY_MASK_CIPHER_DO    0x3
@@ -156,21 +149,21 @@ typedef enum HwCryptoKey {
        KFDE1_KEY = 9,                  /* 0x1001 */
        KFDE2_KEY = 10,                 /* 0x1010 */
        KFDE3_KEY = 11,                 /* 0x1011 */
-       END_OF_KEYS = INT32_MAX,
+       END_OF_KEYS = S32_MAX,
 }HwCryptoKey_t;
 
 typedef enum HwAesKeySize {
        AES_128_KEY = 0,
        AES_192_KEY = 1,
        AES_256_KEY = 2,
-       END_OF_AES_KEYS = INT32_MAX,
+       END_OF_AES_KEYS = S32_MAX,
 }HwAesKeySize_t;
 
 typedef enum HwDesKeySize {
        DES_ONE_KEY = 0,
        DES_TWO_KEYS = 1,
        DES_THREE_KEYS = 2,
-       END_OF_DES_KEYS = INT32_MAX,
+       END_OF_DES_KEYS = S32_MAX,
 }HwDesKeySize_t;
 
 /*****************************/
@@ -210,7 +203,7 @@ typedef enum HwDesKeySize {
        } while (0)
 
 
-#define MSB64(_addr) (sizeof(_addr) == 4 ? 0 : ((_addr) >> 32)&UINT16_MAX)
+#define MSB64(_addr) (sizeof(_addr) == 4 ? 0 : ((_addr) >> 32)&U16_MAX)
 
 /*!
  * This macro sets the DIN field of a HW descriptors
@@ -223,7 +216,7 @@ typedef enum HwDesKeySize {
  */
 #define HW_DESC_SET_DIN_TYPE(pDesc, dmaMode, dinAdr, dinSize, axiNs)                                                           \
        do {                                                                                                                    \
-               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (dinAdr)&UINT32_MAX );                 \
+               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (dinAdr)&U32_MAX );                    \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD5, DIN_ADDR_HIGH, (pDesc)->word[5], MSB64(dinAdr) );               \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_DMA_MODE, (pDesc)->word[1], (dmaMode));                     \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_SIZE, (pDesc)->word[1], (dinSize));                         \
@@ -241,7 +234,7 @@ typedef enum HwDesKeySize {
  */
 #define HW_DESC_SET_DIN_NO_DMA(pDesc, dinAdr, dinSize)                                                                 \
        do {                                                                                                            \
-               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (uint32_t)(dinAdr));           \
+               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (u32)(dinAdr));                \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_SIZE, (pDesc)->word[1], (dinSize));                 \
        } while (0)
 
@@ -256,7 +249,7 @@ typedef enum HwDesKeySize {
  */
 #define HW_DESC_SET_DIN_SRAM(pDesc, dinAdr, dinSize)                                                                   \
        do {                                                                                                            \
-               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (uint32_t)(dinAdr));           \
+               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (u32)(dinAdr));                \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_DMA_MODE, (pDesc)->word[1], DMA_SRAM);              \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_SIZE, (pDesc)->word[1], (dinSize));                 \
        } while (0)
@@ -269,7 +262,7 @@ typedef enum HwDesKeySize {
  */
 #define HW_DESC_SET_DIN_CONST(pDesc, val, dinSize)                                                                     \
        do {                                                                                                            \
-               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (uint32_t)(val));              \
+               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (u32)(val));           \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_CONST_VALUE, (pDesc)->word[1], 1);                  \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_DMA_MODE, (pDesc)->word[1], DMA_SRAM);              \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_SIZE, (pDesc)->word[1], (dinSize));                 \
@@ -296,7 +289,7 @@ typedef enum HwDesKeySize {
  */
 #define HW_DESC_SET_DOUT_TYPE(pDesc, dmaMode, doutAdr, doutSize, axiNs)                                                        \
        do {                                                                                                            \
-               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (doutAdr)&UINT32_MAX );                \
+               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (doutAdr)&U32_MAX );           \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD5, DOUT_ADDR_HIGH, (pDesc)->word[5], MSB64(doutAdr) );     \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_DMA_MODE, (pDesc)->word[3], (dmaMode));            \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize));               \
@@ -315,7 +308,7 @@ typedef enum HwDesKeySize {
  */
 #define HW_DESC_SET_DOUT_DLLI(pDesc, doutAdr, doutSize, axiNs ,lastInd)                                                                \
        do {                                                                                                                    \
-               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (doutAdr)&UINT32_MAX );                \
+               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (doutAdr)&U32_MAX );           \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD5, DOUT_ADDR_HIGH, (pDesc)->word[5], MSB64(doutAdr) );     \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_DMA_MODE, (pDesc)->word[3], DMA_DLLI);                     \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize));                       \
@@ -335,7 +328,7 @@ typedef enum HwDesKeySize {
  */
 #define HW_DESC_SET_DOUT_MLLI(pDesc, doutAdr, doutSize, axiNs ,lastInd)                                                                \
        do {                                                                                                                    \
-               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (doutAdr)&UINT32_MAX );                \
+               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (doutAdr)&U32_MAX );           \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD5, DOUT_ADDR_HIGH, (pDesc)->word[5], MSB64(doutAdr) );     \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_DMA_MODE, (pDesc)->word[3], DMA_MLLI);                     \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize));                       \
@@ -354,7 +347,7 @@ typedef enum HwDesKeySize {
  */
 #define HW_DESC_SET_DOUT_NO_DMA(pDesc, doutAdr, doutSize, registerWriteEnable)                                                 \
        do {                                                                                                                    \
-               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(doutAdr));                  \
+               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (u32)(doutAdr));                       \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize));                       \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_LAST_IND, (pDesc)->word[3], (registerWriteEnable));        \
        } while (0)
@@ -367,7 +360,7 @@ typedef enum HwDesKeySize {
  */
 #define HW_DESC_SET_XOR_VAL(pDesc, xorVal)                                                                             \
        do {                                                                                                            \
-               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(xorVal));           \
+               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (u32)(xorVal));                \
        } while (0)
 
 /*!
@@ -401,7 +394,7 @@ typedef enum HwDesKeySize {
  */
 #define HW_DESC_SET_DOUT_SRAM(pDesc, doutAdr, doutSize)                                                                        \
        do {                                                                                                            \
-               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(doutAdr));          \
+               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (u32)(doutAdr));               \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_DMA_MODE, (pDesc)->word[3], DMA_SRAM);             \
                CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize));               \
        } while (0)
@@ -415,7 +408,7 @@ typedef enum HwDesKeySize {
  */
 #define HW_DESC_SET_XEX_DATA_UNIT_SIZE(pDesc, dataUnitSize)                                                            \
        do {                                                                                                            \
-               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(dataUnitSize));     \
+               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (u32)(dataUnitSize));  \
        } while (0)
 
 /*!
@@ -426,7 +419,7 @@ typedef enum HwDesKeySize {
 */
 #define HW_DESC_SET_MULTI2_NUM_ROUNDS(pDesc, numRounds)                                                                        \
        do {                                                                                                            \
-               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(numRounds));        \
+               CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (u32)(numRounds));     \
        } while (0)
 
 /*!
diff --git a/drivers/staging/ccree/cc_lli_defs.h b/drivers/staging/ccree/cc_lli_defs.h
index c4cdd83af58e23aee0d91ab295a9619efd4dd837..75d5e2762e8fc0be5624daa817ddf69b91fd539a 100644
 
 #define CC_MAX_MLLI_ENTRY_SIZE 0x10000
 
-#define MSB64(_addr) (sizeof(_addr) == 4 ? 0 : ((_addr) >> 32)&UINT16_MAX)
+#define MSB64(_addr) (sizeof(_addr) == 4 ? 0 : ((_addr) >> 32)&U16_MAX)
 
 #define LLI_SET_ADDR(lli_p, addr) \
-               BITFIELD_SET(((uint32_t *)(lli_p))[LLI_WORD0_OFFSET], LLI_LADDR_BIT_OFFSET, LLI_LADDR_BIT_SIZE, (addr & UINT32_MAX)); \
-               BITFIELD_SET(((uint32_t *)(lli_p))[LLI_WORD1_OFFSET], LLI_HADDR_BIT_OFFSET, LLI_HADDR_BIT_SIZE, MSB64(addr));
+               BITFIELD_SET(((u32 *)(lli_p))[LLI_WORD0_OFFSET], LLI_LADDR_BIT_OFFSET, LLI_LADDR_BIT_SIZE, (addr & U32_MAX)); \
+               BITFIELD_SET(((u32 *)(lli_p))[LLI_WORD1_OFFSET], LLI_HADDR_BIT_OFFSET, LLI_HADDR_BIT_SIZE, MSB64(addr));
 
 #define LLI_SET_SIZE(lli_p, size) \
-               BITFIELD_SET(((uint32_t *)(lli_p))[LLI_WORD1_OFFSET], LLI_SIZE_BIT_OFFSET, LLI_SIZE_BIT_SIZE, size)
+               BITFIELD_SET(((u32 *)(lli_p))[LLI_WORD1_OFFSET], LLI_SIZE_BIT_OFFSET, LLI_SIZE_BIT_SIZE, size)
 
 /* Size of entry */
 #define LLI_ENTRY_WORD_SIZE 2
-#define LLI_ENTRY_BYTE_SIZE (LLI_ENTRY_WORD_SIZE * sizeof(uint32_t))
+#define LLI_ENTRY_BYTE_SIZE (LLI_ENTRY_WORD_SIZE * sizeof(u32))
 
 /* Word0[31:0] = ADDR[31:0] */
 #define LLI_WORD0_OFFSET 0
diff --git a/drivers/staging/ccree/cc_regs.h b/drivers/staging/ccree/cc_regs.h
index e81b0dc8ce5e4be9b255c88ea73f6bed795f12c8..8b89f0603f163449e9233281a8eecb3acd517e74 100644
@@ -35,7 +35,7 @@
 /* Read-Modify-Write a field of a register */
 #define MODIFY_REGISTER_FLD(unitName, regName, fldName, fldVal)         \
 do {                                                                       \
-       uint32_t regVal;                                                    \
+       u32 regVal;                                                 \
        regVal = READ_REGISTER(CC_REG_ADDR(unitName, regName));       \
        CC_REG_FLD_SET(unitName, regName, fldName, regVal, fldVal); \
        WRITE_REGISTER(CC_REG_ADDR(unitName, regName), regVal);       \
@@ -86,7 +86,7 @@ do {                                                                     \
 } while (0)
 
 /* Usage example:
-   uint32_t reg_shadow = READ_REGISTER(CC_REG_ADDR(CRY_KERNEL,AES_CONTROL));
+   u32 reg_shadow = READ_REGISTER(CC_REG_ADDR(CRY_KERNEL,AES_CONTROL));
    CC_REG_FLD_SET(CRY_KERNEL,AES_CONTROL,NK_KEY0,reg_shadow, 3);
    CC_REG_FLD_SET(CRY_KERNEL,AES_CONTROL,NK_KEY1,reg_shadow, 1);
    WRITE_REGISTER(CC_REG_ADDR(CRY_KERNEL,AES_CONTROL), reg_shadow);
diff --git a/drivers/staging/ccree/hash_defs.h b/drivers/staging/ccree/hash_defs.h
index a441c81b33b1754cf5cb579f5efcb9bcc59f9697..613897038f6d8c20765f58b42940054cd7809f85 100644
@@ -48,13 +48,13 @@ enum HashConfig1Padding {
        HASH_PADDING_DISABLED = 0,
        HASH_PADDING_ENABLED = 1,
        HASH_DIGEST_RESULT_LITTLE_ENDIAN = 2,
-       HASH_CONFIG1_PADDING_RESERVE32 = INT32_MAX,
+       HASH_CONFIG1_PADDING_RESERVE32 = S32_MAX,
 };
 
 enum HashCipherDoPadding {
        DO_NOT_PAD = 0,
        DO_PAD = 1,
-       HASH_CIPHER_DO_PADDING_RESERVE32 = INT32_MAX,
+       HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
 };
 
 typedef struct SepHashPrivateContext {
@@ -66,11 +66,11 @@ typedef struct SepHashPrivateContext {
           This means that this structure size (without the reserved field can be up to 20 bytes ,
           in case sha512 is not suppported it is 20 bytes (SEP_HASH_LENGTH_WORDS define to 2 ) and in the other
           case it is 28 (SEP_HASH_LENGTH_WORDS define to 4) */
-       uint32_t reserved[(sizeof(struct drv_ctx_hash)/sizeof(uint32_t)) - SEP_HASH_LENGTH_WORDS - 3];
-       uint32_t CurrentDigestedLength[SEP_HASH_LENGTH_WORDS];
-       uint32_t KeyType;
-       uint32_t dataCompleted;
-       uint32_t hmacFinalization;
+       u32 reserved[(sizeof(struct drv_ctx_hash)/sizeof(u32)) - SEP_HASH_LENGTH_WORDS - 3];
+       u32 CurrentDigestedLength[SEP_HASH_LENGTH_WORDS];
+       u32 KeyType;
+       u32 dataCompleted;
+       u32 hmacFinalization;
        /* no space left */
 } SepHashPrivateContext_s;
 
diff --git a/drivers/staging/ccree/ssi_aead.c b/drivers/staging/ccree/ssi_aead.c
index 240a3c481db8fd1088045b7d8e122232e55f3b73..7f9b5cc777e9042b877bd3a097d45c7cf9c1db85 100644
@@ -60,18 +60,18 @@ struct ssi_aead_handle {
 
 struct ssi_aead_ctx {
        struct ssi_drvdata *drvdata;
-       uint8_t ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
-       uint8_t *enckey;
+       u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
+       u8 *enckey;
        dma_addr_t enckey_dma_addr;
        union {
                struct {
-                       uint8_t *padded_authkey;
-                       uint8_t *ipad_opad; /* IPAD, OPAD*/
+                       u8 *padded_authkey;
+                       u8 *ipad_opad; /* IPAD, OPAD*/
                        dma_addr_t padded_authkey_dma_addr;
                        dma_addr_t ipad_opad_dma_addr;
                } hmac;
                struct {
-                       uint8_t *xcbc_keys; /* K1,K2,K3 */
+                       u8 *xcbc_keys; /* K1,K2,K3 */
                        dma_addr_t xcbc_keys_dma_addr;
                } xcbc;
        } auth_state;
@@ -428,7 +428,7 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
        dma_addr_t key_dma_addr = 0;
        struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = &ctx->drvdata->plat_dev->dev;
-       uint32_t larval_addr = ssi_ahash_get_larval_digest_sram_addr(
+       u32 larval_addr = ssi_ahash_get_larval_digest_sram_addr(
                                        ctx->drvdata, ctx->auth_mode);
        struct ssi_crypto_req ssi_req = {};
        unsigned int blocksize;
@@ -831,7 +831,7 @@ ssi_aead_process_authenc_data_desc(
                 * assoc. + iv + data -compact in one table
                 * if assoclen is ZERO only IV perform */
                ssi_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
-               uint32_t mlli_nents = areq_ctx->assoc.mlli_nents;
+               u32 mlli_nents = areq_ctx->assoc.mlli_nents;
 
                if (likely(areq_ctx->is_single_pass == true)) {
                        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT){
@@ -1386,11 +1386,11 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
                        break;
                }
 
-               if (!IS_ALIGNED(assoclen, sizeof(uint32_t)))
+               if (!IS_ALIGNED(assoclen, sizeof(u32)))
                        areq_ctx->is_single_pass = false;
 
                if ((ctx->cipher_mode == DRV_CIPHER_CTR) &&
-                   !IS_ALIGNED(cipherlen, sizeof(uint32_t)))
+                   !IS_ALIGNED(cipherlen, sizeof(u32)))
                        areq_ctx->is_single_pass = false;
 
                break;
@@ -1412,7 +1412,7 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
 }
 
 #if SSI_CC_HAS_AES_CCM
-static unsigned int format_ccm_a0(uint8_t *pA0Buff, uint32_t headerSize)
+static unsigned int format_ccm_a0(u8 *pA0Buff, u32 headerSize)
 {
        unsigned int len = 0;
        if ( headerSize == 0 ) {
@@ -1597,9 +1597,9 @@ static int config_ccm_adata(struct aead_request *req) {
        /* Note: The code assume that req->iv[0] already contains the value of L' of RFC3610 */
        unsigned int l = lp + 1;  /* This is L' of RFC 3610. */
        unsigned int m = ctx->authsize;  /* This is M' of RFC 3610. */
-       uint8_t *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
-       uint8_t *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
-       uint8_t *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
+       u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
+       u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
+       u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
        unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
                                 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                                req->cryptlen :
diff --git a/drivers/staging/ccree/ssi_aead.h b/drivers/staging/ccree/ssi_aead.h
index 6135ff28438e7f944c4f4754ffc1b8ee1d8f0094..654a181729d7d052724150a266632461e2a84e2c 100644
@@ -57,30 +57,30 @@ enum aead_ccm_header_size {
        ccm_header_size_zero = 0,
        ccm_header_size_2 = 2,
        ccm_header_size_6 = 6,
-       ccm_header_size_max = INT32_MAX
+       ccm_header_size_max = S32_MAX
 };
 
 struct aead_req_ctx {
        /* Allocate cache line although only 4 bytes are needed to
        *  assure next field falls @ cache line
        *  Used for both: digest HW compare and CCM/GCM MAC value */
-       uint8_t mac_buf[MAX_MAC_SIZE] ____cacheline_aligned;
-       uint8_t ctr_iv[AES_BLOCK_SIZE] ____cacheline_aligned;
+       u8 mac_buf[MAX_MAC_SIZE] ____cacheline_aligned;
+       u8 ctr_iv[AES_BLOCK_SIZE] ____cacheline_aligned;
 
        //used in gcm
-       uint8_t gcm_iv_inc1[AES_BLOCK_SIZE] ____cacheline_aligned;
-       uint8_t gcm_iv_inc2[AES_BLOCK_SIZE] ____cacheline_aligned;
-       uint8_t hkey[AES_BLOCK_SIZE] ____cacheline_aligned;
+       u8 gcm_iv_inc1[AES_BLOCK_SIZE] ____cacheline_aligned;
+       u8 gcm_iv_inc2[AES_BLOCK_SIZE] ____cacheline_aligned;
+       u8 hkey[AES_BLOCK_SIZE] ____cacheline_aligned;
        struct {
-               uint8_t lenA[GCM_BLOCK_LEN_SIZE] ____cacheline_aligned;
-               uint8_t lenC[GCM_BLOCK_LEN_SIZE] ;
+               u8 lenA[GCM_BLOCK_LEN_SIZE] ____cacheline_aligned;
+               u8 lenC[GCM_BLOCK_LEN_SIZE] ;
        } gcm_len_block;
 
-       uint8_t ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
+       u8 ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
        unsigned int hw_iv_size ____cacheline_aligned; /*HW actual size input*/
-       uint8_t backup_mac[MAX_MAC_SIZE]; /*used to prevent cache coherence problem*/
-       uint8_t *backup_iv; /*store iv for generated IV flow*/
-       uint8_t *backup_giv; /*store iv for rfc3686(ctr) flow*/
+       u8 backup_mac[MAX_MAC_SIZE]; /*used to prevent cache coherence problem*/
+       u8 *backup_iv; /*store iv for generated IV flow*/
+       u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
        dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
        dma_addr_t ccm_iv0_dma_addr; /* buffer for internal ccm configurations */
        dma_addr_t icv_dma_addr; /* Phys. address of ICV */
@@ -92,7 +92,7 @@ struct aead_req_ctx {
        dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */
        bool is_gcm4543;
 
-       uint8_t *icv_virt_addr; /* Virt. address of ICV */
+       u8 *icv_virt_addr; /* Virt. address of ICV */
        struct async_gen_req_ctx gen_ctx;
        struct ssi_mlli assoc;
        struct ssi_mlli src;
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c
index 39065e8b75b11520dc2e0ef8bad9158a77e202d2..77e490968db9c015aa2c3e2e37635e3e92f0980f 100644
@@ -92,11 +92,11 @@ struct buffer_array {
        int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
        enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
        bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
-       uint32_t * mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
+       u32 * mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
 };
 
 #ifdef CC_DMA_48BIT_SIM
-dma_addr_t ssi_buff_mgr_update_dma_addr(dma_addr_t orig_addr, uint32_t data_len)
+dma_addr_t ssi_buff_mgr_update_dma_addr(dma_addr_t orig_addr, u32 data_len)
 {
        dma_addr_t tmp_dma_addr;
 #ifdef CC_DMA_48BIT_SIM_FULL
@@ -109,7 +109,7 @@ dma_addr_t ssi_buff_mgr_update_dma_addr(dma_addr_t orig_addr, uint32_t data_len)
                (data_len <= CC_MAX_MLLI_ENTRY_SIZE)) {
 #endif
                tmp_dma_addr = ((orig_addr<<16) | 0xFFFF0000 |
-                               (orig_addr & UINT16_MAX));
+                               (orig_addr & U16_MAX));
                        SSI_LOG_DEBUG("MAP DMA: orig address=0x%llX "
                                    "dma_address=0x%llX\n",
                                     orig_addr, tmp_dma_addr);
@@ -134,7 +134,7 @@ dma_addr_t ssi_buff_mgr_restore_dma_addr(dma_addr_t orig_addr)
                /*clean the 0xFFFF in the lower bits (set in the add expansion)*/
                tmp_dma_addr &= 0xFFFF0000;
                /* Set the original 16 bits */
-               tmp_dma_addr |= (orig_addr & UINT16_MAX);
+               tmp_dma_addr |= (orig_addr & U16_MAX);
                SSI_LOG_DEBUG("Release DMA: orig address=0x%llX "
                             "dma_address=0x%llX\n",
                             orig_addr, tmp_dma_addr);
@@ -151,7 +151,7 @@ dma_addr_t ssi_buff_mgr_restore_dma_addr(dma_addr_t orig_addr)
  * @lbytes: [OUT] Returns the amount of bytes at the last entry
  */
 static unsigned int ssi_buffer_mgr_get_sgl_nents(
-       struct scatterlist *sg_list, unsigned int nbytes, uint32_t *lbytes, bool *is_chained)
+       struct scatterlist *sg_list, unsigned int nbytes, u32 *lbytes, bool *is_chained)
 {
        unsigned int nents = 0;
        while (nbytes != 0) {
@@ -182,7 +182,7 @@ static unsigned int ssi_buffer_mgr_get_sgl_nents(
  *
  * @sgl:
  */
-void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len)
+void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
 {
        struct scatterlist *current_sg = sgl;
        int sg_index = 0;
@@ -210,21 +210,21 @@ void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len)
  */
 void ssi_buffer_mgr_copy_scatterlist_portion(
        u8 *dest, struct scatterlist *sg,
-       uint32_t to_skip,  uint32_t end,
+       u32 to_skip,  u32 end,
        enum ssi_sg_cpy_direct direct)
 {
-       uint32_t nents, lbytes;
+       u32 nents, lbytes;
 
        nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
        sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip), 0, (direct == SSI_SG_TO_BUF));
 }
 
 static inline int ssi_buffer_mgr_render_buff_to_mlli(
-       dma_addr_t buff_dma, uint32_t buff_size, uint32_t *curr_nents,
-       uint32_t **mlli_entry_pp)
+       dma_addr_t buff_dma, u32 buff_size, u32 *curr_nents,
+       u32 **mlli_entry_pp)
 {
-       uint32_t *mlli_entry_p = *mlli_entry_pp;
-       uint32_t new_nents;;
+       u32 *mlli_entry_p = *mlli_entry_pp;
+       u32 new_nents;;
 
        /* Verify there is no memory overflow*/
        new_nents = (*curr_nents + buff_size/CC_MAX_MLLI_ENTRY_SIZE + 1);
@@ -261,16 +261,16 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
 
 
 static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
-       struct scatterlist *sgl, uint32_t sgl_data_len, uint32_t sglOffset, uint32_t *curr_nents,
-       uint32_t **mlli_entry_pp)
+       struct scatterlist *sgl, u32 sgl_data_len, u32 sglOffset, u32 *curr_nents,
+       u32 **mlli_entry_pp)
 {
        struct scatterlist *curr_sgl = sgl;
-       uint32_t *mlli_entry_p = *mlli_entry_pp;
-       int32_t rc = 0;
+       u32 *mlli_entry_p = *mlli_entry_pp;
+       s32 rc = 0;
 
        for ( ; (curr_sgl != NULL) && (sgl_data_len != 0);
              curr_sgl = sg_next(curr_sgl)) {
-               uint32_t entry_data_len =
+               u32 entry_data_len =
                        (sgl_data_len > sg_dma_len(curr_sgl) - sglOffset) ?
                                sg_dma_len(curr_sgl) - sglOffset : sgl_data_len ;
                sgl_data_len -= entry_data_len;
@@ -291,8 +291,8 @@ static int ssi_buffer_mgr_generate_mlli(
        struct buffer_array *sg_data,
        struct mlli_params *mlli_params)
 {
-       uint32_t *mlli_p;
-       uint32_t total_nents = 0,prev_total_nents = 0;
+       u32 *mlli_p;
+       u32 total_nents = 0,prev_total_nents = 0;
        int rc = 0, i;
 
        SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers);
@@ -310,7 +310,7 @@ static int ssi_buffer_mgr_generate_mlli(
                                                (MAX_NUM_OF_TOTAL_MLLI_ENTRIES*
                                                LLI_ENTRY_BYTE_SIZE));
        /* Point to start of MLLI */
-       mlli_p = (uint32_t *)mlli_params->mlli_virt_addr;
+       mlli_p = (u32 *)mlli_params->mlli_virt_addr;
        /* go over all SG's and link it to one MLLI table */
        for (i = 0; i < sg_data->num_of_buffers; i++) {
                if (sg_data->type[i] == DMA_SGL_TYPE)
@@ -353,7 +353,7 @@ static int ssi_buffer_mgr_generate_mlli(
 static inline void ssi_buffer_mgr_add_buffer_entry(
        struct buffer_array *sgl_data,
        dma_addr_t buffer_dma, unsigned int buffer_len,
-       bool is_last_entry, uint32_t *mlli_nents)
+       bool is_last_entry, u32 *mlli_nents)
 {
        unsigned int index = sgl_data->num_of_buffers;
 
@@ -379,7 +379,7 @@ static inline void ssi_buffer_mgr_add_scatterlist_entry(
        unsigned int data_len,
        unsigned int data_offset,
        bool is_last_table,
-       uint32_t *mlli_nents)
+       u32 *mlli_nents)
 {
        unsigned int index = sgl_data->num_of_buffers;
 
@@ -398,10 +398,10 @@ static inline void ssi_buffer_mgr_add_scatterlist_entry(
 }
 
 static int
-ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, uint32_t nents,
+ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
                         enum dma_data_direction direction)
 {
-       uint32_t i , j;
+       u32 i , j;
        struct scatterlist *l_sg = sg;
        for (i = 0; i < nents; i++) {
                if (l_sg == NULL) {
@@ -430,8 +430,8 @@ ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, uint32_t n
 static int ssi_buffer_mgr_map_scatterlist(
        struct device *dev, struct scatterlist *sg,
        unsigned int nbytes, int direction,
-       uint32_t *nents, uint32_t max_sg_nents,
-       uint32_t *lbytes, uint32_t *mapped_nents)
+       u32 *nents, u32 max_sg_nents,
+       u32 *lbytes, u32 *mapped_nents)
 {
        bool is_chained = false;
 
@@ -491,7 +491,7 @@ static int ssi_buffer_mgr_map_scatterlist(
 static inline int
 ssi_aead_handle_config_buf(struct device *dev,
        struct aead_req_ctx *areq_ctx,
-       uint8_t* config_data,
+       u8* config_data,
        struct buffer_array *sg_data,
        unsigned int assoclen)
 {
@@ -526,8 +526,8 @@ ssi_aead_handle_config_buf(struct device *dev,
 
 static inline int ssi_ahash_handle_curr_buf(struct device *dev,
                                           struct ahash_req_ctx *areq_ctx,
-                                          uint8_t* curr_buff,
-                                          uint32_t curr_buff_cnt,
+                                          u8* curr_buff,
+                                          u32 curr_buff_cnt,
                                           struct buffer_array *sg_data)
 {
        SSI_LOG_DEBUG(" handle curr buff %x set to   DLLI \n", curr_buff_cnt);
@@ -612,9 +612,9 @@ int ssi_buffer_mgr_map_blkcipher_request(
        struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
        struct device *dev = &drvdata->plat_dev->dev;
        struct buffer_array sg_data;
-       uint32_t dummy = 0;
+       u32 dummy = 0;
        int rc = 0;
-       uint32_t mapped_nents = 0;
+       u32 mapped_nents = 0;
 
        req_ctx->dma_buf_type = SSI_DMA_BUF_DLLI;
        mlli_params->curr_pool = NULL;
@@ -622,7 +622,7 @@ int ssi_buffer_mgr_map_blkcipher_request(
 
        /* Map IV buffer */
        if (likely(ivsize != 0) ) {
-               dump_byte_array("iv", (uint8_t *)info, ivsize);
+               dump_byte_array("iv", (u8 *)info, ivsize);
                req_ctx->gen_ctx.iv_dma_addr =
                        dma_map_single(dev, (void *)info,
                                       ivsize,
@@ -710,9 +710,9 @@ void ssi_buffer_mgr_unmap_aead_request(
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        unsigned int hw_iv_size = areq_ctx->hw_iv_size;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-       uint32_t dummy;
+       u32 dummy;
        bool chained;
-       uint32_t size_to_unmap = 0;
+       u32 size_to_unmap = 0;
 
        if (areq_ctx->mac_buf_dma_addr != 0) {
                SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->mac_buf_dma_addr);
@@ -796,7 +796,7 @@ void ssi_buffer_mgr_unmap_aead_request(
        if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
            likely(req->src == req->dst))
        {
-               uint32_t size_to_skip = req->assoclen;
+               u32 size_to_skip = req->assoclen;
                if (areq_ctx->is_gcm4543) {
                        size_to_skip += crypto_aead_ivsize(tfm);
                }
@@ -814,7 +814,7 @@ static inline int ssi_buffer_mgr_get_aead_icv_nents(
        struct scatterlist *sgl,
        unsigned int sgl_nents,
        unsigned int authsize,
-       uint32_t last_entry_data_size,
+       u32 last_entry_data_size,
        bool *is_icv_fragmented)
 {
        unsigned int icv_max_size = 0;
@@ -914,11 +914,11 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
 {
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        int rc = 0;
-       uint32_t mapped_nents = 0;
+       u32 mapped_nents = 0;
        struct scatterlist *current_sg = req->src;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        unsigned int sg_index = 0;
-       uint32_t size_of_assoc = req->assoclen;
+       u32 size_of_assoc = req->assoclen;
 
        if (areq_ctx->is_gcm4543) {
                size_of_assoc += crypto_aead_ivsize(tfm);
@@ -1004,7 +1004,7 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
 
 static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
        struct aead_request *req,
-       uint32_t *src_last_bytes, uint32_t *dst_last_bytes)
+       u32 *src_last_bytes, u32 *dst_last_bytes)
 {
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
@@ -1042,7 +1042,7 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
        struct ssi_drvdata *drvdata,
        struct aead_request *req,
        struct buffer_array *sg_data,
-       uint32_t *src_last_bytes, uint32_t *dst_last_bytes,
+       u32 *src_last_bytes, u32 *dst_last_bytes,
        bool is_last_table)
 {
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
@@ -1075,7 +1075,7 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
                                /* In ACP platform we already copying ICV
                                   for any INPLACE-DECRYPT operation, hence
                                   we must neglect this code. */
-                               uint32_t size_to_skip = req->assoclen;
+                               u32 size_to_skip = req->assoclen;
                                if (areq_ctx->is_gcm4543) {
                                        size_to_skip += crypto_aead_ivsize(tfm);
                                }
@@ -1122,7 +1122,7 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
                        /* Backup happens only when ICV is fragmented, ICV
                           verification is made by CPU compare in order to simplify
                           MAC verification upon request completion */
-                         uint32_t size_to_skip = req->assoclen;
+                         u32 size_to_skip = req->assoclen;
                          if (areq_ctx->is_gcm4543) {
                                  size_to_skip += crypto_aead_ivsize(tfm);
                          }
@@ -1190,14 +1190,14 @@ static inline int ssi_buffer_mgr_aead_chain_data(
        unsigned int authsize = areq_ctx->req_authsize;
        int src_last_bytes = 0, dst_last_bytes = 0;
        int rc = 0;
-       uint32_t src_mapped_nents = 0, dst_mapped_nents = 0;
-       uint32_t offset = 0;
+       u32 src_mapped_nents = 0, dst_mapped_nents = 0;
+       u32 offset = 0;
        unsigned int size_for_map = req->assoclen +req->cryptlen; /*non-inplace mode*/
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-       uint32_t sg_index = 0;
+       u32 sg_index = 0;
        bool chained = false;
        bool is_gcm4543 = areq_ctx->is_gcm4543;
-       uint32_t size_to_skip = req->assoclen;
+       u32 size_to_skip = req->assoclen;
        if (is_gcm4543) {
                size_to_skip += crypto_aead_ivsize(tfm);
        }
@@ -1302,7 +1302,7 @@ static void ssi_buffer_mgr_update_aead_mlli_nents( struct ssi_drvdata *drvdata,
                                           struct aead_request *req)
 {
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-       uint32_t curr_mlli_size = 0;
+       u32 curr_mlli_size = 0;
 
        if (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
                areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
@@ -1362,9 +1362,9 @@ int ssi_buffer_mgr_map_aead_request(
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        bool is_gcm4543 = areq_ctx->is_gcm4543;
 
-       uint32_t mapped_nents = 0;
-       uint32_t dummy = 0; /*used for the assoc data fragments */
-       uint32_t size_to_map = 0;
+       u32 mapped_nents = 0;
+       u32 dummy = 0; /*used for the assoc data fragments */
+       u32 size_to_map = 0;
 
        mlli_params->curr_pool = NULL;
        sg_data.num_of_buffers = 0;
@@ -1373,7 +1373,7 @@ int ssi_buffer_mgr_map_aead_request(
        if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
            likely(req->src == req->dst))
        {
-               uint32_t size_to_skip = req->assoclen;
+               u32 size_to_skip = req->assoclen;
                if (is_gcm4543) {
                        size_to_skip += crypto_aead_ivsize(tfm);
                }
@@ -1568,15 +1568,15 @@ int ssi_buffer_mgr_map_hash_request_final(
 {
        struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
        struct device *dev = &drvdata->plat_dev->dev;
-       uint8_t* curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
+       u8* curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
                        areq_ctx->buff0;
-       uint32_t *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
+       u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
                        &areq_ctx->buff0_cnt;
        struct mlli_params *mlli_params = &areq_ctx->mlli_params;
        struct buffer_array sg_data;
        struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
-       uint32_t dummy = 0;
-       uint32_t mapped_nents = 0;
+       u32 dummy = 0;
+       u32 mapped_nents = 0;
 
        SSI_LOG_DEBUG(" final params : curr_buff=%pK "
                     "curr_buff_cnt=0x%X nbytes = 0x%X "
@@ -1660,22 +1660,22 @@ int ssi_buffer_mgr_map_hash_request_update(
 {
        struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
        struct device *dev = &drvdata->plat_dev->dev;
-       uint8_t* curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
+       u8* curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
                        areq_ctx->buff0;
-       uint32_t *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
+       u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
                        &areq_ctx->buff0_cnt;
-       uint8_t* next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
+       u8* next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
                        areq_ctx->buff1;
-       uint32_t *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
+       u32 *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
                        &areq_ctx->buff1_cnt;
        struct mlli_params *mlli_params = &areq_ctx->mlli_params;
        unsigned int update_data_len;
-       uint32_t total_in_len = nbytes + *curr_buff_cnt;
+       u32 total_in_len = nbytes + *curr_buff_cnt;
        struct buffer_array sg_data;
        struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
        unsigned int swap_index = 0;
-       uint32_t dummy = 0;
-       uint32_t mapped_nents = 0;
+       u32 dummy = 0;
+       u32 mapped_nents = 0;
 
        SSI_LOG_DEBUG(" update params : curr_buff=%pK "
                     "curr_buff_cnt=0x%X nbytes=0x%X "
@@ -1789,7 +1789,7 @@ void ssi_buffer_mgr_unmap_hash_request(
        struct device *dev, void *ctx, struct scatterlist *src, bool do_revert)
 {
        struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
-       uint32_t *prev_len = areq_ctx->buff_index ?  &areq_ctx->buff0_cnt :
+       u32 *prev_len = areq_ctx->buff_index ?  &areq_ctx->buff0_cnt :
                                                &areq_ctx->buff1_cnt;
 
        /*In case a pool was set, a table was
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.h b/drivers/staging/ccree/ssi_buffer_mgr.h
index 8c3273edc61871720c60252d53fbd1ff4bf1fdc1..4acbb4b6afc9c54dafdcb641cc12a1e449957f35 100644
@@ -46,9 +46,9 @@ struct ssi_mlli {
 
 struct mlli_params {
        struct dma_pool *curr_pool;
-       uint8_t *mlli_virt_addr;
+       u8 *mlli_virt_addr;
        dma_addr_t mlli_dma_addr;
-       uint32_t mlli_len;
+       u32 mlli_len;
 };
 
 int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata);
@@ -81,13 +81,13 @@ int ssi_buffer_mgr_map_hash_request_update(struct ssi_drvdata *drvdata, void *ct
 
 void ssi_buffer_mgr_unmap_hash_request(struct device *dev, void *ctx, struct scatterlist *src, bool do_revert);
 
-void ssi_buffer_mgr_copy_scatterlist_portion(u8 *dest, struct scatterlist *sg, uint32_t to_skip, uint32_t end, enum ssi_sg_cpy_direct direct);
+void ssi_buffer_mgr_copy_scatterlist_portion(u8 *dest, struct scatterlist *sg, u32 to_skip, u32 end, enum ssi_sg_cpy_direct direct);
 
-void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len);
+void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len);
 
 
 #ifdef CC_DMA_48BIT_SIM
-dma_addr_t ssi_buff_mgr_update_dma_addr(dma_addr_t orig_addr, uint32_t data_len);
+dma_addr_t ssi_buff_mgr_update_dma_addr(dma_addr_t orig_addr, u32 data_len);
 dma_addr_t ssi_buff_mgr_restore_dma_addr(dma_addr_t orig_addr);
 
 #define SSI_UPDATE_DMA_ADDR_TO_48BIT(addr,size) addr = \
diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c
index 7e85d2c0dfabdc9c9c1ba5308cfbed3e180ba2d0..c62fe4f9859544441d241e8422848a0e18b2b3e5 100644
@@ -45,7 +45,7 @@ struct ssi_blkcipher_handle {
 };
 
 struct cc_user_key_info {
-       uint8_t *key;
+       u8 *key;
        dma_addr_t key_dma_addr;
 };
 struct cc_hw_key_info {
@@ -69,7 +69,7 @@ struct ssi_ablkcipher_ctx {
 static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base);
 
 
-static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, uint32_t size) {
+static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) {
        switch (ctx_p->flow_mode){
        case S_DIN_to_AES:
                switch (size){
@@ -329,7 +329,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
 
        SSI_LOG_DEBUG("Setting key in context @%p for %s. keylen=%u\n",
                ctx_p, crypto_tfm_alg_name(tfm), keylen);
-       dump_byte_array("key", (uint8_t *)key, keylen);
+       dump_byte_array("key", (u8 *)key, keylen);
 
        CHECK_AND_RETURN_UPON_FIPS_ERROR();
 
@@ -724,7 +724,7 @@ ssi_blkcipher_create_data_desc(
                                     "addr 0x%08X addr 0x%08X\n",
                                (unsigned int)ctx_p->drvdata->mlli_sram_addr,
                                (unsigned int)ctx_p->drvdata->mlli_sram_addr +
-                               (uint32_t)LLI_ENTRY_BYTE_SIZE *
+                               (u32)LLI_ENTRY_BYTE_SIZE *
                                                        req_ctx->in_nents);
                        HW_DESC_SET_DOUT_MLLI(&desc[*seq_size],
                                (ctx_p->drvdata->mlli_sram_addr +
@@ -750,7 +750,7 @@ static int ssi_blkcipher_complete(struct device *dev,
                                   void __iomem *cc_base)
 {
        int completion_error = 0;
-       uint32_t inflight_counter;
+       u32 inflight_counter;
        DECL_CYCLE_COUNT_RESOURCES;
 
        START_CYCLE_COUNT();
diff --git a/drivers/staging/ccree/ssi_cipher.h b/drivers/staging/ccree/ssi_cipher.h
index ec2d4f4964d0105c9d90baaac346ac9fbd33437a..7d58b56fc2c7445598addd5a48c1a2ee979683be 100644
 struct blkcipher_req_ctx {
        struct async_gen_req_ctx gen_ctx;
        enum ssi_req_dma_buf_type dma_buf_type;
-       uint32_t in_nents;
-       uint32_t in_mlli_nents;
-       uint32_t out_nents;
-       uint32_t out_mlli_nents;
-       uint8_t *backup_info; /*store iv for generated IV flow*/
+       u32 in_nents;
+       u32 in_mlli_nents;
+       u32 out_nents;
+       u32 out_mlli_nents;
+       u8 *backup_info; /*store iv for generated IV flow*/
        bool is_giv;
        struct mlli_params mlli_params;
 };
diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/ssi_driver.c
index 75b9f41d9c50e922e144f4a0a455db9c73d465c5..52c6984314049a8f571c372c6fb7b94ba2c1c810 100644
 
 
 #ifdef DX_DUMP_BYTES
-void dump_byte_array(const char *name, const uint8_t *the_array, unsigned long size)
+void dump_byte_array(const char *name, const u8 *the_array, unsigned long size)
 {
        int i , line_offset = 0, ret = 0;
-       const uint8_t *cur_byte;
+       const u8 *cur_byte;
        char line_buf[80];
 
        if (the_array == NULL) {
@@ -116,8 +116,8 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
 {
        struct ssi_drvdata *drvdata = (struct ssi_drvdata *)dev_id;
        void __iomem *cc_base = drvdata->cc_base;
-       uint32_t irr;
-       uint32_t imr;
+       u32 irr;
+       u32 imr;
        DECL_CYCLE_COUNT_RESOURCES;
 
        /* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
@@ -154,7 +154,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
 #endif
        /* AXI error interrupt */
        if (unlikely((irr & SSI_AXI_ERR_IRQ_MASK) != 0)) {
-               uint32_t axi_err;
+               u32 axi_err;
 
                /* Read the AXI error ID */
                axi_err = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
@@ -224,7 +224,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
        void __iomem *cc_base = NULL;
        bool irq_registered = false;
        struct ssi_drvdata *new_drvdata = kzalloc(sizeof(struct ssi_drvdata), GFP_KERNEL);
-       uint32_t signature_val;
+       u32 signature_val;
        int rc = 0;
 
        if (unlikely(new_drvdata == NULL)) {
@@ -304,7 +304,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
        signature_val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_SIGNATURE));
        if (signature_val != DX_DEV_SIGNATURE) {
                SSI_LOG_ERR("Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
-                       signature_val, (uint32_t)DX_DEV_SIGNATURE);
+                       signature_val, (u32)DX_DEV_SIGNATURE);
                rc = -EINVAL;
                goto init_cc_res_err;
        }
@@ -479,7 +479,7 @@ static int cc7x_probe(struct platform_device *plat_dev)
 {
        int rc;
 #if defined(CONFIG_ARM) && defined(CC_DEBUG)
-       uint32_t ctr, cacheline_size;
+       u32 ctr, cacheline_size;
 
        asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
        cacheline_size =  4 << ((ctr >> 16) & 0xf);
diff --git a/drivers/staging/ccree/ssi_driver.h b/drivers/staging/ccree/ssi_driver.h
index d3e9eb2dcbf6a77fbae88471da0e9172856ac746..45fc23fe169fd68a0dcfb1c71403ae5efd008295 100644
 #include <crypto/hash.h>
 #include <linux/version.h>
 
-#ifndef INT32_MAX /* Missing in Linux kernel */
-#define INT32_MAX 0x7FFFFFFFL
-#endif
-
 /* Registers definitions from shared/hw/ree_include */
 #include "dx_reg_base_host.h"
 #include "dx_host.h"
@@ -137,11 +133,11 @@ struct ssi_drvdata {
        struct resource *res_irq;
        void __iomem *cc_base;
        unsigned int irq;
-       uint32_t irq_mask;
-       uint32_t fw_ver;
+       u32 irq_mask;
+       u32 fw_ver;
        /* Calibration time of start/stop
        *  monitor descriptors */
-       uint32_t monitor_null_cycles;
+       u32 monitor_null_cycles;
        struct platform_device *plat_dev;
        ssi_sram_addr_t mlli_sram_addr;
        struct completion icache_setup_completion;
@@ -157,7 +153,7 @@ struct ssi_drvdata {
 #ifdef ENABLE_CYCLE_COUNT
        cycles_t isr_exit_cycles; /* Save for isr-to-tasklet latency */
 #endif
-       uint32_t inflight_counter;
+       u32 inflight_counter;
 
 };
 
@@ -196,7 +192,7 @@ struct async_gen_req_ctx {
 };
 
 #ifdef DX_DUMP_BYTES
-void dump_byte_array(const char *name, const uint8_t *the_array, unsigned long size);
+void dump_byte_array(const char *name, const u8 *the_array, unsigned long size);
 #else
 #define dump_byte_array(name, array, size) do {        \
 } while (0);
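
When DX_DUMP_BYTES is not defined, the empty do/while stub above swallows dump_byte_array() calls (note the stray trailing semicolon on the stub, which makes it fragile in an unbraced if/else). Purely as a hedged illustration of the new u8 signature, the same helper could also be written around the in-kernel print_hex_dump() instead of the hand-rolled loop in ssi_driver.c:

	#include <linux/printk.h>
	#include <linux/types.h>

	/* illustrative sketch, not the driver's implementation */
	void dump_byte_array(const char *name, const u8 *the_array, unsigned long size)
	{
		if (!the_array)
			return;
		pr_debug("%s (%lu bytes):\n", name, size);
		print_hex_dump(KERN_DEBUG, "  ", DUMP_PREFIX_OFFSET,
			       16, 1, the_array, size, false);
	}
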
index 0c50655f7e349ac06627796a764aa9ec0b074f5f..607c64b8c458697e9016b70324c06d318e81ecd1 100644 (file)
 #ifndef __SSI_FIPS_H__
 #define __SSI_FIPS_H__
 
-
-#ifndef INT32_MAX /* Missing in Linux kernel */
-#define INT32_MAX 0x7FFFFFFFL
-#endif
-
-
 /*!
 @file
 @brief This file contains FIPS-related definitions and APIs.
@@ -32,7 +26,7 @@ typedef enum ssi_fips_state {
         CC_FIPS_STATE_NOT_SUPPORTED = 0,
         CC_FIPS_STATE_SUPPORTED,
         CC_FIPS_STATE_ERROR,
-        CC_FIPS_STATE_RESERVE32B = INT32_MAX
+        CC_FIPS_STATE_RESERVE32B = S32_MAX
 } ssi_fips_state_t;
 
 
@@ -58,7 +52,7 @@ typedef enum ssi_fips_error {
        CC_REE_FIPS_ERROR_HMAC_SHA256_PUT,
        CC_REE_FIPS_ERROR_HMAC_SHA512_PUT,
        CC_REE_FIPS_ERROR_ROM_CHECKSUM,
-       CC_REE_FIPS_ERROR_RESERVE32B = INT32_MAX
+       CC_REE_FIPS_ERROR_RESERVE32B = S32_MAX
 } ssi_fips_error_t;
 
 
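
Every *_RESERVE32B enumerator in these headers is now set to S32_MAX instead of the locally defined INT32_MAX. The value itself is never used at run time; a plausible reading is that the sentinel merely pins the enum to at least 32 bits of storage so it matches the hardware/firmware ABI, along the lines of:

	/* hypothetical illustration of the sentinel idiom; S32_MAX comes from <linux/kernel.h> */
	enum example_state {
		EXAMPLE_OFF = 0,
		EXAMPLE_ON = 1,
		EXAMPLE_RESERVE32B = S32_MAX	/* forces a 32-bit-wide enum */
	};
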
index 3c7c9dd7fd614fe0ae3c513b1cacdac8886b6b39..eb468e1baf537dada3ee8ab21fbfca0c931134f2 100644 (file)
@@ -28,17 +28,17 @@ that executes the KAT.
 #include "ssi_request_mgr.h"
 
 
-static const uint32_t digest_len_init[] = {
+static const u32 digest_len_init[] = {
        0x00000040, 0x00000000, 0x00000000, 0x00000000 };
-static const uint32_t sha1_init[] = {
+static const u32 sha1_init[] = {
        SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
-static const uint32_t sha256_init[] = {
+static const u32 sha256_init[] = {
        SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
        SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
 #if (CC_SUPPORT_SHA > 256)
-static const uint32_t digest_len_sha512_init[] = {
+static const u32 digest_len_sha512_init[] = {
        0x00000080, 0x00000000, 0x00000000, 0x00000000 };
-static const uint64_t sha512_init[] = {
+static const u64 sha512_init[] = {
        SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
        SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
 #endif
@@ -47,128 +47,128 @@ static const uint64_t sha512_init[] = {
 #define NIST_CIPHER_AES_MAX_VECTOR_SIZE      32
 
 struct fips_cipher_ctx {
-       uint8_t iv[CC_AES_IV_SIZE];
-       uint8_t key[AES_512_BIT_KEY_SIZE];
-       uint8_t din[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
-       uint8_t dout[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
+       u8 iv[CC_AES_IV_SIZE];
+       u8 key[AES_512_BIT_KEY_SIZE];
+       u8 din[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
+       u8 dout[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
 };
 
 typedef struct _FipsCipherData {
-       uint8_t                   isAes;
-       uint8_t                   key[AES_512_BIT_KEY_SIZE];
+       u8                   isAes;
+       u8                   key[AES_512_BIT_KEY_SIZE];
        size_t                    keySize;
-       uint8_t                   iv[CC_AES_IV_SIZE];
+       u8                   iv[CC_AES_IV_SIZE];
        enum drv_crypto_direction direction;
        enum drv_cipher_mode      oprMode;
-       uint8_t                   dataIn[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
-       uint8_t                   dataOut[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
+       u8                   dataIn[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
+       u8                   dataOut[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
        size_t                    dataInSize;
 } FipsCipherData;
 
 
 struct fips_cmac_ctx {
-       uint8_t key[AES_256_BIT_KEY_SIZE];
-       uint8_t din[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
-       uint8_t mac_res[CC_DIGEST_SIZE_MAX];
+       u8 key[AES_256_BIT_KEY_SIZE];
+       u8 din[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
+       u8 mac_res[CC_DIGEST_SIZE_MAX];
 };
 
 typedef struct _FipsCmacData {
        enum drv_crypto_direction direction;
-       uint8_t                   key[AES_256_BIT_KEY_SIZE];
+       u8                   key[AES_256_BIT_KEY_SIZE];
        size_t                    key_size;
-       uint8_t                   data_in[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
+       u8                   data_in[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
        size_t                    data_in_size;
-       uint8_t                   mac_res[CC_DIGEST_SIZE_MAX];
+       u8                   mac_res[CC_DIGEST_SIZE_MAX];
        size_t                    mac_res_size;
 } FipsCmacData;
 
 
 struct fips_hash_ctx {
-       uint8_t initial_digest[CC_DIGEST_SIZE_MAX];
-       uint8_t din[NIST_SHA_MSG_SIZE];
-       uint8_t mac_res[CC_DIGEST_SIZE_MAX];
+       u8 initial_digest[CC_DIGEST_SIZE_MAX];
+       u8 din[NIST_SHA_MSG_SIZE];
+       u8 mac_res[CC_DIGEST_SIZE_MAX];
 };
 
 typedef struct _FipsHashData {
        enum drv_hash_mode    hash_mode;
-       uint8_t               data_in[NIST_SHA_MSG_SIZE];
+       u8               data_in[NIST_SHA_MSG_SIZE];
        size_t                data_in_size;
-       uint8_t               mac_res[CC_DIGEST_SIZE_MAX];
+       u8               mac_res[CC_DIGEST_SIZE_MAX];
 } FipsHashData;
 
 
 /* note that the hmac key length must be less than or equal to the block size (the block size is 64 up to sha256 and 128 for sha384/512) */
 struct fips_hmac_ctx {
-       uint8_t initial_digest[CC_DIGEST_SIZE_MAX];
-       uint8_t key[CC_HMAC_BLOCK_SIZE_MAX];
-       uint8_t k0[CC_HMAC_BLOCK_SIZE_MAX];
-       uint8_t digest_bytes_len[HASH_LEN_SIZE];
-       uint8_t tmp_digest[CC_DIGEST_SIZE_MAX];
-       uint8_t din[NIST_HMAC_MSG_SIZE];
-       uint8_t mac_res[CC_DIGEST_SIZE_MAX];
+       u8 initial_digest[CC_DIGEST_SIZE_MAX];
+       u8 key[CC_HMAC_BLOCK_SIZE_MAX];
+       u8 k0[CC_HMAC_BLOCK_SIZE_MAX];
+       u8 digest_bytes_len[HASH_LEN_SIZE];
+       u8 tmp_digest[CC_DIGEST_SIZE_MAX];
+       u8 din[NIST_HMAC_MSG_SIZE];
+       u8 mac_res[CC_DIGEST_SIZE_MAX];
 };
 
 typedef struct _FipsHmacData {
        enum drv_hash_mode    hash_mode;
-       uint8_t               key[CC_HMAC_BLOCK_SIZE_MAX];
+       u8               key[CC_HMAC_BLOCK_SIZE_MAX];
        size_t                key_size;
-       uint8_t               data_in[NIST_HMAC_MSG_SIZE];
+       u8               data_in[NIST_HMAC_MSG_SIZE];
        size_t                data_in_size;
-       uint8_t               mac_res[CC_DIGEST_SIZE_MAX];
+       u8               mac_res[CC_DIGEST_SIZE_MAX];
 } FipsHmacData;
 
 
 #define FIPS_CCM_B0_A0_ADATA_SIZE   (NIST_AESCCM_IV_SIZE + NIST_AESCCM_IV_SIZE + NIST_AESCCM_ADATA_SIZE)
 
 struct fips_ccm_ctx {
-       uint8_t b0_a0_adata[FIPS_CCM_B0_A0_ADATA_SIZE];
-       uint8_t iv[NIST_AESCCM_IV_SIZE];
-       uint8_t ctr_cnt_0[NIST_AESCCM_IV_SIZE];
-       uint8_t key[CC_AES_KEY_SIZE_MAX];
-       uint8_t din[NIST_AESCCM_TEXT_SIZE];
-       uint8_t dout[NIST_AESCCM_TEXT_SIZE];
-       uint8_t mac_res[NIST_AESCCM_TAG_SIZE];
+       u8 b0_a0_adata[FIPS_CCM_B0_A0_ADATA_SIZE];
+       u8 iv[NIST_AESCCM_IV_SIZE];
+       u8 ctr_cnt_0[NIST_AESCCM_IV_SIZE];
+       u8 key[CC_AES_KEY_SIZE_MAX];
+       u8 din[NIST_AESCCM_TEXT_SIZE];
+       u8 dout[NIST_AESCCM_TEXT_SIZE];
+       u8 mac_res[NIST_AESCCM_TAG_SIZE];
 };
 
 typedef struct _FipsCcmData {
        enum drv_crypto_direction direction;
-       uint8_t                   key[CC_AES_KEY_SIZE_MAX];
+       u8                   key[CC_AES_KEY_SIZE_MAX];
        size_t                    keySize;
-       uint8_t                   nonce[NIST_AESCCM_NONCE_SIZE];
-       uint8_t                   adata[NIST_AESCCM_ADATA_SIZE];
+       u8                   nonce[NIST_AESCCM_NONCE_SIZE];
+       u8                   adata[NIST_AESCCM_ADATA_SIZE];
        size_t                    adataSize;
-       uint8_t                   dataIn[NIST_AESCCM_TEXT_SIZE];
+       u8                   dataIn[NIST_AESCCM_TEXT_SIZE];
        size_t                    dataInSize;
-       uint8_t                   dataOut[NIST_AESCCM_TEXT_SIZE];
-       uint8_t                   tagSize;
-       uint8_t                   macResOut[NIST_AESCCM_TAG_SIZE];
+       u8                   dataOut[NIST_AESCCM_TEXT_SIZE];
+       u8                   tagSize;
+       u8                   macResOut[NIST_AESCCM_TAG_SIZE];
 } FipsCcmData;
 
 
 struct fips_gcm_ctx {
-       uint8_t adata[NIST_AESGCM_ADATA_SIZE];
-       uint8_t key[CC_AES_KEY_SIZE_MAX];
-       uint8_t hkey[CC_AES_KEY_SIZE_MAX];
-       uint8_t din[NIST_AESGCM_TEXT_SIZE];
-       uint8_t dout[NIST_AESGCM_TEXT_SIZE];
-       uint8_t mac_res[NIST_AESGCM_TAG_SIZE];
-       uint8_t len_block[AES_BLOCK_SIZE];
-       uint8_t iv_inc1[AES_BLOCK_SIZE];
-       uint8_t iv_inc2[AES_BLOCK_SIZE];
+       u8 adata[NIST_AESGCM_ADATA_SIZE];
+       u8 key[CC_AES_KEY_SIZE_MAX];
+       u8 hkey[CC_AES_KEY_SIZE_MAX];
+       u8 din[NIST_AESGCM_TEXT_SIZE];
+       u8 dout[NIST_AESGCM_TEXT_SIZE];
+       u8 mac_res[NIST_AESGCM_TAG_SIZE];
+       u8 len_block[AES_BLOCK_SIZE];
+       u8 iv_inc1[AES_BLOCK_SIZE];
+       u8 iv_inc2[AES_BLOCK_SIZE];
 };
 
 typedef struct _FipsGcmData {
        enum drv_crypto_direction direction;
-       uint8_t                   key[CC_AES_KEY_SIZE_MAX];
+       u8                   key[CC_AES_KEY_SIZE_MAX];
        size_t                    keySize;
-       uint8_t                   iv[NIST_AESGCM_IV_SIZE];
-       uint8_t                   adata[NIST_AESGCM_ADATA_SIZE];
+       u8                   iv[NIST_AESGCM_IV_SIZE];
+       u8                   adata[NIST_AESGCM_ADATA_SIZE];
        size_t                    adataSize;
-       uint8_t                   dataIn[NIST_AESGCM_TEXT_SIZE];
+       u8                   dataIn[NIST_AESGCM_TEXT_SIZE];
        size_t                    dataInSize;
-       uint8_t                   dataOut[NIST_AESGCM_TEXT_SIZE];
-       uint8_t                   tagSize;
-       uint8_t                   macResOut[NIST_AESGCM_TAG_SIZE];
+       u8                   dataOut[NIST_AESGCM_TEXT_SIZE];
+       u8                   tagSize;
+       u8                   macResOut[NIST_AESGCM_TAG_SIZE];
 } FipsGcmData;
 
 
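
The comment above struct fips_hmac_ctx notes that the HMAC KAT keys never exceed the hash block size (64 bytes up to SHA-256, 128 bytes for SHA-384/512); that constraint is what lets key[] and k0[] share CC_HMAC_BLOCK_SIZE_MAX. A generic sketch of the K0/ipad step those buffers serve (straight from RFC 2104, not driver code):

	#include <linux/string.h>
	#include <linux/types.h>

	/* illustrative only: key_size <= block_size is assumed, as in the KAT
	 * vectors above; a longer key would first be hashed down to digest size. */
	static void hmac_k0_ipad(u8 *k0, const u8 *key, size_t key_size,
				 size_t block_size)
	{
		size_t i;

		memcpy(k0, key, key_size);
		memset(k0 + key_size, 0, block_size - key_size);
		for (i = 0; i < block_size; i++)
			k0[i] ^= 0x36;		/* ipad; the opad pass uses 0x5c */
	}
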
index 8f5df925e295aa16b737de4e70cd4154d813edfc..316507d88b4eb84e4d52c54362f5eab6f7f28079 100644 (file)
@@ -68,7 +68,7 @@ extern size_t ssi_fips_max_mem_alloc_size(void);
 /* The function is called once at the driver entry point to check whether a TEE FIPS error occurred. */
 static enum ssi_fips_error ssi_fips_get_tee_error(struct ssi_drvdata *drvdata)
 {
-       uint32_t regVal;
+       u32 regVal;
        void __iomem *cc_base = drvdata->cc_base;
 
        regVal = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
@@ -145,8 +145,8 @@ static void fips_dsr(unsigned long devarg)
 {
        struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
        void __iomem *cc_base = drvdata->cc_base;
-       uint32_t irq;
-       uint32_t teeFipsError = 0;
+       u32 irq;
+       u32 teeFipsError = 0;
 
        irq = (drvdata->irq & (SSI_GPR0_IRQ_MASK));
 
index e189b425d7f9d7eb5404bb9fbca7f0445163b174..038dd3b24903e0cc3968c749dfd632e4359e7c4f 100644 (file)
@@ -29,7 +29,7 @@ typedef enum CC_FipsSyncStatus{
        CC_FIPS_SYNC_MODULE_ERROR       = 0x1,
        CC_FIPS_SYNC_REE_STATUS         = 0x4,
        CC_FIPS_SYNC_TEE_STATUS         = 0x8,
-       CC_FIPS_SYNC_STATUS_RESERVE32B  = INT32_MAX
+       CC_FIPS_SYNC_STATUS_RESERVE32B  = S32_MAX
 }CCFipsSyncStatus_t;
 
 
index 69c1df2aa2e72fd46ed89f7ebe35fba84e5ed3d3..2d3c6304befd059975689fc9b905d6da7e71a0f5 100644 (file)
@@ -42,25 +42,25 @@ struct ssi_hash_handle {
        struct completion init_comp;
 };
 
-static const uint32_t digest_len_init[] = {
+static const u32 digest_len_init[] = {
        0x00000040, 0x00000000, 0x00000000, 0x00000000 };
-static const uint32_t md5_init[] = {
+static const u32 md5_init[] = {
        SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
-static const uint32_t sha1_init[] = {
+static const u32 sha1_init[] = {
        SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
-static const uint32_t sha224_init[] = {
+static const u32 sha224_init[] = {
        SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
        SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
-static const uint32_t sha256_init[] = {
+static const u32 sha256_init[] = {
        SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
        SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
 #if (DX_DEV_SHA_MAX > 256)
-static const uint32_t digest_len_sha512_init[] = {
+static const u32 digest_len_sha512_init[] = {
        0x00000080, 0x00000000, 0x00000000, 0x00000000 };
-static const uint64_t sha384_init[] = {
+static const u64 sha384_init[] = {
        SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
        SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
-static const uint64_t sha512_init[] = {
+static const u64 sha512_init[] = {
        SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
        SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
 #endif
@@ -89,7 +89,7 @@ struct ssi_hash_alg {
 
 
 struct hash_key_req_ctx {
-       uint32_t keylen;
+       u32 keylen;
        dma_addr_t key_dma_addr;
 };
 
@@ -98,8 +98,8 @@ struct ssi_hash_ctx {
        struct ssi_drvdata *drvdata;
        /* holds the original digest; the digest after "setkey" if HMAC,
         * the initial digest if HASH. */
-       uint8_t digest_buff[SSI_MAX_HASH_DIGEST_SIZE]  ____cacheline_aligned;
-       uint8_t opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE]  ____cacheline_aligned;
+       u8 digest_buff[SSI_MAX_HASH_DIGEST_SIZE]  ____cacheline_aligned;
+       u8 opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE]  ____cacheline_aligned;
        dma_addr_t opad_tmp_keys_dma_addr  ____cacheline_aligned;
        dma_addr_t digest_buff_dma_addr;
        /* use for hmac with key large then mode block size */
@@ -120,7 +120,7 @@ static void ssi_hash_create_data_desc(
        bool is_not_last_data,
        unsigned int *seq_size);
 
-static inline void ssi_set_hash_endianity(uint32_t mode, HwDesc_s *desc)
+static inline void ssi_set_hash_endianity(u32 mode, HwDesc_s *desc)
 {
        if (unlikely((mode == DRV_HASH_MD5) ||
                (mode == DRV_HASH_SHA384) ||
@@ -414,7 +414,7 @@ static void ssi_hash_digest_complete(struct device *dev, void *ssi_req, void __i
        struct ahash_req_ctx *state = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-       uint32_t digestsize = crypto_ahash_digestsize(tfm);
+       u32 digestsize = crypto_ahash_digestsize(tfm);
 
        SSI_LOG_DEBUG("req=%pK\n", req);
 
@@ -430,7 +430,7 @@ static void ssi_hash_complete(struct device *dev, void *ssi_req, void __iomem *c
        struct ahash_req_ctx *state = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-       uint32_t digestsize = crypto_ahash_digestsize(tfm);
+       u32 digestsize = crypto_ahash_digestsize(tfm);
 
        SSI_LOG_DEBUG("req=%pK\n", req);
 
@@ -611,7 +611,7 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
        struct device *dev = &ctx->drvdata->plat_dev->dev;
        struct ssi_crypto_req ssi_req = {};
        HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
-       uint32_t idx = 0;
+       u32 idx = 0;
        int rc;
 
        SSI_LOG_DEBUG("===== %s-update (%d) ====\n", ctx->is_hmac ?
@@ -1473,7 +1473,7 @@ static int ssi_mac_update(struct ahash_request *req)
        struct ssi_crypto_req ssi_req = {};
        HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
        int rc;
-       uint32_t idx = 0;
+       u32 idx = 0;
 
        CHECK_AND_RETURN_UPON_FIPS_ERROR();
        if (req->nbytes == 0) {
@@ -1536,10 +1536,10 @@ static int ssi_mac_final(struct ahash_request *req)
        HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
        int idx = 0;
        int rc = 0;
-       uint32_t keySize, keyLen;
-       uint32_t digestsize = crypto_ahash_digestsize(tfm);
+       u32 keySize, keyLen;
+       u32 digestsize = crypto_ahash_digestsize(tfm);
 
-       uint32_t rem_cnt = state->buff_index ? state->buff1_cnt :
+       u32 rem_cnt = state->buff_index ? state->buff1_cnt :
                        state->buff0_cnt;
 
 
@@ -1650,8 +1650,8 @@ static int ssi_mac_finup(struct ahash_request *req)
        HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
        int idx = 0;
        int rc = 0;
-       uint32_t key_len = 0;
-       uint32_t digestsize = crypto_ahash_digestsize(tfm);
+       u32 key_len = 0;
+       u32 digestsize = crypto_ahash_digestsize(tfm);
 
        SSI_LOG_DEBUG("===== finup xcbc(%d) ====\n", req->nbytes);
        CHECK_AND_RETURN_UPON_FIPS_ERROR();
@@ -1719,10 +1719,10 @@ static int ssi_mac_digest(struct ahash_request *req)
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct device *dev = &ctx->drvdata->plat_dev->dev;
-       uint32_t digestsize = crypto_ahash_digestsize(tfm);
+       u32 digestsize = crypto_ahash_digestsize(tfm);
        struct ssi_crypto_req ssi_req = {};
        HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
-       uint32_t keyLen;
+       u32 keyLen;
        int idx = 0;
        int rc;
 
@@ -1798,7 +1798,7 @@ static int ssi_shash_digest(struct shash_desc *desc,
        struct ahash_req_ctx *state = shash_desc_ctx(desc);
        struct crypto_shash *tfm = desc->tfm;
        struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
-       uint32_t digestsize = crypto_shash_digestsize(tfm);
+       u32 digestsize = crypto_shash_digestsize(tfm);
        struct scatterlist src;
 
        if (len == 0) {
@@ -1817,7 +1817,7 @@ static int ssi_shash_update(struct shash_desc *desc,
        struct ahash_req_ctx *state = shash_desc_ctx(desc);
        struct crypto_shash *tfm = desc->tfm;
        struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
-       uint32_t blocksize = crypto_tfm_alg_blocksize(&tfm->base);
+       u32 blocksize = crypto_tfm_alg_blocksize(&tfm->base);
        struct scatterlist src;
 
        sg_init_one(&src, (const void *)data, len);
@@ -1831,7 +1831,7 @@ static int ssi_shash_finup(struct shash_desc *desc,
        struct ahash_req_ctx *state = shash_desc_ctx(desc);
        struct crypto_shash *tfm = desc->tfm;
        struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
-       uint32_t digestsize = crypto_shash_digestsize(tfm);
+       u32 digestsize = crypto_shash_digestsize(tfm);
        struct scatterlist src;
 
        sg_init_one(&src, (const void *)data, len);
@@ -1844,7 +1844,7 @@ static int ssi_shash_final(struct shash_desc *desc, u8 *out)
        struct ahash_req_ctx *state = shash_desc_ctx(desc);
        struct crypto_shash *tfm = desc->tfm;
        struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
-       uint32_t digestsize = crypto_shash_digestsize(tfm);
+       u32 digestsize = crypto_shash_digestsize(tfm);
 
        return ssi_hash_final(state, ctx, digestsize, NULL, 0, out, NULL);
 }
@@ -1890,7 +1890,7 @@ static int ssi_ahash_digest(struct ahash_request *req)
        struct ahash_req_ctx *state = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-       uint32_t digestsize = crypto_ahash_digestsize(tfm);
+       u32 digestsize = crypto_ahash_digestsize(tfm);
 
        return ssi_hash_digest(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
 }
@@ -1910,7 +1910,7 @@ static int ssi_ahash_finup(struct ahash_request *req)
        struct ahash_req_ctx *state = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-       uint32_t digestsize = crypto_ahash_digestsize(tfm);
+       u32 digestsize = crypto_ahash_digestsize(tfm);
 
        return ssi_hash_finup(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
 }
@@ -1920,7 +1920,7 @@ static int ssi_ahash_final(struct ahash_request *req)
        struct ahash_req_ctx *state = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-       uint32_t digestsize = crypto_ahash_digestsize(tfm);
+       u32 digestsize = crypto_ahash_digestsize(tfm);
 
        return ssi_hash_final(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
 }
@@ -2284,7 +2284,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
        struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
        ssi_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
        unsigned int larval_seq_len = 0;
-       HwDesc_s larval_seq[CC_DIGEST_SIZE_MAX/sizeof(uint32_t)];
+       HwDesc_s larval_seq[CC_DIGEST_SIZE_MAX/sizeof(u32)];
        int rc = 0;
 #if (DX_DEV_SHA_MAX > 256)
        int i;
@@ -2351,15 +2351,15 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 #if (DX_DEV_SHA_MAX > 256)
        /* We are forced to swap each double-word larval before copying to sram */
        for (i = 0; i < ARRAY_SIZE(sha384_init); i++) {
-               const uint32_t const0 = ((uint32_t *)((uint64_t *)&sha384_init[i]))[1];
-               const uint32_t const1 = ((uint32_t *)((uint64_t *)&sha384_init[i]))[0];
+               const u32 const0 = ((u32 *)((u64 *)&sha384_init[i]))[1];
+               const u32 const1 = ((u32 *)((u64 *)&sha384_init[i]))[0];
 
                ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
                        larval_seq, &larval_seq_len);
-               sram_buff_ofs += sizeof(uint32_t);
+               sram_buff_ofs += sizeof(u32);
                ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
                        larval_seq, &larval_seq_len);
-               sram_buff_ofs += sizeof(uint32_t);
+               sram_buff_ofs += sizeof(u32);
        }
        rc = send_request_init(drvdata, larval_seq, larval_seq_len);
        if (unlikely(rc != 0)) {
@@ -2369,15 +2369,15 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
        larval_seq_len = 0;
 
        for (i = 0; i < ARRAY_SIZE(sha512_init); i++) {
-               const uint32_t const0 = ((uint32_t *)((uint64_t *)&sha512_init[i]))[1];
-               const uint32_t const1 = ((uint32_t *)((uint64_t *)&sha512_init[i]))[0];
+               const u32 const0 = ((u32 *)((u64 *)&sha512_init[i]))[1];
+               const u32 const1 = ((u32 *)((u64 *)&sha512_init[i]))[0];
 
                ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
                        larval_seq, &larval_seq_len);
-               sram_buff_ofs += sizeof(uint32_t);
+               sram_buff_ofs += sizeof(u32);
                ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
                        larval_seq, &larval_seq_len);
-               sram_buff_ofs += sizeof(uint32_t);
+               sram_buff_ofs += sizeof(u32);
        }
        rc = send_request_init(drvdata, larval_seq, larval_seq_len);
        if (unlikely(rc != 0)) {
@@ -2394,7 +2394,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
 {
        struct ssi_hash_handle *hash_handle;
        ssi_sram_addr_t sram_buff;
-       uint32_t sram_size_to_alloc;
+       u32 sram_size_to_alloc;
        int rc = 0;
        int alg;
 
@@ -2686,9 +2686,9 @@ static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
  * \param drvdata
  * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256
  *
- * \return uint32_t The address of the initial digest in SRAM
+ * \return u32 The address of the initial digest in SRAM
  */
-ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, uint32_t mode)
+ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
 {
        struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
        struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
@@ -2734,7 +2734,7 @@ ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, uint32_t mo
 }
 
 ssi_sram_addr_t
-ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, uint32_t mode)
+ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, u32 mode)
 {
        struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
        struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
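
Earlier in this file's diff, ssi_hash_init_sram_digest_consts() splits each 64-bit SHA-384/512 larval word into two 32-bit halves and writes the second half of the in-memory representation first. A hedged sketch of the same loop written with the value-based kernel helpers upper_32_bits()/lower_32_bits(), which matches the cast-based original only on a little-endian host and reuses the surrounding locals (i, sram_buff_ofs, larval_seq, larval_seq_len):

	/* illustrative rewrite of the sha512_init copy loop, little-endian host assumed */
	for (i = 0; i < ARRAY_SIZE(sha512_init); i++) {
		const u32 hi = upper_32_bits(sha512_init[i]);
		const u32 lo = lower_32_bits(sha512_init[i]);

		ssi_sram_mgr_const2sram_desc(&hi, sram_buff_ofs, 1,
					     larval_seq, &larval_seq_len);
		sram_buff_ofs += sizeof(u32);
		ssi_sram_mgr_const2sram_desc(&lo, sram_buff_ofs, 1,
					     larval_seq, &larval_seq_len);
		sram_buff_ofs += sizeof(u32);
	}
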
index b5bf67721fbad46c8261abbb66c33d30b8c5981b..b821d0c854b571c742ce8858ee0019e3099d72b6 100644 (file)
@@ -48,26 +48,26 @@ struct aeshash_state {
 
 /* ahash state */
 struct ahash_req_ctx {
-       uint8_t* buff0;
-       uint8_t* buff1;
-       uint8_t* digest_result_buff;
+       u8* buff0;
+       u8* buff1;
+       u8* digest_result_buff;
        struct async_gen_req_ctx gen_ctx;
        enum ssi_req_dma_buf_type data_dma_buf_type;
-       uint8_t *digest_buff;
-       uint8_t *opad_digest_buff;
-       uint8_t *digest_bytes_len;
+       u8 *digest_buff;
+       u8 *opad_digest_buff;
+       u8 *digest_bytes_len;
        dma_addr_t opad_digest_dma_addr;
        dma_addr_t digest_buff_dma_addr;
        dma_addr_t digest_bytes_len_dma_addr;
        dma_addr_t digest_result_dma_addr;
-       uint32_t buff0_cnt;
-       uint32_t buff1_cnt;
-       uint32_t buff_index;
-       uint32_t xcbc_count; /* count xcbc update operatations */
+       u32 buff0_cnt;
+       u32 buff1_cnt;
+       u32 buff_index;
+       u32 xcbc_count; /* count xcbc update operations */
        struct scatterlist buff_sg[2];
        struct scatterlist *curr_sg;
-       uint32_t in_nents;
-       uint32_t mlli_nents;
+       u32 in_nents;
+       u32 mlli_nents;
        struct mlli_params mlli_params;
 };
 
@@ -81,10 +81,10 @@ int ssi_hash_free(struct ssi_drvdata *drvdata);
  * \param drvdata
  * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
  *
- * \return uint32_t returns the address of the initial digest length in SRAM
+ * \return u32 returns the address of the initial digest length in SRAM
  */
 ssi_sram_addr_t
-ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, uint32_t mode);
+ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, u32 mode);
 
 /*!
  * Gets the address of the initial digest in SRAM
@@ -93,9 +93,9 @@ ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, uint32_t mode);
  * \param drvdata
  * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
  *
- * \return uint32_t The address of the initial digest in SRAM
+ * \return u32 The address of the initial digest in SRAM
  */
-ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, uint32_t mode);
+ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode);
 
 #endif /*__SSI_HASH_H__*/
 
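
Both prototypes above return SRAM offsets rather than host pointers (ssi_sram_addr_t is typedef'd from u64 in ssi_sram_mgr.h below). A hypothetical caller, only to show the shapes involved:

	/* hypothetical helper, for illustration; DRV_HASH_SHA256 is from cc_crypto_ctx.h */
	static void example_fetch_sha256_consts(struct ssi_drvdata *drvdata,
						ssi_sram_addr_t *larval,
						ssi_sram_addr_t *digest_len)
	{
		*larval = ssi_ahash_get_larval_digest_sram_addr(drvdata, DRV_HASH_SHA256);
		*digest_len = ssi_ahash_get_initial_digest_len_sram_addr(drvdata, DRV_HASH_SHA256);
	}
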
index a760fafd1a43ff07889e109344f2cf7f59bb417e..3ea040f59c024c0f54a0307ddcd24a20dfdce3d2 100644 (file)
@@ -43,8 +43,8 @@ struct ssi_ivgen_ctx {
        ssi_sram_addr_t pool;
        ssi_sram_addr_t ctr_key;
        ssi_sram_addr_t ctr_iv;
-       uint32_t next_iv_ofs;
-       uint8_t *pool_meta;
+       u32 next_iv_ofs;
+       u8 *pool_meta;
        dma_addr_t pool_meta_dma;
 };
 
index 0631323a64b8f0e71e453a8b0eac96692a267cf5..260aee33f235eb7445a0fe0f3735a3fdcd477aa6 100644 (file)
@@ -87,7 +87,7 @@ do { \
  */
 #define END_CC_MONITOR_COUNT(cc_base_addr, stat_op_type, stat_phase, monitor_null_cycles, lock_p, is_monitored) \
 do { \
-       uint32_t elapsed_cycles; \
+       u32 elapsed_cycles; \
        if ((is_monitored) == true) { \
                elapsed_cycles = READ_REGISTER((cc_base_addr) + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_MEASURE_CNTR)); \
                clear_bit(MONITOR_CNTR_BIT, (lock_p)); \
@@ -111,13 +111,13 @@ struct ssi_request_mgr_handle {
        unsigned int min_free_hw_slots;
        unsigned int max_used_sw_slots;
        struct ssi_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
-       uint32_t req_queue_head;
-       uint32_t req_queue_tail;
-       uint32_t axi_completed;
-       uint32_t q_free_slots;
+       u32 req_queue_head;
+       u32 req_queue_tail;
+       u32 axi_completed;
+       u32 q_free_slots;
        spinlock_t hw_lock;
        HwDesc_s compl_desc;
-       uint8_t *dummy_comp_buff;
+       u8 *dummy_comp_buff;
        dma_addr_t dummy_comp_buff_dma;
        HwDesc_s monitor_desc;
        volatile unsigned long monitor_lock;
@@ -147,7 +147,7 @@ void request_mgr_fini(struct ssi_drvdata *drvdata)
        if (req_mgr_h->dummy_comp_buff_dma != 0) {
                SSI_RESTORE_DMA_ADDR_TO_48BIT(req_mgr_h->dummy_comp_buff_dma);
                dma_free_coherent(&drvdata->plat_dev->dev,
-                                 sizeof(uint32_t), req_mgr_h->dummy_comp_buff,
+                                 sizeof(u32), req_mgr_h->dummy_comp_buff,
                                  req_mgr_h->dummy_comp_buff_dma);
        }
 
@@ -213,22 +213,22 @@ int request_mgr_init(struct ssi_drvdata *drvdata)
 
        /* Allocate DMA word for "dummy" completion descriptor use */
        req_mgr_h->dummy_comp_buff = dma_alloc_coherent(&drvdata->plat_dev->dev,
-               sizeof(uint32_t), &req_mgr_h->dummy_comp_buff_dma, GFP_KERNEL);
+               sizeof(u32), &req_mgr_h->dummy_comp_buff_dma, GFP_KERNEL);
        if (!req_mgr_h->dummy_comp_buff) {
                SSI_LOG_ERR("Not enough memory to allocate DMA (%zu) dropped "
-                          "buffer\n", sizeof(uint32_t));
+                          "buffer\n", sizeof(u32));
                rc = -ENOMEM;
                goto req_mgr_init_err;
        }
        SSI_UPDATE_DMA_ADDR_TO_48BIT(req_mgr_h->dummy_comp_buff_dma,
-                                                            sizeof(uint32_t));
+                                                            sizeof(u32));
 
        /* Init. "dummy" completion descriptor */
        HW_DESC_INIT(&req_mgr_h->compl_desc);
-       HW_DESC_SET_DIN_CONST(&req_mgr_h->compl_desc, 0, sizeof(uint32_t));
+       HW_DESC_SET_DIN_CONST(&req_mgr_h->compl_desc, 0, sizeof(u32));
        HW_DESC_SET_DOUT_DLLI(&req_mgr_h->compl_desc,
                req_mgr_h->dummy_comp_buff_dma,
-               sizeof(uint32_t), NS_BIT, 1);
+               sizeof(u32), NS_BIT, 1);
        HW_DESC_SET_FLOW_MODE(&req_mgr_h->compl_desc, BYPASS);
        HW_DESC_SET_QUEUE_LAST_IND(&req_mgr_h->compl_desc);
 
@@ -581,7 +581,7 @@ static void proc_completions(struct ssi_drvdata *drvdata)
 #ifdef COMPLETION_DELAY
                /* Delay */
                {
-                       uint32_t axi_err;
+                       u32 axi_err;
                        int i;
                        SSI_LOG_INFO("Delay\n");
                        for (i=0;i<1000000;i++) {
@@ -615,7 +615,7 @@ static void comp_handler(unsigned long devarg)
        struct ssi_request_mgr_handle * request_mgr_handle =
                                                drvdata->request_mgr_handle;
 
-       uint32_t irq;
+       u32 irq;
 
        DECL_CYCLE_COUNT_RESOURCES;
 
index 44662fdd9c97e6da5aa2741b7133ce18f34ebfcd..7dd5a72c2da331faba57f5e064c3cdfa0717795a 100644 (file)
@@ -85,7 +85,7 @@ int ssi_sram_mgr_init(struct ssi_drvdata *drvdata)
  * \param drvdata
  * \param size The requested bytes to allocate
  */
-ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, uint32_t size)
+ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size)
 {
        struct ssi_sram_mgr_ctx *smgr_ctx = drvdata->sram_mgr_handle;
        ssi_sram_addr_t p;
@@ -119,17 +119,17 @@ ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, uint32_t size)
  * @seq_len:     A pointer to the given IN/OUT sequence length
  */
 void ssi_sram_mgr_const2sram_desc(
-       const uint32_t *src, ssi_sram_addr_t dst,
+       const u32 *src, ssi_sram_addr_t dst,
        unsigned int nelement,
        HwDesc_s *seq, unsigned int *seq_len)
 {
-       uint32_t i;
+       u32 i;
        unsigned int idx = *seq_len;
 
        for (i = 0; i < nelement; i++, idx++) {
                HW_DESC_INIT(&seq[idx]);
-               HW_DESC_SET_DIN_CONST(&seq[idx], src[i], sizeof(uint32_t));
-               HW_DESC_SET_DOUT_SRAM(&seq[idx], dst + (i * sizeof(uint32_t)), sizeof(uint32_t));
+               HW_DESC_SET_DIN_CONST(&seq[idx], src[i], sizeof(u32));
+               HW_DESC_SET_DOUT_SRAM(&seq[idx], dst + (i * sizeof(u32)), sizeof(u32));
                HW_DESC_SET_FLOW_MODE(&seq[idx], BYPASS);
        }
 
index a71e17dc8bbe790336319e4757c99b5c327722ea..df634db11e24d14bd5917264b2ae5b8ec9012fa7 100644 (file)
@@ -28,7 +28,7 @@ struct ssi_drvdata;
  * Address (offset) within CC internal SRAM
  */
 
-typedef uint64_t ssi_sram_addr_t;
+typedef u64 ssi_sram_addr_t;
 
 #define NULL_SRAM_ADDR ((ssi_sram_addr_t)-1)
 
@@ -59,7 +59,7 @@ void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata);
  * \param drvdata
  * \param size The requested bytes to allocate
  */
-ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, uint32_t size);
+ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size);
 
 /**
  * ssi_sram_mgr_const2sram_desc() - Create const descriptors sequence to
@@ -73,7 +73,7 @@ ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, uint32_t size);
  * @seq_len:     A pointer to the given IN/OUT sequence length
  */
 void ssi_sram_mgr_const2sram_desc(
-       const uint32_t *src, ssi_sram_addr_t dst,
+       const u32 *src, ssi_sram_addr_t dst,
        unsigned int nelement,
        HwDesc_s *seq, unsigned int *seq_len);
 
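
Taken together, the two declarations above give the usage pattern already visible in ssi_hash_init_sram_digest_consts(): reserve SRAM, then queue BYPASS descriptors that copy a constant table into it. A condensed, hypothetical sketch (assuming the allocator reports exhaustion with NULL_SRAM_ADDR, and reusing send_request_init() from ssi_hash.c):

	/* illustrative sketch of the allocate-then-copy pattern; nwords is kept small */
	static int example_load_consts(struct ssi_drvdata *drvdata,
				       const u32 *tbl, unsigned int nwords)
	{
		HwDesc_s seq[8];
		unsigned int seq_len = 0;
		ssi_sram_addr_t dst;

		dst = ssi_sram_mgr_alloc(drvdata, nwords * sizeof(u32));
		if (dst == NULL_SRAM_ADDR)
			return -ENOMEM;

		ssi_sram_mgr_const2sram_desc(tbl, dst, nwords, seq, &seq_len);
		return send_request_init(drvdata, seq, seq_len);
	}
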
index 1ff1e78ccc08e1c7735a506e14875f54dd8a24bf..89021c00987220a3af021e5d04de1bce0315887d 100644 (file)
@@ -95,7 +95,7 @@ struct sys_dir {
        struct kobject *sys_dir_kobj;
        struct attribute_group sys_dir_attr_group;
        struct attribute **sys_dir_attr_list;
-       uint32_t num_of_attrs;
+       u32 num_of_attrs;
        struct ssi_drvdata *drvdata; /* Associated driver context */
 };
 
@@ -137,12 +137,12 @@ static void update_db(struct stat_item *item, unsigned int result)
 static void display_db(struct stat_item item[MAX_STAT_OP_TYPES][MAX_STAT_PHASES])
 {
        unsigned int i, j;
-       uint64_t avg;
+       u64 avg;
 
        for (i=STAT_OP_TYPE_ENCODE; i<MAX_STAT_OP_TYPES; i++) {
                for (j=0; j<MAX_STAT_PHASES; j++) {
                        if (item[i][j].count > 0) {
-                               avg = (uint64_t)item[i][j].sum;
+                               avg = (u64)item[i][j].sum;
                                do_div(avg, item[i][j].count);
                                SSI_LOG_ERR("%s, %s: min=%d avg=%d max=%d sum=%lld count=%d\n",
                                        stat_name_db[i].op_type_name, stat_name_db[i].stat_phase_name[j],
@@ -176,8 +176,8 @@ static ssize_t ssi_sys_stat_host_db_show(struct kobject *kobj,
 {
        int i, j ;
        char line[512];
-       uint32_t min_cyc, max_cyc;
-       uint64_t avg;
+       u32 min_cyc, max_cyc;
+       u64 avg;
        ssize_t buf_len, tmp_len=0;
 
        buf_len = scnprintf(buf,PAGE_SIZE,
@@ -187,7 +187,7 @@ static ssize_t ssi_sys_stat_host_db_show(struct kobject *kobj,
        for (i=STAT_OP_TYPE_ENCODE; i<MAX_STAT_OP_TYPES; i++) {
                for (j=0; j<MAX_STAT_PHASES-1; j++) {
                        if (stat_host_db[i][j].count > 0) {
-                               avg = (uint64_t)stat_host_db[i][j].sum;
+                               avg = (u64)stat_host_db[i][j].sum;
                                do_div(avg, stat_host_db[i][j].count);
                                min_cyc = stat_host_db[i][j].min;
                                max_cyc = stat_host_db[i][j].max;
@@ -216,8 +216,8 @@ static ssize_t ssi_sys_stat_cc_db_show(struct kobject *kobj,
 {
        int i;
        char line[256];
-       uint32_t min_cyc, max_cyc;
-       uint64_t avg;
+       u32 min_cyc, max_cyc;
+       u64 avg;
        ssize_t buf_len,tmp_len=0;
 
        buf_len = scnprintf(buf,PAGE_SIZE,
@@ -226,7 +226,7 @@ static ssize_t ssi_sys_stat_cc_db_show(struct kobject *kobj,
                return buf_len;
        for (i=STAT_OP_TYPE_ENCODE; i<MAX_STAT_OP_TYPES; i++) {
                if (stat_cc_db[i][STAT_PHASE_6].count > 0) {
-                       avg = (uint64_t)stat_cc_db[i][STAT_PHASE_6].sum;
+                       avg = (u64)stat_cc_db[i][STAT_PHASE_6].sum;
                        do_div(avg, stat_cc_db[i][STAT_PHASE_6].count);
                        min_cyc = stat_cc_db[i][STAT_PHASE_6].min;
                        max_cyc = stat_cc_db[i][STAT_PHASE_6].max;
@@ -284,7 +284,7 @@ static ssize_t ssi_sys_regdump_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
 {
        struct ssi_drvdata *drvdata = sys_get_drvdata();
-       uint32_t register_value;
+       u32 register_value;
        void __iomem* cc_base = drvdata->cc_base;
        int offset = 0;
 
@@ -333,7 +333,7 @@ struct sys_dir {
        struct kobject *sys_dir_kobj;
        struct attribute_group sys_dir_attr_group;
        struct attribute **sys_dir_attr_list;
-       uint32_t num_of_attrs;
+       u32 num_of_attrs;
        struct ssi_drvdata *drvdata; /* Associated driver context */
 };
 
@@ -361,7 +361,7 @@ static struct ssi_drvdata *sys_get_drvdata(void)
 
 static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata,
                 struct kobject *parent_dir_kobj, const char *dir_name,
-                struct kobj_attribute *attrs, uint32_t num_of_attrs)
+                struct kobj_attribute *attrs, u32 num_of_attrs)
 {
        int i;